// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/skbuff.h>
#include <net/devlink.h>
#include <net/pkt_cls.h>

#include "cmsg.h"
#include "main.h"
#include "conntrack.h"
#include "../nfpcore/nfp_cpp.h"
#include "../nfpcore/nfp_nsp.h"
#include "../nfp_app.h"
#include "../nfp_main.h"
#include "../nfp_net.h"
#include "../nfp_port.h"

#define NFP_FLOWER_SUPPORTED_TCPFLAGS \
	(TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST | \
	 TCPHDR_PSH | TCPHDR_URG)

#define NFP_FLOWER_SUPPORTED_CTLFLAGS \
	(FLOW_DIS_IS_FRAGMENT | \
	 FLOW_DIS_FIRST_FRAG)

#define NFP_FLOWER_WHITELIST_DISSECTOR \
	(BIT(FLOW_DISSECTOR_KEY_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_BASIC) | \
	 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_TCP) | \
	 BIT(FLOW_DISSECTOR_KEY_PORTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_VLAN) | \
	 BIT(FLOW_DISSECTOR_KEY_CVLAN) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IP) | \
	 BIT(FLOW_DISSECTOR_KEY_MPLS) | \
	 BIT(FLOW_DISSECTOR_KEY_CT) | \
	 BIT(FLOW_DISSECTOR_KEY_META) | \
	 BIT(FLOW_DISSECTOR_KEY_IP))

#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR \
	(BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IP))

#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R \
	(BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS))

#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_V6_R \
	(BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS))

#define NFP_FLOWER_MERGE_FIELDS \
	(NFP_FLOWER_LAYER_PORT | \
	 NFP_FLOWER_LAYER_MAC | \
	 NFP_FLOWER_LAYER_TP | \
	 NFP_FLOWER_LAYER_IPV4 | \
	 NFP_FLOWER_LAYER_IPV6)

#define NFP_FLOWER_PRE_TUN_RULE_FIELDS \
	(NFP_FLOWER_LAYER_EXT_META | \
	 NFP_FLOWER_LAYER_PORT | \
	 NFP_FLOWER_LAYER_MAC | \
	 NFP_FLOWER_LAYER_IPV4 | \
	 NFP_FLOWER_LAYER_IPV6)

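/* Bit-level view of the match fields that are checked when merging two
 * flows. The anonymous struct overlays vals[] so the same bytes can be
 * manipulated with the bitmap_*() helpers in nfp_flower_can_merge().
 */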
struct nfp_flower_merge_check {
	union {
		struct {
			__be16 tci;
			struct nfp_flower_mac_mpls l2;
			struct nfp_flower_tp_ports l4;
			union {
				struct nfp_flower_ipv4 ipv4;
				struct nfp_flower_ipv6 ipv6;
			};
		};
		unsigned long vals[8];
	};
};

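/* Send a flow rule to the firmware as a control message. The message
 * body is laid out as metadata | key | mask | actions, with the meta
 * length fields temporarily converted to firmware long-word units.
 */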
int
nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow,
		     u8 mtype)
{
	u32 meta_len, key_len, mask_len, act_len, tot_len;
	struct sk_buff *skb;
	unsigned char *msg;

	meta_len = sizeof(struct nfp_fl_rule_metadata);
	key_len = nfp_flow->meta.key_len;
	mask_len = nfp_flow->meta.mask_len;
	act_len = nfp_flow->meta.act_len;

	tot_len = meta_len + key_len + mask_len + act_len;

	/* Convert to long words as firmware expects
	 * lengths in units of NFP_FL_LW_SIZ.
	 */
	nfp_flow->meta.key_len >>= NFP_FL_LW_SIZ;
	nfp_flow->meta.mask_len >>= NFP_FL_LW_SIZ;
	nfp_flow->meta.act_len >>= NFP_FL_LW_SIZ;

	skb = nfp_flower_cmsg_alloc(app, tot_len, mtype, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	msg = nfp_flower_cmsg_get_data(skb);
	memcpy(msg, &nfp_flow->meta, meta_len);
	memcpy(&msg[meta_len], nfp_flow->unmasked_data, key_len);
	memcpy(&msg[meta_len + key_len], nfp_flow->mask_data, mask_len);
	memcpy(&msg[meta_len + key_len + mask_len],
	       nfp_flow->action_data, act_len);

	/* Convert back to bytes as software expects
	 * lengths in units of bytes.
	 */
	nfp_flow->meta.key_len <<= NFP_FL_LW_SIZ;
	nfp_flow->meta.mask_len <<= NFP_FL_LW_SIZ;
	nfp_flow->meta.act_len <<= NFP_FL_LW_SIZ;

	nfp_ctrl_tx(app->ctrl, skb);

	return 0;
}

static bool nfp_flower_check_higher_than_mac(struct flow_rule *rule)
{
	return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS) ||
	       flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS) ||
	       flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS) ||
	       flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP);
}

static bool nfp_flower_check_higher_than_l3(struct flow_rule *rule)
{
	return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS) ||
	       flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP);
}

static int
nfp_flower_calc_opt_layer(struct flow_dissector_key_enc_opts *enc_opts,
			  u32 *key_layer_two, int *key_size, bool ipv6,
			  struct netlink_ext_ack *extack)
{
	if (enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY ||
	    (ipv6 && enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY_V6)) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: geneve options exceed maximum length");
		return -EOPNOTSUPP;
	}

	if (enc_opts->len > 0) {
		*key_layer_two |= NFP_FLOWER_LAYER2_GENEVE_OP;
		*key_size += sizeof(struct nfp_flower_geneve_options);
	}

	return 0;
}

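/* Infer the tunnel key layers from the destination UDP port of the
 * encapsulation match: VXLAN and GENEVE are distinguished by their
 * well-known ports, and IPv6 tunnels additionally require the extended
 * metadata layer.
 */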
static int
nfp_flower_calc_udp_tun_layer(struct flow_dissector_key_ports *enc_ports,
			      struct flow_dissector_key_enc_opts *enc_op,
			      u32 *key_layer_two, u8 *key_layer, int *key_size,
			      struct nfp_flower_priv *priv,
			      enum nfp_flower_tun_type *tun_type, bool ipv6,
			      struct netlink_ext_ack *extack)
{
	int err;

	switch (enc_ports->dst) {
	case htons(IANA_VXLAN_UDP_PORT):
		*tun_type = NFP_FL_TUNNEL_VXLAN;
		*key_layer |= NFP_FLOWER_LAYER_VXLAN;

		if (ipv6) {
			*key_layer |= NFP_FLOWER_LAYER_EXT_META;
			*key_size += sizeof(struct nfp_flower_ext_meta);
			*key_layer_two |= NFP_FLOWER_LAYER2_TUN_IPV6;
			*key_size += sizeof(struct nfp_flower_ipv6_udp_tun);
		} else {
			*key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
		}

		if (enc_op) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: encap options not supported on vxlan tunnels");
			return -EOPNOTSUPP;
		}
		break;
	case htons(GENEVE_UDP_PORT):
		if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE)) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support geneve offload");
			return -EOPNOTSUPP;
		}
		*tun_type = NFP_FL_TUNNEL_GENEVE;
		*key_layer |= NFP_FLOWER_LAYER_EXT_META;
		*key_size += sizeof(struct nfp_flower_ext_meta);
		*key_layer_two |= NFP_FLOWER_LAYER2_GENEVE;

		if (ipv6) {
			*key_layer_two |= NFP_FLOWER_LAYER2_TUN_IPV6;
			*key_size += sizeof(struct nfp_flower_ipv6_udp_tun);
		} else {
			*key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
		}

		if (!enc_op)
			break;
		if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT)) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support geneve option offload");
			return -EOPNOTSUPP;
		}
		err = nfp_flower_calc_opt_layer(enc_op, key_layer_two, key_size,
						ipv6, extack);
		if (err)
			return err;
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel type unknown");
		return -EOPNOTSUPP;
	}

	return 0;
}

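/* Walk the flow dissector keys used by the rule and accumulate the
 * firmware key layer bitmaps and total key size, rejecting any match
 * the firmware cannot express with -EOPNOTSUPP.
 */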
int
nfp_flower_calculate_key_layers(struct nfp_app *app,
				struct net_device *netdev,
				struct nfp_fl_key_ls *ret_key_ls,
				struct flow_rule *rule,
				enum nfp_flower_tun_type *tun_type,
				struct netlink_ext_ack *extack)
{
	struct flow_dissector *dissector = rule->match.dissector;
	struct flow_match_basic basic = { NULL, NULL };
	struct nfp_flower_priv *priv = app->priv;
	u32 key_layer_two;
	u8 key_layer;
	int key_size;
	int err;

	if (dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match not supported");
		return -EOPNOTSUPP;
	}

	/* If any tun dissector is used then the required set must be used. */
	if (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR &&
	    (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_V6_R)
	    != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_V6_R &&
	    (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
	    != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel match not supported");
		return -EOPNOTSUPP;
	}

	key_layer_two = 0;
	key_layer = NFP_FLOWER_LAYER_PORT;
	key_size = sizeof(struct nfp_flower_meta_tci) +
		   sizeof(struct nfp_flower_in_port);

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS) ||
	    flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) {
		key_layer |= NFP_FLOWER_LAYER_MAC;
		key_size += sizeof(struct nfp_flower_mac_mpls);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan vlan;

		flow_rule_match_vlan(rule, &vlan);
		if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_PCP) &&
		    vlan.key->vlan_priority) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support VLAN PCP offload");
			return -EOPNOTSUPP;
		}
		if (priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ &&
		    !(key_layer_two & NFP_FLOWER_LAYER2_QINQ)) {
			key_layer |= NFP_FLOWER_LAYER_EXT_META;
			key_size += sizeof(struct nfp_flower_ext_meta);
			key_size += sizeof(struct nfp_flower_vlan);
			key_layer_two |= NFP_FLOWER_LAYER2_QINQ;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
		struct flow_match_vlan cvlan;

		if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ)) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support VLAN QinQ offload");
			return -EOPNOTSUPP;
		}

		flow_rule_match_vlan(rule, &cvlan);
		if (!(key_layer_two & NFP_FLOWER_LAYER2_QINQ)) {
			key_layer |= NFP_FLOWER_LAYER_EXT_META;
			key_size += sizeof(struct nfp_flower_ext_meta);
			key_size += sizeof(struct nfp_flower_vlan);
			key_layer_two |= NFP_FLOWER_LAYER2_QINQ;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_match_enc_opts enc_op = { NULL, NULL };
		struct flow_match_ipv4_addrs ipv4_addrs;
		struct flow_match_ipv6_addrs ipv6_addrs;
		struct flow_match_control enc_ctl;
		struct flow_match_ports enc_ports;
		bool ipv6_tun = false;

		flow_rule_match_enc_control(rule, &enc_ctl);

		if (enc_ctl.mask->addr_type != 0xffff) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: wildcarded protocols on tunnels are not supported");
			return -EOPNOTSUPP;
		}

		ipv6_tun = enc_ctl.key->addr_type ==
				FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		if (ipv6_tun &&
		    !(priv->flower_ext_feats & NFP_FL_FEATS_IPV6_TUN)) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: firmware does not support IPv6 tunnels");
			return -EOPNOTSUPP;
		}

		if (!ipv6_tun &&
		    enc_ctl.key->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel address type not IPv4 or IPv6");
			return -EOPNOTSUPP;
		}

		if (ipv6_tun) {
			flow_rule_match_enc_ipv6_addrs(rule, &ipv6_addrs);
			if (memchr_inv(&ipv6_addrs.mask->dst, 0xff,
				       sizeof(ipv6_addrs.mask->dst))) {
				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match IPv6 destination address is supported");
				return -EOPNOTSUPP;
			}
		} else {
			flow_rule_match_enc_ipv4_addrs(rule, &ipv4_addrs);
			if (ipv4_addrs.mask->dst != cpu_to_be32(~0)) {
				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match IPv4 destination address is supported");
				return -EOPNOTSUPP;
			}
		}

		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS))
			flow_rule_match_enc_opts(rule, &enc_op);

		if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
			/* check if GRE, which has no enc_ports */
			if (!netif_is_gretap(netdev)) {
				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: an exact match on L4 destination port is required for non-GRE tunnels");
				return -EOPNOTSUPP;
			}

			*tun_type = NFP_FL_TUNNEL_GRE;
			key_layer |= NFP_FLOWER_LAYER_EXT_META;
			key_size += sizeof(struct nfp_flower_ext_meta);
			key_layer_two |= NFP_FLOWER_LAYER2_GRE;

			if (ipv6_tun) {
				key_layer_two |= NFP_FLOWER_LAYER2_TUN_IPV6;
				key_size +=
					sizeof(struct nfp_flower_ipv6_udp_tun);
			} else {
				key_size +=
					sizeof(struct nfp_flower_ipv4_udp_tun);
			}

			if (enc_op.key) {
				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: encap options not supported on GRE tunnels");
				return -EOPNOTSUPP;
			}
		} else {
			flow_rule_match_enc_ports(rule, &enc_ports);
			if (enc_ports.mask->dst != cpu_to_be16(~0)) {
				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match L4 destination port is supported");
				return -EOPNOTSUPP;
			}

			err = nfp_flower_calc_udp_tun_layer(enc_ports.key,
							    enc_op.key,
							    &key_layer_two,
							    &key_layer,
							    &key_size, priv,
							    tun_type, ipv6_tun,
							    extack);
			if (err)
				return err;

			/* Ensure the ingress netdev matches the expected
			 * tun type.
			 */
			if (!nfp_fl_netdev_is_tunnel_type(netdev, *tun_type)) {
				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: ingress netdev does not match the expected tunnel type");
				return -EOPNOTSUPP;
			}
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC))
		flow_rule_match_basic(rule, &basic);

	if (basic.mask && basic.mask->n_proto) {
		/* Ethernet type is present in the key. */
		switch (basic.key->n_proto) {
		case cpu_to_be16(ETH_P_IP):
			key_layer |= NFP_FLOWER_LAYER_IPV4;
			key_size += sizeof(struct nfp_flower_ipv4);
			break;

		case cpu_to_be16(ETH_P_IPV6):
			key_layer |= NFP_FLOWER_LAYER_IPV6;
			key_size += sizeof(struct nfp_flower_ipv6);
			break;

		/* Currently we do not offload ARP
		 * because we rely on it to get to the host.
		 */
		case cpu_to_be16(ETH_P_ARP):
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: ARP not supported");
			return -EOPNOTSUPP;

		case cpu_to_be16(ETH_P_MPLS_UC):
		case cpu_to_be16(ETH_P_MPLS_MC):
			if (!(key_layer & NFP_FLOWER_LAYER_MAC)) {
				key_layer |= NFP_FLOWER_LAYER_MAC;
				key_size += sizeof(struct nfp_flower_mac_mpls);
			}
			break;

		/* Will be included in layer 2. */
		case cpu_to_be16(ETH_P_8021Q):
			break;

		default:
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on given EtherType is not supported");
			return -EOPNOTSUPP;
		}
	} else if (nfp_flower_check_higher_than_mac(rule)) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot match above L2 without specified EtherType");
		return -EOPNOTSUPP;
	}

	if (basic.mask && basic.mask->ip_proto) {
		switch (basic.key->ip_proto) {
		case IPPROTO_TCP:
		case IPPROTO_UDP:
		case IPPROTO_SCTP:
		case IPPROTO_ICMP:
		case IPPROTO_ICMPV6:
			key_layer |= NFP_FLOWER_LAYER_TP;
			key_size += sizeof(struct nfp_flower_tp_ports);
			break;
		}
	}

	if (!(key_layer & NFP_FLOWER_LAYER_TP) &&
	    nfp_flower_check_higher_than_l3(rule)) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot match on L4 information without specified IP protocol type");
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
		struct flow_match_tcp tcp;
		u32 tcp_flags;

		flow_rule_match_tcp(rule, &tcp);
		tcp_flags = be16_to_cpu(tcp.key->flags);

		if (tcp_flags & ~NFP_FLOWER_SUPPORTED_TCPFLAGS) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: no match support for selected TCP flags");
			return -EOPNOTSUPP;
		}

		/* We only support PSH and URG flags when either
		 * FIN, SYN or RST is present as well.
		 */
		if ((tcp_flags & (TCPHDR_PSH | TCPHDR_URG)) &&
		    !(tcp_flags & (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST))) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: PSH and URG is only supported when used with FIN, SYN or RST");
			return -EOPNOTSUPP;
		}

		/* We need to store TCP flags in either the IPv4 or IPv6 key
		 * space, thus we need to ensure we include an IPv4/IPv6 key
		 * layer if we have not done so already.
		 */
		if (!basic.key) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on TCP flags requires a match on L3 protocol");
			return -EOPNOTSUPP;
		}

		if (!(key_layer & NFP_FLOWER_LAYER_IPV4) &&
		    !(key_layer & NFP_FLOWER_LAYER_IPV6)) {
			switch (basic.key->n_proto) {
			case cpu_to_be16(ETH_P_IP):
				key_layer |= NFP_FLOWER_LAYER_IPV4;
				key_size += sizeof(struct nfp_flower_ipv4);
				break;

			case cpu_to_be16(ETH_P_IPV6):
				key_layer |= NFP_FLOWER_LAYER_IPV6;
				key_size += sizeof(struct nfp_flower_ipv6);
				break;

			default:
				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on TCP flags requires a match on IPv4/IPv6");
				return -EOPNOTSUPP;
			}
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control ctl;

		flow_rule_match_control(rule, &ctl);
		if (ctl.key->flags & ~NFP_FLOWER_SUPPORTED_CTLFLAGS) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on unknown control flag");
			return -EOPNOTSUPP;
		}
	}

	ret_key_ls->key_layer = key_layer;
	ret_key_ls->key_layer_two = key_layer_two;
	ret_key_ls->key_size = key_size;

	return 0;
}

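/* Allocate a flow payload sized from the calculated key layers: key and
 * mask buffers share the computed key size, while the action buffer is
 * always allocated at the maximum supported action size.
 */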
struct nfp_fl_payload *
nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
{
	struct nfp_fl_payload *flow_pay;

	flow_pay = kmalloc(sizeof(*flow_pay), GFP_KERNEL);
	if (!flow_pay)
		return NULL;

	flow_pay->meta.key_len = key_layer->key_size;
	flow_pay->unmasked_data = kmalloc(key_layer->key_size, GFP_KERNEL);
	if (!flow_pay->unmasked_data)
		goto err_free_flow;

	flow_pay->meta.mask_len = key_layer->key_size;
	flow_pay->mask_data = kmalloc(key_layer->key_size, GFP_KERNEL);
	if (!flow_pay->mask_data)
		goto err_free_unmasked;

	flow_pay->action_data = kmalloc(NFP_FL_MAX_A_SIZ, GFP_KERNEL);
	if (!flow_pay->action_data)
		goto err_free_mask;

	flow_pay->nfp_tun_ipv4_addr = 0;
	flow_pay->nfp_tun_ipv6 = NULL;
	flow_pay->meta.flags = 0;
	INIT_LIST_HEAD(&flow_pay->linked_flows);
	flow_pay->in_hw = false;
	flow_pay->pre_tun_rule.dev = NULL;

	return flow_pay;

err_free_mask:
	kfree(flow_pay->mask_data);
err_free_unmasked:
	kfree(flow_pay->unmasked_data);
err_free_flow:
	kfree(flow_pay);
	return NULL;
}

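/* Fold a flow's actions into its merge-check mask: any header field an
 * action rewrites is treated as if it were fully matched, since its
 * value after recirculation is then known.
 */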
static int
nfp_flower_update_merge_with_actions(struct nfp_fl_payload *flow,
				     struct nfp_flower_merge_check *merge,
				     u8 *last_act_id, int *act_out)
{
	struct nfp_fl_set_ipv6_tc_hl_fl *ipv6_tc_hl_fl;
	struct nfp_fl_set_ip4_ttl_tos *ipv4_ttl_tos;
	struct nfp_fl_set_ip4_addrs *ipv4_add;
	struct nfp_fl_set_ipv6_addr *ipv6_add;
	struct nfp_fl_push_vlan *push_vlan;
	struct nfp_fl_pre_tunnel *pre_tun;
	struct nfp_fl_set_tport *tport;
	struct nfp_fl_set_eth *eth;
	struct nfp_fl_act_head *a;
	unsigned int act_off = 0;
	bool ipv6_tun = false;
	u8 act_id = 0;
	u8 *ports;
	int i;

	while (act_off < flow->meta.act_len) {
		a = (struct nfp_fl_act_head *)&flow->action_data[act_off];
		act_id = a->jump_id;

		switch (act_id) {
		case NFP_FL_ACTION_OPCODE_OUTPUT:
			if (act_out)
				(*act_out)++;
			break;
		case NFP_FL_ACTION_OPCODE_PUSH_VLAN:
			push_vlan = (struct nfp_fl_push_vlan *)a;
			if (push_vlan->vlan_tci)
				merge->tci = cpu_to_be16(0xffff);
			break;
		case NFP_FL_ACTION_OPCODE_POP_VLAN:
			merge->tci = cpu_to_be16(0);
			break;
		case NFP_FL_ACTION_OPCODE_SET_TUNNEL:
			/* New tunnel header means l2 to l4 can be matched. */
			eth_broadcast_addr(&merge->l2.mac_dst[0]);
			eth_broadcast_addr(&merge->l2.mac_src[0]);
			memset(&merge->l4, 0xff,
			       sizeof(struct nfp_flower_tp_ports));
			if (ipv6_tun)
				memset(&merge->ipv6, 0xff,
				       sizeof(struct nfp_flower_ipv6));
			else
				memset(&merge->ipv4, 0xff,
				       sizeof(struct nfp_flower_ipv4));
			break;
		case NFP_FL_ACTION_OPCODE_SET_ETHERNET:
			eth = (struct nfp_fl_set_eth *)a;
			for (i = 0; i < ETH_ALEN; i++)
				merge->l2.mac_dst[i] |= eth->eth_addr_mask[i];
			for (i = 0; i < ETH_ALEN; i++)
				merge->l2.mac_src[i] |=
					eth->eth_addr_mask[ETH_ALEN + i];
			break;
		case NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS:
			ipv4_add = (struct nfp_fl_set_ip4_addrs *)a;
			merge->ipv4.ipv4_src |= ipv4_add->ipv4_src_mask;
			merge->ipv4.ipv4_dst |= ipv4_add->ipv4_dst_mask;
			break;
		case NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS:
			ipv4_ttl_tos = (struct nfp_fl_set_ip4_ttl_tos *)a;
			merge->ipv4.ip_ext.ttl |= ipv4_ttl_tos->ipv4_ttl_mask;
			merge->ipv4.ip_ext.tos |= ipv4_ttl_tos->ipv4_tos_mask;
			break;
		case NFP_FL_ACTION_OPCODE_SET_IPV6_SRC:
			ipv6_add = (struct nfp_fl_set_ipv6_addr *)a;
			for (i = 0; i < 4; i++)
				merge->ipv6.ipv6_src.in6_u.u6_addr32[i] |=
					ipv6_add->ipv6[i].mask;
			break;
		case NFP_FL_ACTION_OPCODE_SET_IPV6_DST:
			ipv6_add = (struct nfp_fl_set_ipv6_addr *)a;
			for (i = 0; i < 4; i++)
				merge->ipv6.ipv6_dst.in6_u.u6_addr32[i] |=
					ipv6_add->ipv6[i].mask;
			break;
		case NFP_FL_ACTION_OPCODE_SET_IPV6_TC_HL_FL:
			ipv6_tc_hl_fl = (struct nfp_fl_set_ipv6_tc_hl_fl *)a;
			merge->ipv6.ip_ext.ttl |=
				ipv6_tc_hl_fl->ipv6_hop_limit_mask;
			merge->ipv6.ip_ext.tos |= ipv6_tc_hl_fl->ipv6_tc_mask;
			merge->ipv6.ipv6_flow_label_exthdr |=
				ipv6_tc_hl_fl->ipv6_label_mask;
			break;
		case NFP_FL_ACTION_OPCODE_SET_UDP:
		case NFP_FL_ACTION_OPCODE_SET_TCP:
			tport = (struct nfp_fl_set_tport *)a;
			ports = (u8 *)&merge->l4.port_src;
			for (i = 0; i < 4; i++)
				ports[i] |= tport->tp_port_mask[i];
			break;
		case NFP_FL_ACTION_OPCODE_PRE_TUNNEL:
			pre_tun = (struct nfp_fl_pre_tunnel *)a;
			ipv6_tun = be16_to_cpu(pre_tun->flags) &
					NFP_FL_PRE_TUN_IPV6;
			break;
		case NFP_FL_ACTION_OPCODE_PRE_LAG:
		case NFP_FL_ACTION_OPCODE_PUSH_GENEVE:
			break;
		default:
			return -EOPNOTSUPP;
		}

		act_off += a->len_lw << NFP_FL_LW_SIZ;
	}

	if (last_act_id)
		*last_act_id = act_id;

	return 0;
}

static int
nfp_flower_populate_merge_match(struct nfp_fl_payload *flow,
				struct nfp_flower_merge_check *merge,
				bool extra_fields)
{
	struct nfp_flower_meta_tci *meta_tci;
	u8 *mask = flow->mask_data;
	u8 key_layer, match_size;

	memset(merge, 0, sizeof(struct nfp_flower_merge_check));

	meta_tci = (struct nfp_flower_meta_tci *)mask;
	key_layer = meta_tci->nfp_flow_key_layer;

	if (key_layer & ~NFP_FLOWER_MERGE_FIELDS && !extra_fields)
		return -EOPNOTSUPP;

	merge->tci = meta_tci->tci;
	mask += sizeof(struct nfp_flower_meta_tci);

	if (key_layer & NFP_FLOWER_LAYER_EXT_META)
		mask += sizeof(struct nfp_flower_ext_meta);

	mask += sizeof(struct nfp_flower_in_port);

	if (key_layer & NFP_FLOWER_LAYER_MAC) {
		match_size = sizeof(struct nfp_flower_mac_mpls);
		memcpy(&merge->l2, mask, match_size);
		mask += match_size;
	}

	if (key_layer & NFP_FLOWER_LAYER_TP) {
		match_size = sizeof(struct nfp_flower_tp_ports);
		memcpy(&merge->l4, mask, match_size);
		mask += match_size;
	}

	if (key_layer & NFP_FLOWER_LAYER_IPV4) {
		match_size = sizeof(struct nfp_flower_ipv4);
		memcpy(&merge->ipv4, mask, match_size);
	}

	if (key_layer & NFP_FLOWER_LAYER_IPV6) {
		match_size = sizeof(struct nfp_flower_ipv6);
		memcpy(&merge->ipv6, mask, match_size);
	}

	return 0;
}

static int
nfp_flower_can_merge(struct nfp_fl_payload *sub_flow1,
		     struct nfp_fl_payload *sub_flow2)
{
	/* Two flows can be merged if sub_flow2 only matches on bits that are
	 * either matched by sub_flow1 or set by a sub_flow1 action. This
	 * ensures that every packet that hits sub_flow1 and recirculates is
	 * guaranteed to hit sub_flow2.
	 */
	struct nfp_flower_merge_check sub_flow1_merge, sub_flow2_merge;
	int err, act_out = 0;
	u8 last_act_id = 0;

	err = nfp_flower_populate_merge_match(sub_flow1, &sub_flow1_merge,
					      true);
	if (err)
		return err;

	err = nfp_flower_populate_merge_match(sub_flow2, &sub_flow2_merge,
					      false);
	if (err)
		return err;

	err = nfp_flower_update_merge_with_actions(sub_flow1, &sub_flow1_merge,
						   &last_act_id, &act_out);
	if (err)
		return err;

	/* Must only be 1 output action and it must be the last in sequence. */
	if (act_out != 1 || last_act_id != NFP_FL_ACTION_OPCODE_OUTPUT)
		return -EOPNOTSUPP;

	/* Reject merge if sub_flow2 matches on something that is not matched
	 * on or set in an action by sub_flow1.
	 */
	err = bitmap_andnot(sub_flow2_merge.vals, sub_flow2_merge.vals,
			    sub_flow1_merge.vals,
			    sizeof(struct nfp_flower_merge_check) * 8);
	if (err)
		return -EINVAL;

	return 0;
}

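/* Copy any pre-actions (PRE_TUNNEL/PRE_LAG), which must sit at the head
 * of an action list, into the merge flow and return the offset of the
 * first non-pre action.
 */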
static unsigned int
nfp_flower_copy_pre_actions(char *act_dst, char *act_src, int len,
			    bool *tunnel_act)
{
	unsigned int act_off = 0, act_len;
	struct nfp_fl_act_head *a;
	u8 act_id = 0;

	while (act_off < len) {
		a = (struct nfp_fl_act_head *)&act_src[act_off];
		act_len = a->len_lw << NFP_FL_LW_SIZ;
		act_id = a->jump_id;

		switch (act_id) {
		case NFP_FL_ACTION_OPCODE_PRE_TUNNEL:
			if (tunnel_act)
				*tunnel_act = true;
			fallthrough;
		case NFP_FL_ACTION_OPCODE_PRE_LAG:
			memcpy(act_dst + act_off, act_src + act_off, act_len);
			break;
		default:
			return act_off;
		}

		act_off += act_len;
	}

	return act_off;
}

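/* After a tunnel push, only an optional leading VLAN push followed by
 * output actions form a valid remainder of the action list.
 */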
static int
nfp_fl_verify_post_tun_acts(char *acts, int len, struct nfp_fl_push_vlan **vlan)
{
	struct nfp_fl_act_head *a;
	unsigned int act_off = 0;

	while (act_off < len) {
		a = (struct nfp_fl_act_head *)&acts[act_off];

		if (a->jump_id == NFP_FL_ACTION_OPCODE_PUSH_VLAN && !act_off)
			*vlan = (struct nfp_fl_push_vlan *)a;
		else if (a->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT)
			return -EOPNOTSUPP;

		act_off += a->len_lw << NFP_FL_LW_SIZ;
	}

	/* Ensure any VLAN push also has an egress action. */
	if (*vlan && act_off <= sizeof(struct nfp_fl_push_vlan))
		return -EOPNOTSUPP;

	return 0;
}

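/* Fold a post-tunnel VLAN push into the tunnel set action so the
 * firmware applies the tag to the outer header when it builds the
 * encapsulation.
 */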
static int
nfp_fl_push_vlan_after_tun(char *acts, int len, struct nfp_fl_push_vlan *vlan)
{
	struct nfp_fl_set_tun *tun;
	struct nfp_fl_act_head *a;
	unsigned int act_off = 0;

	while (act_off < len) {
		a = (struct nfp_fl_act_head *)&acts[act_off];

		if (a->jump_id == NFP_FL_ACTION_OPCODE_SET_TUNNEL) {
			tun = (struct nfp_fl_set_tun *)a;
			tun->outer_vlan_tpid = vlan->vlan_tpid;
			tun->outer_vlan_tci = vlan->vlan_tci;

			return 0;
		}

		act_off += a->len_lw << NFP_FL_LW_SIZ;
	}

	/* Return error if no tunnel action is found. */
	return -EOPNOTSUPP;
}

static int
nfp_flower_merge_action(struct nfp_fl_payload *sub_flow1,
			struct nfp_fl_payload *sub_flow2,
			struct nfp_fl_payload *merge_flow)
{
	unsigned int sub1_act_len, sub2_act_len, pre_off1, pre_off2;
	struct nfp_fl_push_vlan *post_tun_push_vlan = NULL;
	bool tunnel_act = false;
	char *merge_act;
	int err;

	/* The last action of sub_flow1 must be output - do not merge this. */
	sub1_act_len = sub_flow1->meta.act_len - sizeof(struct nfp_fl_output);
	sub2_act_len = sub_flow2->meta.act_len;

	if (!sub2_act_len)
		return -EINVAL;

	if (sub1_act_len + sub2_act_len > NFP_FL_MAX_A_SIZ)
		return -EINVAL;

	/* A shortcut can only be applied if there is a single action. */
	if (sub1_act_len)
		merge_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
	else
		merge_flow->meta.shortcut = sub_flow2->meta.shortcut;

	merge_flow->meta.act_len = sub1_act_len + sub2_act_len;
	merge_act = merge_flow->action_data;

	/* Copy any pre-actions to the start of merge flow action list. */
	pre_off1 = nfp_flower_copy_pre_actions(merge_act,
					       sub_flow1->action_data,
					       sub1_act_len, &tunnel_act);
	merge_act += pre_off1;
	sub1_act_len -= pre_off1;
	pre_off2 = nfp_flower_copy_pre_actions(merge_act,
					       sub_flow2->action_data,
					       sub2_act_len, NULL);
	merge_act += pre_off2;
	sub2_act_len -= pre_off2;

	/* FW does a tunnel push when egressing, therefore, if sub_flow 1 pushes
	 * a tunnel, there are restrictions on what sub_flow 2 actions lead to a
	 * valid merge.
	 */
	if (tunnel_act) {
		char *post_tun_acts = &sub_flow2->action_data[pre_off2];

		err = nfp_fl_verify_post_tun_acts(post_tun_acts, sub2_act_len,
						  &post_tun_push_vlan);
		if (err)
			return err;

		if (post_tun_push_vlan) {
			pre_off2 += sizeof(*post_tun_push_vlan);
			sub2_act_len -= sizeof(*post_tun_push_vlan);
		}
	}

	/* Copy remaining actions from sub_flows 1 and 2. */
	memcpy(merge_act, sub_flow1->action_data + pre_off1, sub1_act_len);

	if (post_tun_push_vlan) {
		/* Update tunnel action in merge to include VLAN push. */
		err = nfp_fl_push_vlan_after_tun(merge_act, sub1_act_len,
						 post_tun_push_vlan);
		if (err)
			return err;

		merge_flow->meta.act_len -= sizeof(*post_tun_push_vlan);
	}

	merge_act += sub1_act_len;
	memcpy(merge_act, sub_flow2->action_data + pre_off2, sub2_act_len);

	return 0;
}

/* Flow link code should only be accessed under RTNL. */
static void nfp_flower_unlink_flow(struct nfp_fl_payload_link *link)
{
	list_del(&link->merge_flow.list);
	list_del(&link->sub_flow.list);
	kfree(link);
}

static void nfp_flower_unlink_flows(struct nfp_fl_payload *merge_flow,
				    struct nfp_fl_payload *sub_flow)
{
	struct nfp_fl_payload_link *link;

	list_for_each_entry(link, &merge_flow->linked_flows, merge_flow.list)
		if (link->sub_flow.flow == sub_flow) {
			nfp_flower_unlink_flow(link);
			return;
		}
}

static int nfp_flower_link_flows(struct nfp_fl_payload *merge_flow,
				 struct nfp_fl_payload *sub_flow)
{
	struct nfp_fl_payload_link *link;

	link = kmalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	link->merge_flow.flow = merge_flow;
	list_add_tail(&link->merge_flow.list, &merge_flow->linked_flows);
	link->sub_flow.flow = sub_flow;
	list_add_tail(&link->sub_flow.list, &sub_flow->linked_flows);

	return 0;
}

/**
 * nfp_flower_merge_offloaded_flows() - Merge 2 existing flows into a single flow.
 * @app:	Pointer to the APP handle
 * @sub_flow1:	Initial flow matched to produce merge hint
 * @sub_flow2:	Post recirculation flow matched in merge hint
 *
 * Combines 2 flows (if valid) to a single flow, removing the initial from hw
 * and offloading the new, merged flow.
 *
 * Return: negative value on error, 0 on success.
 */
int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
				     struct nfp_fl_payload *sub_flow1,
				     struct nfp_fl_payload *sub_flow2)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload *merge_flow;
	struct nfp_fl_key_ls merge_key_ls;
	struct nfp_merge_info *merge_info;
	u64 parent_ctx = 0;
	int err;

	ASSERT_RTNL();

	if (sub_flow1 == sub_flow2 ||
	    nfp_flower_is_merge_flow(sub_flow1) ||
	    nfp_flower_is_merge_flow(sub_flow2))
		return -EINVAL;

	/* check if the two flows are already merged */
	parent_ctx = (u64)(be32_to_cpu(sub_flow1->meta.host_ctx_id)) << 32;
	parent_ctx |= (u64)(be32_to_cpu(sub_flow2->meta.host_ctx_id));
	if (rhashtable_lookup_fast(&priv->merge_table,
				   &parent_ctx, merge_table_params)) {
		nfp_flower_cmsg_warn(app, "The two flows are already merged.\n");
		return 0;
	}

	err = nfp_flower_can_merge(sub_flow1, sub_flow2);
	if (err)
		return err;

	merge_key_ls.key_size = sub_flow1->meta.key_len;

	merge_flow = nfp_flower_allocate_new(&merge_key_ls);
	if (!merge_flow)
		return -ENOMEM;

	merge_flow->tc_flower_cookie = (unsigned long)merge_flow;
	merge_flow->ingress_dev = sub_flow1->ingress_dev;

	memcpy(merge_flow->unmasked_data, sub_flow1->unmasked_data,
	       sub_flow1->meta.key_len);
	memcpy(merge_flow->mask_data, sub_flow1->mask_data,
	       sub_flow1->meta.mask_len);

	err = nfp_flower_merge_action(sub_flow1, sub_flow2, merge_flow);
	if (err)
		goto err_destroy_merge_flow;

	err = nfp_flower_link_flows(merge_flow, sub_flow1);
	if (err)
		goto err_destroy_merge_flow;

	err = nfp_flower_link_flows(merge_flow, sub_flow2);
	if (err)
		goto err_unlink_sub_flow1;

	err = nfp_compile_flow_metadata(app, merge_flow->tc_flower_cookie, merge_flow,
					merge_flow->ingress_dev, NULL);
	if (err)
		goto err_unlink_sub_flow2;

	err = rhashtable_insert_fast(&priv->flow_table, &merge_flow->fl_node,
				     nfp_flower_table_params);
	if (err)
		goto err_release_metadata;

	merge_info = kmalloc(sizeof(*merge_info), GFP_KERNEL);
	if (!merge_info) {
		err = -ENOMEM;
		goto err_remove_rhash;
	}
	merge_info->parent_ctx = parent_ctx;
	err = rhashtable_insert_fast(&priv->merge_table, &merge_info->ht_node,
				     merge_table_params);
	if (err)
		goto err_destroy_merge_info;

	err = nfp_flower_xmit_flow(app, merge_flow,
				   NFP_FLOWER_CMSG_TYPE_FLOW_MOD);
	if (err)
		goto err_remove_merge_info;

	merge_flow->in_hw = true;
	sub_flow1->in_hw = false;

	return 0;

err_remove_merge_info:
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->merge_table,
					    &merge_info->ht_node,
					    merge_table_params));
err_destroy_merge_info:
	kfree(merge_info);
err_remove_rhash:
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
					    &merge_flow->fl_node,
					    nfp_flower_table_params));
err_release_metadata:
	nfp_modify_flow_metadata(app, merge_flow);
err_unlink_sub_flow2:
	nfp_flower_unlink_flows(merge_flow, sub_flow2);
err_unlink_sub_flow1:
	nfp_flower_unlink_flows(merge_flow, sub_flow1);
err_destroy_merge_flow:
	kfree(merge_flow->action_data);
	kfree(merge_flow->mask_data);
	kfree(merge_flow->unmasked_data);
	kfree(merge_flow);
	return err;
}

/**
 * nfp_flower_validate_pre_tun_rule() - Verifies a flow as a pre-tunnel rule.
 * @app:	Pointer to the APP handle
 * @flow:	Pointer to NFP flow representation of rule
 * @key_ls:	Pointer to NFP key layers structure
 * @extack:	Netlink extended ACK report
 *
 * Verifies the flow as a pre-tunnel rule.
 *
 * Return: negative value on error, 0 if verified.
 */
static int
nfp_flower_validate_pre_tun_rule(struct nfp_app *app,
				 struct nfp_fl_payload *flow,
				 struct nfp_fl_key_ls *key_ls,
				 struct netlink_ext_ack *extack)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_flower_meta_tci *meta_tci;
	struct nfp_flower_mac_mpls *mac;
	u8 *ext = flow->unmasked_data;
	struct nfp_fl_act_head *act;
	u8 *mask = flow->mask_data;
	bool vlan = false;
	int act_offset;
	u8 key_layer;

	meta_tci = (struct nfp_flower_meta_tci *)flow->unmasked_data;
	key_layer = key_ls->key_layer;
	if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ)) {
		if (meta_tci->tci & cpu_to_be16(NFP_FLOWER_MASK_VLAN_PRESENT)) {
			u16 vlan_tci = be16_to_cpu(meta_tci->tci);

			vlan_tci &= ~NFP_FLOWER_MASK_VLAN_PRESENT;
			flow->pre_tun_rule.vlan_tci = cpu_to_be16(vlan_tci);
			vlan = true;
		} else {
			flow->pre_tun_rule.vlan_tci = cpu_to_be16(0xffff);
		}
	}

	if (key_layer & ~NFP_FLOWER_PRE_TUN_RULE_FIELDS) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: too many match fields");
		return -EOPNOTSUPP;
	} else if (key_ls->key_layer_two & ~NFP_FLOWER_LAYER2_QINQ) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: non-vlan in extended match fields");
		return -EOPNOTSUPP;
	}

	if (!(key_layer & NFP_FLOWER_LAYER_MAC)) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: MAC fields match required");
		return -EOPNOTSUPP;
	}

	if (!(key_layer & NFP_FLOWER_LAYER_IPV4) &&
	    !(key_layer & NFP_FLOWER_LAYER_IPV6)) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: match on ipv4/ipv6 eth_type must be present");
		return -EOPNOTSUPP;
	}

	/* Skip fields known to exist. */
	mask += sizeof(struct nfp_flower_meta_tci);
	ext += sizeof(struct nfp_flower_meta_tci);
	if (key_ls->key_layer_two) {
		mask += sizeof(struct nfp_flower_ext_meta);
		ext += sizeof(struct nfp_flower_ext_meta);
	}
	mask += sizeof(struct nfp_flower_in_port);
	ext += sizeof(struct nfp_flower_in_port);

	/* Ensure destination MAC address matches pre_tun_dev. */
	mac = (struct nfp_flower_mac_mpls *)ext;
	if (memcmp(&mac->mac_dst[0], flow->pre_tun_rule.dev->dev_addr,
		   ETH_ALEN)) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: dest MAC must match output dev MAC");
		return -EOPNOTSUPP;
	}

	/* Ensure destination MAC address is fully matched. */
	mac = (struct nfp_flower_mac_mpls *)mask;
	if (!is_broadcast_ether_addr(&mac->mac_dst[0])) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: dest MAC field must not be masked");
		return -EOPNOTSUPP;
	}

	if (mac->mpls_lse) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: MPLS not supported");
		return -EOPNOTSUPP;
	}

	mask += sizeof(struct nfp_flower_mac_mpls);
	ext += sizeof(struct nfp_flower_mac_mpls);
	if (key_layer & NFP_FLOWER_LAYER_IPV4 ||
	    key_layer & NFP_FLOWER_LAYER_IPV6) {
		/* Flags and proto fields have same offset in IPv4 and IPv6. */
		int ip_flags = offsetof(struct nfp_flower_ipv4, ip_ext.flags);
		int ip_proto = offsetof(struct nfp_flower_ipv4, ip_ext.proto);
		int size;
		int i;

		size = key_layer & NFP_FLOWER_LAYER_IPV4 ?
			sizeof(struct nfp_flower_ipv4) :
			sizeof(struct nfp_flower_ipv6);

		/* Ensure proto and flags are the only IP layer fields. */
		for (i = 0; i < size; i++)
			if (mask[i] && i != ip_flags && i != ip_proto) {
				NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: only flags and proto can be matched in ip header");
				return -EOPNOTSUPP;
			}
		ext += size;
		mask += size;
	}

	if ((priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ)) {
		if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_QINQ) {
			struct nfp_flower_vlan *vlan_tags;
			u16 vlan_tci;

			vlan_tags = (struct nfp_flower_vlan *)ext;

			vlan_tci = be16_to_cpu(vlan_tags->outer_tci);

			vlan_tci &= ~NFP_FLOWER_MASK_VLAN_PRESENT;
			flow->pre_tun_rule.vlan_tci = cpu_to_be16(vlan_tci);
			vlan = true;
		} else {
			flow->pre_tun_rule.vlan_tci = cpu_to_be16(0xffff);
		}
	}

	/* Action must be a single egress or pop_vlan and egress. */
	act_offset = 0;
	act = (struct nfp_fl_act_head *)&flow->action_data[act_offset];
	if (vlan) {
		if (act->jump_id != NFP_FL_ACTION_OPCODE_POP_VLAN) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: match on VLAN must have VLAN pop as first action");
			return -EOPNOTSUPP;
		}

		act_offset += act->len_lw << NFP_FL_LW_SIZ;
		act = (struct nfp_fl_act_head *)&flow->action_data[act_offset];
	}

	if (act->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: non egress action detected where egress was expected");
		return -EOPNOTSUPP;
	}

	act_offset += act->len_lw << NFP_FL_LW_SIZ;

	/* Ensure there are no more actions after egress. */
	if (act_offset != flow->meta.act_len) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: egress is not the last action");
		return -EOPNOTSUPP;
	}

	return 0;
}

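/* Basic checks applied before any flow is considered for offload:
 * conntrack matches are handled by the dedicated ct paths, and rules on
 * non-zero chains are not supported here.
 */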
static bool offload_pre_check(struct flow_cls_offload *flow)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
	struct flow_dissector *dissector = rule->match.dissector;

	if (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CT))
		return false;

	if (flow->common.chain_index)
		return false;

	return true;
}

/**
 * nfp_flower_add_offload() - Adds a new flow to hardware.
 * @app:	Pointer to the APP handle
 * @netdev:	netdev structure.
 * @flow:	TC flower classifier offload structure.
 *
 * Adds a new flow to the repeated hash structure and action payload.
 *
 * Return: negative value on error, 0 if configured successfully.
 */
static int
nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
		       struct flow_cls_offload *flow)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
	enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
	struct nfp_flower_priv *priv = app->priv;
	struct netlink_ext_ack *extack = NULL;
	struct nfp_fl_payload *flow_pay;
	struct nfp_fl_key_ls *key_layer;
	struct nfp_port *port = NULL;
	int err;

	extack = flow->common.extack;
	if (nfp_netdev_is_nfp_repr(netdev))
		port = nfp_port_from_netdev(netdev);

	if (is_pre_ct_flow(flow))
		return nfp_fl_ct_handle_pre_ct(priv, netdev, flow, extack);

	if (is_post_ct_flow(flow))
		return nfp_fl_ct_handle_post_ct(priv, netdev, flow, extack);

	if (!offload_pre_check(flow))
		return -EOPNOTSUPP;

	key_layer = kmalloc(sizeof(*key_layer), GFP_KERNEL);
	if (!key_layer)
		return -ENOMEM;

	err = nfp_flower_calculate_key_layers(app, netdev, key_layer, rule,
					      &tun_type, extack);
	if (err)
		goto err_free_key_ls;

	flow_pay = nfp_flower_allocate_new(key_layer);
	if (!flow_pay) {
		err = -ENOMEM;
		goto err_free_key_ls;
	}

	err = nfp_flower_compile_flow_match(app, rule, key_layer, netdev,
					    flow_pay, tun_type, extack);
	if (err)
		goto err_destroy_flow;

	err = nfp_flower_compile_action(app, rule, netdev, flow_pay, extack);
	if (err)
		goto err_destroy_flow;

	if (flow_pay->pre_tun_rule.dev) {
		err = nfp_flower_validate_pre_tun_rule(app, flow_pay, key_layer, extack);
		if (err)
			goto err_destroy_flow;
	}

	err = nfp_compile_flow_metadata(app, flow->cookie, flow_pay, netdev, extack);
	if (err)
		goto err_destroy_flow;

	flow_pay->tc_flower_cookie = flow->cookie;
	err = rhashtable_insert_fast(&priv->flow_table, &flow_pay->fl_node,
				     nfp_flower_table_params);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot insert flow into tables for offloads");
		goto err_release_metadata;
	}

	if (flow_pay->pre_tun_rule.dev)
		err = nfp_flower_xmit_pre_tun_flow(app, flow_pay);
	else
		err = nfp_flower_xmit_flow(app, flow_pay,
					   NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
	if (err)
		goto err_remove_rhash;

	if (port)
		port->tc_offload_cnt++;

	flow_pay->in_hw = true;

	/* Deallocate flow payload when flower rule has been destroyed. */
	kfree(key_layer);

	return 0;

err_remove_rhash:
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
					    &flow_pay->fl_node,
					    nfp_flower_table_params));
err_release_metadata:
	nfp_modify_flow_metadata(app, flow_pay);
err_destroy_flow:
	if (flow_pay->nfp_tun_ipv6)
		nfp_tunnel_put_ipv6_off(app, flow_pay->nfp_tun_ipv6);
	kfree(flow_pay->action_data);
	kfree(flow_pay->mask_data);
	kfree(flow_pay->unmasked_data);
	kfree(flow_pay);
err_free_key_ls:
	kfree(key_layer);
	return err;
}

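/* Remove a merge flow because one of its sub_flows is being deleted. If
 * the originating sub_flow still exists, re-offload it in place of the
 * merge flow; otherwise delete the merge flow from the firmware.
 */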
static void
nfp_flower_remove_merge_flow(struct nfp_app *app,
			     struct nfp_fl_payload *del_sub_flow,
			     struct nfp_fl_payload *merge_flow)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload_link *link, *temp;
	struct nfp_merge_info *merge_info;
	struct nfp_fl_payload *origin;
	u64 parent_ctx = 0;
	bool mod = false;
	int err;

	link = list_first_entry(&merge_flow->linked_flows,
				struct nfp_fl_payload_link, merge_flow.list);
	origin = link->sub_flow.flow;

	/* Re-add rule the merge had overwritten if it has not been deleted. */
	if (origin != del_sub_flow)
		mod = true;

	err = nfp_modify_flow_metadata(app, merge_flow);
	if (err) {
		nfp_flower_cmsg_warn(app, "Metadata fail for merge flow delete.\n");
		goto err_free_links;
	}

	if (!mod) {
		err = nfp_flower_xmit_flow(app, merge_flow,
					   NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
		if (err) {
			nfp_flower_cmsg_warn(app, "Failed to delete merged flow.\n");
			goto err_free_links;
		}
	} else {
		__nfp_modify_flow_metadata(priv, origin);
		err = nfp_flower_xmit_flow(app, origin,
					   NFP_FLOWER_CMSG_TYPE_FLOW_MOD);
		if (err)
			nfp_flower_cmsg_warn(app, "Failed to revert merge flow.\n");
		origin->in_hw = true;
	}

err_free_links:
	/* Clean any links connected with the merged flow. */
	list_for_each_entry_safe(link, temp, &merge_flow->linked_flows,
				 merge_flow.list) {
		u32 ctx_id = be32_to_cpu(link->sub_flow.flow->meta.host_ctx_id);

		parent_ctx = (parent_ctx << 32) | (u64)(ctx_id);
		nfp_flower_unlink_flow(link);
	}

	merge_info = rhashtable_lookup_fast(&priv->merge_table,
					    &parent_ctx,
					    merge_table_params);
	if (merge_info) {
		WARN_ON_ONCE(rhashtable_remove_fast(&priv->merge_table,
						    &merge_info->ht_node,
						    merge_table_params));
		kfree(merge_info);
	}

	kfree(merge_flow->action_data);
	kfree(merge_flow->mask_data);
	kfree(merge_flow->unmasked_data);
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
					    &merge_flow->fl_node,
					    nfp_flower_table_params));
	kfree_rcu(merge_flow, rcu);
}

void
nfp_flower_del_linked_merge_flows(struct nfp_app *app,
				  struct nfp_fl_payload *sub_flow)
{
	struct nfp_fl_payload_link *link, *temp;

	/* Remove any merge flow formed from the deleted sub_flow. */
	list_for_each_entry_safe(link, temp, &sub_flow->linked_flows,
				 sub_flow.list)
		nfp_flower_remove_merge_flow(app, sub_flow,
					     link->merge_flow.flow);
}

/**
 * nfp_flower_del_offload() - Removes a flow from hardware.
 * @app:	Pointer to the APP handle
 * @netdev:	netdev structure.
 * @flow:	TC flower classifier offload structure
 *
 * Removes a flow from the repeated hash structure and clears the
 * action payload. Any flows merged from this are also deleted.
 *
 * Return: negative value on error, 0 if removed successfully.
 */
static int
nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
		       struct flow_cls_offload *flow)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_ct_map_entry *ct_map_ent;
	struct netlink_ext_ack *extack = NULL;
	struct nfp_fl_payload *nfp_flow;
	struct nfp_port *port = NULL;
	int err;

	extack = flow->common.extack;
	if (nfp_netdev_is_nfp_repr(netdev))
		port = nfp_port_from_netdev(netdev);

	/* Check ct_map_table */
	ct_map_ent = rhashtable_lookup_fast(&priv->ct_map_table, &flow->cookie,
					    nfp_ct_map_params);
	if (ct_map_ent) {
		err = nfp_fl_ct_del_flow(ct_map_ent);
		return err;
	}

	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
	if (!nfp_flow) {
		NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot remove flow that does not exist");
		return -ENOENT;
	}

	err = nfp_modify_flow_metadata(app, nfp_flow);
	if (err)
		goto err_free_merge_flow;

	if (nfp_flow->nfp_tun_ipv4_addr)
		nfp_tunnel_del_ipv4_off(app, nfp_flow->nfp_tun_ipv4_addr);

	if (nfp_flow->nfp_tun_ipv6)
		nfp_tunnel_put_ipv6_off(app, nfp_flow->nfp_tun_ipv6);

	if (!nfp_flow->in_hw) {
		err = 0;
		goto err_free_merge_flow;
	}

	if (nfp_flow->pre_tun_rule.dev)
		err = nfp_flower_xmit_pre_tun_del_flow(app, nfp_flow);
	else
		err = nfp_flower_xmit_flow(app, nfp_flow,
					   NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
	/* Fall through on error. */

err_free_merge_flow:
	nfp_flower_del_linked_merge_flows(app, nfp_flow);
	if (port)
		port->tc_offload_cnt--;
	kfree(nfp_flow->action_data);
	kfree(nfp_flow->mask_data);
	kfree(nfp_flow->unmasked_data);
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
					    &nfp_flow->fl_node,
					    nfp_flower_table_params));
	kfree_rcu(nfp_flow, rcu);
	return err;
}

static void
__nfp_flower_update_merge_stats(struct nfp_app *app,
				struct nfp_fl_payload *merge_flow)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload_link *link;
	struct nfp_fl_payload *sub_flow;
	u64 pkts, bytes, used;
	u32 ctx_id;

	ctx_id = be32_to_cpu(merge_flow->meta.host_ctx_id);
	pkts = priv->stats[ctx_id].pkts;
	/* Do not cycle subflows if no stats to distribute. */
	if (!pkts)
		return;
	bytes = priv->stats[ctx_id].bytes;
	used = priv->stats[ctx_id].used;

	/* Reset stats for the merge flow. */
	priv->stats[ctx_id].pkts = 0;
	priv->stats[ctx_id].bytes = 0;

	/* The merge flow has received stats updates from firmware.
	 * Distribute these stats to all subflows that form the merge.
	 * The stats will be collected from TC via the subflows.
	 */
	list_for_each_entry(link, &merge_flow->linked_flows, merge_flow.list) {
		sub_flow = link->sub_flow.flow;
		ctx_id = be32_to_cpu(sub_flow->meta.host_ctx_id);
		priv->stats[ctx_id].pkts += pkts;
		priv->stats[ctx_id].bytes += bytes;
		priv->stats[ctx_id].used = max_t(u64, used,
						 priv->stats[ctx_id].used);
	}
}

void
nfp_flower_update_merge_stats(struct nfp_app *app,
			      struct nfp_fl_payload *sub_flow)
{
	struct nfp_fl_payload_link *link;

	/* Get merge flows that the subflow forms to distribute their stats. */
	list_for_each_entry(link, &sub_flow->linked_flows, sub_flow.list)
		__nfp_flower_update_merge_stats(app, link->merge_flow.flow);
}

/**
 * nfp_flower_get_stats() - Populates flow stats obtained from hardware.
 * @app:	Pointer to the APP handle
 * @netdev:	Netdev structure.
 * @flow:	TC flower classifier offload structure
 *
 * Populates a flow statistics structure which corresponds to a
 * specific flow.
 *
 * Return: negative value on error, 0 if stats populated successfully.
 */
static int
nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
		     struct flow_cls_offload *flow)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_ct_map_entry *ct_map_ent;
	struct netlink_ext_ack *extack = NULL;
	struct nfp_fl_payload *nfp_flow;
	u32 ctx_id;

	/* Check ct_map table first */
	ct_map_ent = rhashtable_lookup_fast(&priv->ct_map_table, &flow->cookie,
					    nfp_ct_map_params);
	if (ct_map_ent)
		return nfp_fl_ct_stats(flow, ct_map_ent);

	extack = flow->common.extack;
	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
	if (!nfp_flow) {
		NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot dump stats for flow that does not exist");
		return -EINVAL;
	}

	ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);

	spin_lock_bh(&priv->stats_lock);
	/* If request is for a sub_flow, update stats from merged flows. */
	if (!list_empty(&nfp_flow->linked_flows))
		nfp_flower_update_merge_stats(app, nfp_flow);

	flow_stats_update(&flow->stats, priv->stats[ctx_id].bytes,
			  priv->stats[ctx_id].pkts, 0, priv->stats[ctx_id].used,
			  FLOW_ACTION_HW_STATS_DELAYED);

	priv->stats[ctx_id].pkts = 0;
	priv->stats[ctx_id].bytes = 0;
	spin_unlock_bh(&priv->stats_lock);

	return 0;
}

static int
nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
			struct flow_cls_offload *flower)
{
	if (!eth_proto_is_802_3(flower->common.protocol))
		return -EOPNOTSUPP;

	switch (flower->command) {
	case FLOW_CLS_REPLACE:
		return nfp_flower_add_offload(app, netdev, flower);
	case FLOW_CLS_DESTROY:
		return nfp_flower_del_offload(app, netdev, flower);
	case FLOW_CLS_STATS:
		return nfp_flower_get_stats(app, netdev, flower);
	default:
		return -EOPNOTSUPP;
	}
}

static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
					void *type_data, void *cb_priv)
{
	struct flow_cls_common_offload *common = type_data;
	struct nfp_repr *repr = cb_priv;

	if (!tc_can_offload_extack(repr->netdev, common->extack))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return nfp_flower_repr_offload(repr->app, repr->netdev,
					       type_data);
	case TC_SETUP_CLSMATCHALL:
		return nfp_flower_setup_qos_offload(repr->app, repr->netdev,
						    type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static LIST_HEAD(nfp_block_cb_list);

static int nfp_flower_setup_tc_block(struct net_device *netdev,
				     struct flow_block_offload *f)
{
	struct nfp_repr *repr = netdev_priv(netdev);
	struct nfp_flower_repr_priv *repr_priv;
	struct flow_block_cb *block_cb;

	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	repr_priv = repr->app_priv;
	repr_priv->block_shared = f->block_shared;
	f->driver_block_list = &nfp_block_cb_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		if (flow_block_cb_is_busy(nfp_flower_setup_tc_block_cb, repr,
					  &nfp_block_cb_list))
			return -EBUSY;

		block_cb = flow_block_cb_alloc(nfp_flower_setup_tc_block_cb,
					       repr, repr, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &nfp_block_cb_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(f->block,
						nfp_flower_setup_tc_block_cb,
						repr);
		if (!block_cb)
			return -ENOENT;

		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
			enum tc_setup_type type, void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return nfp_flower_setup_tc_block(netdev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

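/* Per-netdev private data for indirect block callbacks, which offload
 * rules bound to devices the driver does not own, e.g. tunnel netdevs.
 */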
struct nfp_flower_indr_block_cb_priv {
	struct net_device *netdev;
	struct nfp_app *app;
	struct list_head list;
};

static struct nfp_flower_indr_block_cb_priv *
nfp_flower_indr_block_cb_priv_lookup(struct nfp_app *app,
				     struct net_device *netdev)
{
	struct nfp_flower_indr_block_cb_priv *cb_priv;
	struct nfp_flower_priv *priv = app->priv;

	/* All callback list access should be protected by RTNL. */
	ASSERT_RTNL();

	list_for_each_entry(cb_priv, &priv->indr_block_cb_priv, list)
		if (cb_priv->netdev == netdev)
			return cb_priv;

	return NULL;
}

static int nfp_flower_setup_indr_block_cb(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	struct nfp_flower_indr_block_cb_priv *priv = cb_priv;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return nfp_flower_repr_offload(priv->app, priv->netdev,
					       type_data);
	default:
		return -EOPNOTSUPP;
	}
}

void nfp_flower_setup_indr_tc_release(void *cb_priv)
{
	struct nfp_flower_indr_block_cb_priv *priv = cb_priv;

	list_del(&priv->list);
	kfree(priv);
}

static int
nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct Qdisc *sch, struct nfp_app *app,
			       struct flow_block_offload *f, void *data,
			       void (*cleanup)(struct flow_block_cb *block_cb))
{
	struct nfp_flower_indr_block_cb_priv *cb_priv;
	struct nfp_flower_priv *priv = app->priv;
	struct flow_block_cb *block_cb;

	if ((f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
	     !nfp_flower_internal_port_can_offload(app, netdev)) ||
	    (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS &&
	     nfp_flower_internal_port_can_offload(app, netdev)))
		return -EOPNOTSUPP;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev);
		if (cb_priv &&
		    flow_block_cb_is_busy(nfp_flower_setup_indr_block_cb,
					  cb_priv,
					  &nfp_block_cb_list))
			return -EBUSY;

		cb_priv = kmalloc(sizeof(*cb_priv), GFP_KERNEL);
		if (!cb_priv)
			return -ENOMEM;

		cb_priv->netdev = netdev;
		cb_priv->app = app;
		list_add(&cb_priv->list, &priv->indr_block_cb_priv);

		block_cb = flow_indr_block_cb_alloc(nfp_flower_setup_indr_block_cb,
						    cb_priv, cb_priv,
						    nfp_flower_setup_indr_tc_release,
						    f, netdev, sch, data, app, cleanup);
		if (IS_ERR(block_cb)) {
			list_del(&cb_priv->list);
			kfree(cb_priv);
			return PTR_ERR(block_cb);
		}

		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &nfp_block_cb_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev);
		if (!cb_priv)
			return -ENOENT;

		block_cb = flow_block_cb_lookup(f->block,
						nfp_flower_setup_indr_block_cb,
						cb_priv);
		if (!block_cb)
			return -ENOENT;

		flow_indr_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

int
nfp_flower_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch, void *cb_priv,
			    enum tc_setup_type type, void *type_data,
			    void *data,
			    void (*cleanup)(struct flow_block_cb *block_cb))
{
	if (!nfp_fl_is_netdev_to_offload(netdev))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_BLOCK:
		return nfp_flower_setup_indr_tc_block(netdev, sch, cb_priv,
						      type_data, data, cleanup);
	default:
		return -EOPNOTSUPP;
	}
}