1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2 /* Copyright (C) 2017-2018 Netronome Systems, Inc. */
3 
4 #include <linux/skbuff.h>
5 #include <net/devlink.h>
6 #include <net/pkt_cls.h>
7 
8 #include "cmsg.h"
9 #include "main.h"
10 #include "../nfpcore/nfp_cpp.h"
11 #include "../nfpcore/nfp_nsp.h"
12 #include "../nfp_app.h"
13 #include "../nfp_main.h"
14 #include "../nfp_net.h"
15 #include "../nfp_port.h"
16 
17 #define NFP_FLOWER_SUPPORTED_TCPFLAGS \
18 	(TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST | \
19 	 TCPHDR_PSH | TCPHDR_URG)
20 
21 #define NFP_FLOWER_SUPPORTED_CTLFLAGS \
22 	(FLOW_DIS_IS_FRAGMENT | \
23 	 FLOW_DIS_FIRST_FRAG)
24 
25 #define NFP_FLOWER_WHITELIST_DISSECTOR \
26 	(BIT(FLOW_DISSECTOR_KEY_CONTROL) | \
27 	 BIT(FLOW_DISSECTOR_KEY_BASIC) | \
28 	 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | \
29 	 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | \
30 	 BIT(FLOW_DISSECTOR_KEY_TCP) | \
31 	 BIT(FLOW_DISSECTOR_KEY_PORTS) | \
32 	 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | \
33 	 BIT(FLOW_DISSECTOR_KEY_VLAN) | \
34 	 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
35 	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
36 	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
37 	 BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
38 	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
39 	 BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
40 	 BIT(FLOW_DISSECTOR_KEY_ENC_IP) | \
41 	 BIT(FLOW_DISSECTOR_KEY_MPLS) | \
42 	 BIT(FLOW_DISSECTOR_KEY_IP))
43 
44 #define NFP_FLOWER_WHITELIST_TUN_DISSECTOR \
45 	(BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
46 	 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
47 	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
48 	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
49 	 BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
50 	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
51 	 BIT(FLOW_DISSECTOR_KEY_ENC_IP))
52 
53 #define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R \
54 	(BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
55 	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
56 	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))
57 
58 #define NFP_FLOWER_MERGE_FIELDS \
59 	(NFP_FLOWER_LAYER_PORT | \
60 	 NFP_FLOWER_LAYER_MAC | \
61 	 NFP_FLOWER_LAYER_TP | \
62 	 NFP_FLOWER_LAYER_IPV4 | \
63 	 NFP_FLOWER_LAYER_IPV6)
64 
65 struct nfp_flower_merge_check {
66 	union {
67 		struct {
68 			__be16 tci;
69 			struct nfp_flower_mac_mpls l2;
70 			struct nfp_flower_tp_ports l4;
71 			union {
72 				struct nfp_flower_ipv4 ipv4;
73 				struct nfp_flower_ipv6 ipv6;
74 			};
75 		};
76 		unsigned long vals[8];
77 	};
78 };
79 
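/* Send a flow add/mod/del control message to the firmware. The message
 * carries the rule metadata followed by the unmasked key, the mask and the
 * action list; lengths in the metadata are temporarily converted to
 * firmware long words for transmission.
 */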
80 static int
81 nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow,
82 		     u8 mtype)
83 {
84 	u32 meta_len, key_len, mask_len, act_len, tot_len;
85 	struct sk_buff *skb;
86 	unsigned char *msg;
87 
	meta_len = sizeof(struct nfp_fl_rule_metadata);
89 	key_len = nfp_flow->meta.key_len;
90 	mask_len = nfp_flow->meta.mask_len;
91 	act_len = nfp_flow->meta.act_len;
92 
93 	tot_len = meta_len + key_len + mask_len + act_len;
94 
95 	/* Convert to long words as firmware expects
96 	 * lengths in units of NFP_FL_LW_SIZ.
97 	 */
98 	nfp_flow->meta.key_len >>= NFP_FL_LW_SIZ;
99 	nfp_flow->meta.mask_len >>= NFP_FL_LW_SIZ;
100 	nfp_flow->meta.act_len >>= NFP_FL_LW_SIZ;
101 
102 	skb = nfp_flower_cmsg_alloc(app, tot_len, mtype, GFP_KERNEL);
103 	if (!skb)
104 		return -ENOMEM;
105 
106 	msg = nfp_flower_cmsg_get_data(skb);
107 	memcpy(msg, &nfp_flow->meta, meta_len);
108 	memcpy(&msg[meta_len], nfp_flow->unmasked_data, key_len);
109 	memcpy(&msg[meta_len + key_len], nfp_flow->mask_data, mask_len);
110 	memcpy(&msg[meta_len + key_len + mask_len],
111 	       nfp_flow->action_data, act_len);
112 
113 	/* Convert back to bytes as software expects
114 	 * lengths in units of bytes.
115 	 */
116 	nfp_flow->meta.key_len <<= NFP_FL_LW_SIZ;
117 	nfp_flow->meta.mask_len <<= NFP_FL_LW_SIZ;
118 	nfp_flow->meta.act_len <<= NFP_FL_LW_SIZ;
119 
120 	nfp_ctrl_tx(app->ctrl, skb);
121 
122 	return 0;
123 }
124 
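/* Return true if the rule matches on any field above the Ethernet layer. */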
125 static bool nfp_flower_check_higher_than_mac(struct tc_cls_flower_offload *f)
126 {
127 	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
128 
129 	return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS) ||
130 	       flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS) ||
131 	       flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS) ||
132 	       flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP);
133 }
134 
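/* Account for Geneve option matches in the key layers and key size. */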
135 static int
136 nfp_flower_calc_opt_layer(struct flow_match_enc_opts *enc_opts,
137 			  u32 *key_layer_two, int *key_size)
138 {
139 	if (enc_opts->key->len > NFP_FL_MAX_GENEVE_OPT_KEY)
140 		return -EOPNOTSUPP;
141 
142 	if (enc_opts->key->len > 0) {
143 		*key_layer_two |= NFP_FLOWER_LAYER2_GENEVE_OP;
144 		*key_size += sizeof(struct nfp_flower_geneve_options);
145 	}
146 
147 	return 0;
148 }
149 
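/* Validate the flower match against what the firmware supports and work out
 * which NFP key layers, and how much key space, are needed to offload it.
 */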
150 static int
151 nfp_flower_calculate_key_layers(struct nfp_app *app,
152 				struct net_device *netdev,
153 				struct nfp_fl_key_ls *ret_key_ls,
154 				struct tc_cls_flower_offload *flow,
155 				enum nfp_flower_tun_type *tun_type)
156 {
157 	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
158 	struct flow_dissector *dissector = rule->match.dissector;
	struct flow_match_basic basic = { NULL, NULL };
160 	struct nfp_flower_priv *priv = app->priv;
161 	u32 key_layer_two;
162 	u8 key_layer;
163 	int key_size;
164 	int err;
165 
166 	if (dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR)
167 		return -EOPNOTSUPP;
168 
169 	/* If any tun dissector is used then the required set must be used. */
170 	if (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR &&
171 	    (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
172 	    != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
173 		return -EOPNOTSUPP;
174 
175 	key_layer_two = 0;
176 	key_layer = NFP_FLOWER_LAYER_PORT;
177 	key_size = sizeof(struct nfp_flower_meta_tci) +
178 		   sizeof(struct nfp_flower_in_port);
179 
180 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS) ||
181 	    flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) {
182 		key_layer |= NFP_FLOWER_LAYER_MAC;
183 		key_size += sizeof(struct nfp_flower_mac_mpls);
184 	}
185 
186 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
187 		struct flow_match_vlan vlan;
188 
189 		flow_rule_match_vlan(rule, &vlan);
190 		if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_PCP) &&
191 		    vlan.key->vlan_priority)
192 			return -EOPNOTSUPP;
193 	}
194 
195 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
196 		struct flow_match_enc_opts enc_op = { NULL, NULL };
197 		struct flow_match_ipv4_addrs ipv4_addrs;
198 		struct flow_match_control enc_ctl;
199 		struct flow_match_ports enc_ports;
200 
201 		flow_rule_match_enc_control(rule, &enc_ctl);
202 
203 		if (enc_ctl.mask->addr_type != 0xffff ||
204 		    enc_ctl.key->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS)
205 			return -EOPNOTSUPP;
206 
207 		/* These fields are already verified as used. */
208 		flow_rule_match_enc_ipv4_addrs(rule, &ipv4_addrs);
209 		if (ipv4_addrs.mask->dst != cpu_to_be32(~0))
210 			return -EOPNOTSUPP;
211 
212 		flow_rule_match_enc_ports(rule, &enc_ports);
213 		if (enc_ports.mask->dst != cpu_to_be16(~0))
214 			return -EOPNOTSUPP;
215 
216 		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS))
217 			flow_rule_match_enc_opts(rule, &enc_op);
218 
219 		switch (enc_ports.key->dst) {
220 		case htons(IANA_VXLAN_UDP_PORT):
221 			*tun_type = NFP_FL_TUNNEL_VXLAN;
222 			key_layer |= NFP_FLOWER_LAYER_VXLAN;
223 			key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
224 
225 			if (enc_op.key)
226 				return -EOPNOTSUPP;
227 			break;
228 		case htons(GENEVE_UDP_PORT):
229 			if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE))
230 				return -EOPNOTSUPP;
231 			*tun_type = NFP_FL_TUNNEL_GENEVE;
232 			key_layer |= NFP_FLOWER_LAYER_EXT_META;
233 			key_size += sizeof(struct nfp_flower_ext_meta);
234 			key_layer_two |= NFP_FLOWER_LAYER2_GENEVE;
235 			key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
236 
237 			if (!enc_op.key)
238 				break;
239 			if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT))
240 				return -EOPNOTSUPP;
241 			err = nfp_flower_calc_opt_layer(&enc_op, &key_layer_two,
242 							&key_size);
243 			if (err)
244 				return err;
245 			break;
246 		default:
247 			return -EOPNOTSUPP;
248 		}
249 
250 		/* Ensure the ingress netdev matches the expected tun type. */
251 		if (!nfp_fl_netdev_is_tunnel_type(netdev, *tun_type))
252 			return -EOPNOTSUPP;
253 	}
254 
255 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC))
256 		flow_rule_match_basic(rule, &basic);
257 
258 	if (basic.mask && basic.mask->n_proto) {
259 		/* Ethernet type is present in the key. */
260 		switch (basic.key->n_proto) {
261 		case cpu_to_be16(ETH_P_IP):
262 			key_layer |= NFP_FLOWER_LAYER_IPV4;
263 			key_size += sizeof(struct nfp_flower_ipv4);
264 			break;
265 
266 		case cpu_to_be16(ETH_P_IPV6):
267 			key_layer |= NFP_FLOWER_LAYER_IPV6;
268 			key_size += sizeof(struct nfp_flower_ipv6);
269 			break;
270 
271 		/* Currently we do not offload ARP
272 		 * because we rely on it to get to the host.
273 		 */
274 		case cpu_to_be16(ETH_P_ARP):
275 			return -EOPNOTSUPP;
276 
277 		case cpu_to_be16(ETH_P_MPLS_UC):
278 		case cpu_to_be16(ETH_P_MPLS_MC):
279 			if (!(key_layer & NFP_FLOWER_LAYER_MAC)) {
280 				key_layer |= NFP_FLOWER_LAYER_MAC;
281 				key_size += sizeof(struct nfp_flower_mac_mpls);
282 			}
283 			break;
284 
285 		/* Will be included in layer 2. */
286 		case cpu_to_be16(ETH_P_8021Q):
287 			break;
288 
289 		default:
			/* Other ethtype - we need to check the masks for the
			 * remainder of the key to ensure we can offload.
			 */
293 			if (nfp_flower_check_higher_than_mac(flow))
294 				return -EOPNOTSUPP;
295 			break;
296 		}
297 	}
298 
299 	if (basic.mask && basic.mask->ip_proto) {
		/* The IP protocol is present in the key. */
301 		switch (basic.key->ip_proto) {
302 		case IPPROTO_TCP:
303 		case IPPROTO_UDP:
304 		case IPPROTO_SCTP:
305 		case IPPROTO_ICMP:
306 		case IPPROTO_ICMPV6:
307 			key_layer |= NFP_FLOWER_LAYER_TP;
308 			key_size += sizeof(struct nfp_flower_tp_ports);
309 			break;
310 		default:
			/* Other ip proto - we need to check the masks for the
			 * remainder of the key to ensure we can offload.
			 */
314 			return -EOPNOTSUPP;
315 		}
316 	}
317 
318 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
319 		struct flow_match_tcp tcp;
320 		u32 tcp_flags;
321 
322 		flow_rule_match_tcp(rule, &tcp);
323 		tcp_flags = be16_to_cpu(tcp.key->flags);
324 
325 		if (tcp_flags & ~NFP_FLOWER_SUPPORTED_TCPFLAGS)
326 			return -EOPNOTSUPP;
327 
328 		/* We only support PSH and URG flags when either
329 		 * FIN, SYN or RST is present as well.
330 		 */
331 		if ((tcp_flags & (TCPHDR_PSH | TCPHDR_URG)) &&
332 		    !(tcp_flags & (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST)))
333 			return -EOPNOTSUPP;
334 
		/* We need to store TCP flags in either the IPv4 or IPv6 key
		 * space, thus we need to ensure we include an IPv4/IPv6 key
		 * layer if we have not done so already.
		 */
339 		if (!basic.key)
340 			return -EOPNOTSUPP;
341 
342 		if (!(key_layer & NFP_FLOWER_LAYER_IPV4) &&
343 		    !(key_layer & NFP_FLOWER_LAYER_IPV6)) {
344 			switch (basic.key->n_proto) {
345 			case cpu_to_be16(ETH_P_IP):
346 				key_layer |= NFP_FLOWER_LAYER_IPV4;
347 				key_size += sizeof(struct nfp_flower_ipv4);
348 				break;
349 
350 			case cpu_to_be16(ETH_P_IPV6):
				key_layer |= NFP_FLOWER_LAYER_IPV6;
352 				key_size += sizeof(struct nfp_flower_ipv6);
353 				break;
354 
355 			default:
356 				return -EOPNOTSUPP;
357 			}
358 		}
359 	}
360 
361 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
362 		struct flow_match_control ctl;
363 
364 		flow_rule_match_control(rule, &ctl);
365 		if (ctl.key->flags & ~NFP_FLOWER_SUPPORTED_CTLFLAGS)
366 			return -EOPNOTSUPP;
367 	}
368 
369 	ret_key_ls->key_layer = key_layer;
370 	ret_key_ls->key_layer_two = key_layer_two;
371 	ret_key_ls->key_size = key_size;
372 
373 	return 0;
374 }
375 
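/* Allocate a flow payload sized for the calculated key layers. */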
376 static struct nfp_fl_payload *
377 nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
378 {
379 	struct nfp_fl_payload *flow_pay;
380 
381 	flow_pay = kmalloc(sizeof(*flow_pay), GFP_KERNEL);
382 	if (!flow_pay)
383 		return NULL;
384 
385 	flow_pay->meta.key_len = key_layer->key_size;
386 	flow_pay->unmasked_data = kmalloc(key_layer->key_size, GFP_KERNEL);
387 	if (!flow_pay->unmasked_data)
388 		goto err_free_flow;
389 
390 	flow_pay->meta.mask_len = key_layer->key_size;
391 	flow_pay->mask_data = kmalloc(key_layer->key_size, GFP_KERNEL);
392 	if (!flow_pay->mask_data)
393 		goto err_free_unmasked;
394 
395 	flow_pay->action_data = kmalloc(NFP_FL_MAX_A_SIZ, GFP_KERNEL);
396 	if (!flow_pay->action_data)
397 		goto err_free_mask;
398 
399 	flow_pay->nfp_tun_ipv4_addr = 0;
400 	flow_pay->meta.flags = 0;
401 	INIT_LIST_HEAD(&flow_pay->linked_flows);
402 	flow_pay->in_hw = false;
403 
404 	return flow_pay;
405 
406 err_free_mask:
407 	kfree(flow_pay->mask_data);
408 err_free_unmasked:
409 	kfree(flow_pay->unmasked_data);
410 err_free_flow:
411 	kfree(flow_pay);
412 	return NULL;
413 }
414 
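/* OR into the merge check the mask of every header field the flow's actions
 * may rewrite, and report the last action id and the number of output
 * actions encountered.
 */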
415 static int
416 nfp_flower_update_merge_with_actions(struct nfp_fl_payload *flow,
417 				     struct nfp_flower_merge_check *merge,
418 				     u8 *last_act_id, int *act_out)
419 {
420 	struct nfp_fl_set_ipv6_tc_hl_fl *ipv6_tc_hl_fl;
421 	struct nfp_fl_set_ip4_ttl_tos *ipv4_ttl_tos;
422 	struct nfp_fl_set_ip4_addrs *ipv4_add;
423 	struct nfp_fl_set_ipv6_addr *ipv6_add;
424 	struct nfp_fl_push_vlan *push_vlan;
425 	struct nfp_fl_set_tport *tport;
426 	struct nfp_fl_set_eth *eth;
427 	struct nfp_fl_act_head *a;
428 	unsigned int act_off = 0;
429 	u8 act_id = 0;
430 	u8 *ports;
431 	int i;
432 
433 	while (act_off < flow->meta.act_len) {
434 		a = (struct nfp_fl_act_head *)&flow->action_data[act_off];
435 		act_id = a->jump_id;
436 
437 		switch (act_id) {
438 		case NFP_FL_ACTION_OPCODE_OUTPUT:
439 			if (act_out)
440 				(*act_out)++;
441 			break;
442 		case NFP_FL_ACTION_OPCODE_PUSH_VLAN:
443 			push_vlan = (struct nfp_fl_push_vlan *)a;
444 			if (push_vlan->vlan_tci)
445 				merge->tci = cpu_to_be16(0xffff);
446 			break;
447 		case NFP_FL_ACTION_OPCODE_POP_VLAN:
448 			merge->tci = cpu_to_be16(0);
449 			break;
450 		case NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL:
451 			/* New tunnel header means l2 to l4 can be matched. */
452 			eth_broadcast_addr(&merge->l2.mac_dst[0]);
453 			eth_broadcast_addr(&merge->l2.mac_src[0]);
454 			memset(&merge->l4, 0xff,
455 			       sizeof(struct nfp_flower_tp_ports));
456 			memset(&merge->ipv4, 0xff,
457 			       sizeof(struct nfp_flower_ipv4));
458 			break;
459 		case NFP_FL_ACTION_OPCODE_SET_ETHERNET:
460 			eth = (struct nfp_fl_set_eth *)a;
461 			for (i = 0; i < ETH_ALEN; i++)
462 				merge->l2.mac_dst[i] |= eth->eth_addr_mask[i];
463 			for (i = 0; i < ETH_ALEN; i++)
464 				merge->l2.mac_src[i] |=
465 					eth->eth_addr_mask[ETH_ALEN + i];
466 			break;
467 		case NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS:
468 			ipv4_add = (struct nfp_fl_set_ip4_addrs *)a;
469 			merge->ipv4.ipv4_src |= ipv4_add->ipv4_src_mask;
470 			merge->ipv4.ipv4_dst |= ipv4_add->ipv4_dst_mask;
471 			break;
472 		case NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS:
473 			ipv4_ttl_tos = (struct nfp_fl_set_ip4_ttl_tos *)a;
474 			merge->ipv4.ip_ext.ttl |= ipv4_ttl_tos->ipv4_ttl_mask;
475 			merge->ipv4.ip_ext.tos |= ipv4_ttl_tos->ipv4_tos_mask;
476 			break;
477 		case NFP_FL_ACTION_OPCODE_SET_IPV6_SRC:
478 			ipv6_add = (struct nfp_fl_set_ipv6_addr *)a;
479 			for (i = 0; i < 4; i++)
480 				merge->ipv6.ipv6_src.in6_u.u6_addr32[i] |=
481 					ipv6_add->ipv6[i].mask;
482 			break;
483 		case NFP_FL_ACTION_OPCODE_SET_IPV6_DST:
484 			ipv6_add = (struct nfp_fl_set_ipv6_addr *)a;
485 			for (i = 0; i < 4; i++)
486 				merge->ipv6.ipv6_dst.in6_u.u6_addr32[i] |=
487 					ipv6_add->ipv6[i].mask;
488 			break;
489 		case NFP_FL_ACTION_OPCODE_SET_IPV6_TC_HL_FL:
490 			ipv6_tc_hl_fl = (struct nfp_fl_set_ipv6_tc_hl_fl *)a;
491 			merge->ipv6.ip_ext.ttl |=
492 				ipv6_tc_hl_fl->ipv6_hop_limit_mask;
493 			merge->ipv6.ip_ext.tos |= ipv6_tc_hl_fl->ipv6_tc_mask;
494 			merge->ipv6.ipv6_flow_label_exthdr |=
495 				ipv6_tc_hl_fl->ipv6_label_mask;
496 			break;
497 		case NFP_FL_ACTION_OPCODE_SET_UDP:
498 		case NFP_FL_ACTION_OPCODE_SET_TCP:
499 			tport = (struct nfp_fl_set_tport *)a;
500 			ports = (u8 *)&merge->l4.port_src;
501 			for (i = 0; i < 4; i++)
502 				ports[i] |= tport->tp_port_mask[i];
503 			break;
504 		case NFP_FL_ACTION_OPCODE_PRE_TUNNEL:
505 		case NFP_FL_ACTION_OPCODE_PRE_LAG:
506 		case NFP_FL_ACTION_OPCODE_PUSH_GENEVE:
507 			break;
508 		default:
509 			return -EOPNOTSUPP;
510 		}
511 
512 		act_off += a->len_lw << NFP_FL_LW_SIZ;
513 	}
514 
515 	if (last_act_id)
516 		*last_act_id = act_id;
517 
518 	return 0;
519 }
520 
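/* Copy the flow's match masks into the merge check structure so that they
 * can later be compared as a single bitmap.
 */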
521 static int
522 nfp_flower_populate_merge_match(struct nfp_fl_payload *flow,
523 				struct nfp_flower_merge_check *merge,
524 				bool extra_fields)
525 {
526 	struct nfp_flower_meta_tci *meta_tci;
527 	u8 *mask = flow->mask_data;
528 	u8 key_layer, match_size;
529 
530 	memset(merge, 0, sizeof(struct nfp_flower_merge_check));
531 
532 	meta_tci = (struct nfp_flower_meta_tci *)mask;
533 	key_layer = meta_tci->nfp_flow_key_layer;
534 
535 	if (key_layer & ~NFP_FLOWER_MERGE_FIELDS && !extra_fields)
536 		return -EOPNOTSUPP;
537 
538 	merge->tci = meta_tci->tci;
539 	mask += sizeof(struct nfp_flower_meta_tci);
540 
541 	if (key_layer & NFP_FLOWER_LAYER_EXT_META)
542 		mask += sizeof(struct nfp_flower_ext_meta);
543 
544 	mask += sizeof(struct nfp_flower_in_port);
545 
546 	if (key_layer & NFP_FLOWER_LAYER_MAC) {
547 		match_size = sizeof(struct nfp_flower_mac_mpls);
548 		memcpy(&merge->l2, mask, match_size);
549 		mask += match_size;
550 	}
551 
552 	if (key_layer & NFP_FLOWER_LAYER_TP) {
553 		match_size = sizeof(struct nfp_flower_tp_ports);
554 		memcpy(&merge->l4, mask, match_size);
555 		mask += match_size;
556 	}
557 
558 	if (key_layer & NFP_FLOWER_LAYER_IPV4) {
559 		match_size = sizeof(struct nfp_flower_ipv4);
560 		memcpy(&merge->ipv4, mask, match_size);
561 	}
562 
563 	if (key_layer & NFP_FLOWER_LAYER_IPV6) {
564 		match_size = sizeof(struct nfp_flower_ipv6);
565 		memcpy(&merge->ipv6, mask, match_size);
566 	}
567 
568 	return 0;
569 }
570 
571 static int
572 nfp_flower_can_merge(struct nfp_fl_payload *sub_flow1,
573 		     struct nfp_fl_payload *sub_flow2)
574 {
575 	/* Two flows can be merged if sub_flow2 only matches on bits that are
576 	 * either matched by sub_flow1 or set by a sub_flow1 action. This
577 	 * ensures that every packet that hits sub_flow1 and recirculates is
578 	 * guaranteed to hit sub_flow2.
579 	 */
580 	struct nfp_flower_merge_check sub_flow1_merge, sub_flow2_merge;
581 	int err, act_out = 0;
582 	u8 last_act_id = 0;
583 
584 	err = nfp_flower_populate_merge_match(sub_flow1, &sub_flow1_merge,
585 					      true);
586 	if (err)
587 		return err;
588 
589 	err = nfp_flower_populate_merge_match(sub_flow2, &sub_flow2_merge,
590 					      false);
591 	if (err)
592 		return err;
593 
594 	err = nfp_flower_update_merge_with_actions(sub_flow1, &sub_flow1_merge,
595 						   &last_act_id, &act_out);
596 	if (err)
597 		return err;
598 
599 	/* Must only be 1 output action and it must be the last in sequence. */
600 	if (act_out != 1 || last_act_id != NFP_FL_ACTION_OPCODE_OUTPUT)
601 		return -EOPNOTSUPP;
602 
603 	/* Reject merge if sub_flow2 matches on something that is not matched
604 	 * on or set in an action by sub_flow1.
605 	 */
606 	err = bitmap_andnot(sub_flow2_merge.vals, sub_flow2_merge.vals,
607 			    sub_flow1_merge.vals,
608 			    sizeof(struct nfp_flower_merge_check) * 8);
609 	if (err)
610 		return -EINVAL;
611 
612 	return 0;
613 }
614 
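/* Copy any leading pre-actions (pre-tunnel/pre-LAG) from act_src to act_dst
 * and return the number of bytes copied, noting if a pre-tunnel was seen.
 */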
615 static unsigned int
616 nfp_flower_copy_pre_actions(char *act_dst, char *act_src, int len,
617 			    bool *tunnel_act)
618 {
619 	unsigned int act_off = 0, act_len;
620 	struct nfp_fl_act_head *a;
621 	u8 act_id = 0;
622 
623 	while (act_off < len) {
624 		a = (struct nfp_fl_act_head *)&act_src[act_off];
625 		act_len = a->len_lw << NFP_FL_LW_SIZ;
626 		act_id = a->jump_id;
627 
628 		switch (act_id) {
629 		case NFP_FL_ACTION_OPCODE_PRE_TUNNEL:
630 			if (tunnel_act)
631 				*tunnel_act = true;
632 			/* fall through */
633 		case NFP_FL_ACTION_OPCODE_PRE_LAG:
634 			memcpy(act_dst + act_off, act_src + act_off, act_len);
635 			break;
636 		default:
637 			return act_off;
638 		}
639 
640 		act_off += act_len;
641 	}
642 
643 	return act_off;
644 }
645 
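/* Check that only output actions follow a tunnel push. */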
646 static int nfp_fl_verify_post_tun_acts(char *acts, int len)
647 {
648 	struct nfp_fl_act_head *a;
649 	unsigned int act_off = 0;
650 
651 	while (act_off < len) {
652 		a = (struct nfp_fl_act_head *)&acts[act_off];
653 		if (a->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT)
654 			return -EOPNOTSUPP;
655 
656 		act_off += a->len_lw << NFP_FL_LW_SIZ;
657 	}
658 
659 	return 0;
660 }
661 
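/* Build the merge flow's action list: pre-actions from both sub_flows first,
 * then sub_flow1's actions minus its final output, then sub_flow2's actions.
 */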
662 static int
663 nfp_flower_merge_action(struct nfp_fl_payload *sub_flow1,
664 			struct nfp_fl_payload *sub_flow2,
665 			struct nfp_fl_payload *merge_flow)
666 {
667 	unsigned int sub1_act_len, sub2_act_len, pre_off1, pre_off2;
668 	bool tunnel_act = false;
669 	char *merge_act;
670 	int err;
671 
672 	/* The last action of sub_flow1 must be output - do not merge this. */
673 	sub1_act_len = sub_flow1->meta.act_len - sizeof(struct nfp_fl_output);
674 	sub2_act_len = sub_flow2->meta.act_len;
675 
676 	if (!sub2_act_len)
677 		return -EINVAL;
678 
679 	if (sub1_act_len + sub2_act_len > NFP_FL_MAX_A_SIZ)
680 		return -EINVAL;
681 
682 	/* A shortcut can only be applied if there is a single action. */
683 	if (sub1_act_len)
684 		merge_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
685 	else
686 		merge_flow->meta.shortcut = sub_flow2->meta.shortcut;
687 
688 	merge_flow->meta.act_len = sub1_act_len + sub2_act_len;
689 	merge_act = merge_flow->action_data;
690 
691 	/* Copy any pre-actions to the start of merge flow action list. */
692 	pre_off1 = nfp_flower_copy_pre_actions(merge_act,
693 					       sub_flow1->action_data,
694 					       sub1_act_len, &tunnel_act);
695 	merge_act += pre_off1;
696 	sub1_act_len -= pre_off1;
697 	pre_off2 = nfp_flower_copy_pre_actions(merge_act,
698 					       sub_flow2->action_data,
699 					       sub2_act_len, NULL);
700 	merge_act += pre_off2;
701 	sub2_act_len -= pre_off2;
702 
703 	/* FW does a tunnel push when egressing, therefore, if sub_flow 1 pushes
704 	 * a tunnel, sub_flow 2 can only have output actions for a valid merge.
705 	 */
706 	if (tunnel_act) {
707 		char *post_tun_acts = &sub_flow2->action_data[pre_off2];
708 
709 		err = nfp_fl_verify_post_tun_acts(post_tun_acts, sub2_act_len);
710 		if (err)
711 			return err;
712 	}
713 
714 	/* Copy remaining actions from sub_flows 1 and 2. */
715 	memcpy(merge_act, sub_flow1->action_data + pre_off1, sub1_act_len);
716 	merge_act += sub1_act_len;
717 	memcpy(merge_act, sub_flow2->action_data + pre_off2, sub2_act_len);
718 
719 	return 0;
720 }
721 
722 /* Flow link code should only be accessed under RTNL. */
723 static void nfp_flower_unlink_flow(struct nfp_fl_payload_link *link)
724 {
725 	list_del(&link->merge_flow.list);
726 	list_del(&link->sub_flow.list);
727 	kfree(link);
728 }
729 
730 static void nfp_flower_unlink_flows(struct nfp_fl_payload *merge_flow,
731 				    struct nfp_fl_payload *sub_flow)
732 {
733 	struct nfp_fl_payload_link *link;
734 
735 	list_for_each_entry(link, &merge_flow->linked_flows, merge_flow.list)
736 		if (link->sub_flow.flow == sub_flow) {
737 			nfp_flower_unlink_flow(link);
738 			return;
739 		}
740 }
741 
742 static int nfp_flower_link_flows(struct nfp_fl_payload *merge_flow,
743 				 struct nfp_fl_payload *sub_flow)
744 {
745 	struct nfp_fl_payload_link *link;
746 
747 	link = kmalloc(sizeof(*link), GFP_KERNEL);
748 	if (!link)
749 		return -ENOMEM;
750 
751 	link->merge_flow.flow = merge_flow;
752 	list_add_tail(&link->merge_flow.list, &merge_flow->linked_flows);
753 	link->sub_flow.flow = sub_flow;
754 	list_add_tail(&link->sub_flow.list, &sub_flow->linked_flows);
755 
756 	return 0;
757 }
758 
759 /**
760  * nfp_flower_merge_offloaded_flows() - Merge 2 existing flows to single flow.
761  * @app:	Pointer to the APP handle
762  * @sub_flow1:	Initial flow matched to produce merge hint
763  * @sub_flow2:	Post recirculation flow matched in merge hint
764  *
765  * Combines 2 flows (if valid) to a single flow, removing the initial from hw
766  * and offloading the new, merged flow.
767  *
 * Return: negative value on error, 0 on success.
769  */
770 int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
771 				     struct nfp_fl_payload *sub_flow1,
772 				     struct nfp_fl_payload *sub_flow2)
773 {
774 	struct tc_cls_flower_offload merge_tc_off;
775 	struct nfp_flower_priv *priv = app->priv;
776 	struct nfp_fl_payload *merge_flow;
777 	struct nfp_fl_key_ls merge_key_ls;
778 	int err;
779 
780 	ASSERT_RTNL();
781 
782 	if (sub_flow1 == sub_flow2 ||
783 	    nfp_flower_is_merge_flow(sub_flow1) ||
784 	    nfp_flower_is_merge_flow(sub_flow2))
785 		return -EINVAL;
786 
787 	err = nfp_flower_can_merge(sub_flow1, sub_flow2);
788 	if (err)
789 		return err;
790 
791 	merge_key_ls.key_size = sub_flow1->meta.key_len;
792 
793 	merge_flow = nfp_flower_allocate_new(&merge_key_ls);
794 	if (!merge_flow)
795 		return -ENOMEM;
796 
797 	merge_flow->tc_flower_cookie = (unsigned long)merge_flow;
798 	merge_flow->ingress_dev = sub_flow1->ingress_dev;
799 
800 	memcpy(merge_flow->unmasked_data, sub_flow1->unmasked_data,
801 	       sub_flow1->meta.key_len);
802 	memcpy(merge_flow->mask_data, sub_flow1->mask_data,
803 	       sub_flow1->meta.mask_len);
804 
805 	err = nfp_flower_merge_action(sub_flow1, sub_flow2, merge_flow);
806 	if (err)
807 		goto err_destroy_merge_flow;
808 
809 	err = nfp_flower_link_flows(merge_flow, sub_flow1);
810 	if (err)
811 		goto err_destroy_merge_flow;
812 
813 	err = nfp_flower_link_flows(merge_flow, sub_flow2);
814 	if (err)
815 		goto err_unlink_sub_flow1;
816 
817 	merge_tc_off.cookie = merge_flow->tc_flower_cookie;
818 	err = nfp_compile_flow_metadata(app, &merge_tc_off, merge_flow,
819 					merge_flow->ingress_dev);
820 	if (err)
821 		goto err_unlink_sub_flow2;
822 
823 	err = rhashtable_insert_fast(&priv->flow_table, &merge_flow->fl_node,
824 				     nfp_flower_table_params);
825 	if (err)
826 		goto err_release_metadata;
827 
828 	err = nfp_flower_xmit_flow(app, merge_flow,
829 				   NFP_FLOWER_CMSG_TYPE_FLOW_MOD);
830 	if (err)
831 		goto err_remove_rhash;
832 
833 	merge_flow->in_hw = true;
834 	sub_flow1->in_hw = false;
835 
836 	return 0;
837 
838 err_remove_rhash:
839 	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
840 					    &merge_flow->fl_node,
841 					    nfp_flower_table_params));
842 err_release_metadata:
843 	nfp_modify_flow_metadata(app, merge_flow);
844 err_unlink_sub_flow2:
845 	nfp_flower_unlink_flows(merge_flow, sub_flow2);
846 err_unlink_sub_flow1:
847 	nfp_flower_unlink_flows(merge_flow, sub_flow1);
848 err_destroy_merge_flow:
849 	kfree(merge_flow->action_data);
850 	kfree(merge_flow->mask_data);
851 	kfree(merge_flow->unmasked_data);
852 	kfree(merge_flow);
853 	return err;
854 }
855 
856 /**
857  * nfp_flower_add_offload() - Adds a new flow to hardware.
858  * @app:	Pointer to the APP handle
859  * @netdev:	netdev structure.
860  * @flow:	TC flower classifier offload structure.
861  *
862  * Adds a new flow to the repeated hash structure and action payload.
863  *
864  * Return: negative value on error, 0 if configured successfully.
865  */
866 static int
867 nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
868 		       struct tc_cls_flower_offload *flow)
869 {
870 	enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
871 	struct nfp_flower_priv *priv = app->priv;
872 	struct nfp_fl_payload *flow_pay;
873 	struct nfp_fl_key_ls *key_layer;
874 	struct nfp_port *port = NULL;
875 	int err;
876 
877 	if (nfp_netdev_is_nfp_repr(netdev))
878 		port = nfp_port_from_netdev(netdev);
879 
880 	key_layer = kmalloc(sizeof(*key_layer), GFP_KERNEL);
881 	if (!key_layer)
882 		return -ENOMEM;
883 
884 	err = nfp_flower_calculate_key_layers(app, netdev, key_layer, flow,
885 					      &tun_type);
886 	if (err)
887 		goto err_free_key_ls;
888 
889 	flow_pay = nfp_flower_allocate_new(key_layer);
890 	if (!flow_pay) {
891 		err = -ENOMEM;
892 		goto err_free_key_ls;
893 	}
894 
895 	err = nfp_flower_compile_flow_match(app, flow, key_layer, netdev,
896 					    flow_pay, tun_type);
897 	if (err)
898 		goto err_destroy_flow;
899 
900 	err = nfp_flower_compile_action(app, flow, netdev, flow_pay);
901 	if (err)
902 		goto err_destroy_flow;
903 
904 	err = nfp_compile_flow_metadata(app, flow, flow_pay, netdev);
905 	if (err)
906 		goto err_destroy_flow;
907 
908 	flow_pay->tc_flower_cookie = flow->cookie;
909 	err = rhashtable_insert_fast(&priv->flow_table, &flow_pay->fl_node,
910 				     nfp_flower_table_params);
911 	if (err)
912 		goto err_release_metadata;
913 
914 	err = nfp_flower_xmit_flow(app, flow_pay,
915 				   NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
916 	if (err)
917 		goto err_remove_rhash;
918 
919 	if (port)
920 		port->tc_offload_cnt++;
921 
922 	flow_pay->in_hw = true;
923 
	/* The key layer info is no longer needed; the flow payload itself is
	 * freed when the flower rule is destroyed.
	 */
925 	kfree(key_layer);
926 
927 	return 0;
928 
929 err_remove_rhash:
930 	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
931 					    &flow_pay->fl_node,
932 					    nfp_flower_table_params));
933 err_release_metadata:
934 	nfp_modify_flow_metadata(app, flow_pay);
935 err_destroy_flow:
936 	kfree(flow_pay->action_data);
937 	kfree(flow_pay->mask_data);
938 	kfree(flow_pay->unmasked_data);
939 	kfree(flow_pay);
940 err_free_key_ls:
941 	kfree(key_layer);
942 	return err;
943 }
944 
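/* Remove a merge flow from firmware and, if the rule it superseded has not
 * itself been deleted, re-offload that original rule.
 */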
945 static void
946 nfp_flower_remove_merge_flow(struct nfp_app *app,
947 			     struct nfp_fl_payload *del_sub_flow,
948 			     struct nfp_fl_payload *merge_flow)
949 {
950 	struct nfp_flower_priv *priv = app->priv;
951 	struct nfp_fl_payload_link *link, *temp;
952 	struct nfp_fl_payload *origin;
953 	bool mod = false;
954 	int err;
955 
956 	link = list_first_entry(&merge_flow->linked_flows,
957 				struct nfp_fl_payload_link, merge_flow.list);
958 	origin = link->sub_flow.flow;
959 
	/* Re-add the rule the merge had overwritten if it has not been
	 * deleted.
	 */
961 	if (origin != del_sub_flow)
962 		mod = true;
963 
964 	err = nfp_modify_flow_metadata(app, merge_flow);
965 	if (err) {
966 		nfp_flower_cmsg_warn(app, "Metadata fail for merge flow delete.\n");
967 		goto err_free_links;
968 	}
969 
970 	if (!mod) {
971 		err = nfp_flower_xmit_flow(app, merge_flow,
972 					   NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
973 		if (err) {
974 			nfp_flower_cmsg_warn(app, "Failed to delete merged flow.\n");
975 			goto err_free_links;
976 		}
977 	} else {
978 		__nfp_modify_flow_metadata(priv, origin);
979 		err = nfp_flower_xmit_flow(app, origin,
980 					   NFP_FLOWER_CMSG_TYPE_FLOW_MOD);
981 		if (err)
982 			nfp_flower_cmsg_warn(app, "Failed to revert merge flow.\n");
983 		origin->in_hw = true;
984 	}
985 
986 err_free_links:
987 	/* Clean any links connected with the merged flow. */
988 	list_for_each_entry_safe(link, temp, &merge_flow->linked_flows,
989 				 merge_flow.list)
990 		nfp_flower_unlink_flow(link);
991 
992 	kfree(merge_flow->action_data);
993 	kfree(merge_flow->mask_data);
994 	kfree(merge_flow->unmasked_data);
995 	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
996 					    &merge_flow->fl_node,
997 					    nfp_flower_table_params));
998 	kfree_rcu(merge_flow, rcu);
999 }
1000 
1001 static void
1002 nfp_flower_del_linked_merge_flows(struct nfp_app *app,
1003 				  struct nfp_fl_payload *sub_flow)
1004 {
1005 	struct nfp_fl_payload_link *link, *temp;
1006 
1007 	/* Remove any merge flow formed from the deleted sub_flow. */
1008 	list_for_each_entry_safe(link, temp, &sub_flow->linked_flows,
1009 				 sub_flow.list)
1010 		nfp_flower_remove_merge_flow(app, sub_flow,
1011 					     link->merge_flow.flow);
1012 }
1013 
1014 /**
1015  * nfp_flower_del_offload() - Removes a flow from hardware.
1016  * @app:	Pointer to the APP handle
1017  * @netdev:	netdev structure.
1018  * @flow:	TC flower classifier offload structure
1019  *
1020  * Removes a flow from the repeated hash structure and clears the
1021  * action payload. Any flows merged from this are also deleted.
1022  *
1023  * Return: negative value on error, 0 if removed successfully.
1024  */
1025 static int
1026 nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
1027 		       struct tc_cls_flower_offload *flow)
1028 {
1029 	struct nfp_flower_priv *priv = app->priv;
1030 	struct nfp_fl_payload *nfp_flow;
1031 	struct nfp_port *port = NULL;
1032 	int err;
1033 
1034 	if (nfp_netdev_is_nfp_repr(netdev))
1035 		port = nfp_port_from_netdev(netdev);
1036 
1037 	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
1038 	if (!nfp_flow)
1039 		return -ENOENT;
1040 
1041 	err = nfp_modify_flow_metadata(app, nfp_flow);
1042 	if (err)
1043 		goto err_free_merge_flow;
1044 
1045 	if (nfp_flow->nfp_tun_ipv4_addr)
1046 		nfp_tunnel_del_ipv4_off(app, nfp_flow->nfp_tun_ipv4_addr);
1047 
1048 	if (!nfp_flow->in_hw) {
1049 		err = 0;
1050 		goto err_free_merge_flow;
1051 	}
1052 
1053 	err = nfp_flower_xmit_flow(app, nfp_flow,
1054 				   NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
1055 	/* Fall through on error. */
1056 
1057 err_free_merge_flow:
1058 	nfp_flower_del_linked_merge_flows(app, nfp_flow);
1059 	if (port)
1060 		port->tc_offload_cnt--;
1061 	kfree(nfp_flow->action_data);
1062 	kfree(nfp_flow->mask_data);
1063 	kfree(nfp_flow->unmasked_data);
1064 	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
1065 					    &nfp_flow->fl_node,
1066 					    nfp_flower_table_params));
1067 	kfree_rcu(nfp_flow, rcu);
1068 	return err;
1069 }
1070 
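/* Distribute stats reported against a merge flow to each of its sub_flows. */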
1071 static void
1072 __nfp_flower_update_merge_stats(struct nfp_app *app,
1073 				struct nfp_fl_payload *merge_flow)
1074 {
1075 	struct nfp_flower_priv *priv = app->priv;
1076 	struct nfp_fl_payload_link *link;
1077 	struct nfp_fl_payload *sub_flow;
1078 	u64 pkts, bytes, used;
1079 	u32 ctx_id;
1080 
1081 	ctx_id = be32_to_cpu(merge_flow->meta.host_ctx_id);
1082 	pkts = priv->stats[ctx_id].pkts;
1083 	/* Do not cycle subflows if no stats to distribute. */
1084 	if (!pkts)
1085 		return;
1086 	bytes = priv->stats[ctx_id].bytes;
1087 	used = priv->stats[ctx_id].used;
1088 
1089 	/* Reset stats for the merge flow. */
1090 	priv->stats[ctx_id].pkts = 0;
1091 	priv->stats[ctx_id].bytes = 0;
1092 
	/* The merge flow has received stats updates from firmware.
	 * Distribute these stats to all subflows that form the merge.
	 * The stats will be collected from TC via the subflows.
	 */
1097 	list_for_each_entry(link, &merge_flow->linked_flows, merge_flow.list) {
1098 		sub_flow = link->sub_flow.flow;
1099 		ctx_id = be32_to_cpu(sub_flow->meta.host_ctx_id);
1100 		priv->stats[ctx_id].pkts += pkts;
1101 		priv->stats[ctx_id].bytes += bytes;
		priv->stats[ctx_id].used = max_t(u64, used,
						 priv->stats[ctx_id].used);
1103 	}
1104 }
1105 
1106 static void
1107 nfp_flower_update_merge_stats(struct nfp_app *app,
1108 			      struct nfp_fl_payload *sub_flow)
1109 {
1110 	struct nfp_fl_payload_link *link;
1111 
1112 	/* Get merge flows that the subflow forms to distribute their stats. */
1113 	list_for_each_entry(link, &sub_flow->linked_flows, sub_flow.list)
1114 		__nfp_flower_update_merge_stats(app, link->merge_flow.flow);
1115 }
1116 
1117 /**
1118  * nfp_flower_get_stats() - Populates flow stats obtained from hardware.
1119  * @app:	Pointer to the APP handle
1120  * @netdev:	Netdev structure.
1121  * @flow:	TC flower classifier offload structure
1122  *
 * Populates a flow statistics structure which corresponds to a
1124  * specific flow.
1125  *
1126  * Return: negative value on error, 0 if stats populated successfully.
1127  */
1128 static int
1129 nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
1130 		     struct tc_cls_flower_offload *flow)
1131 {
1132 	struct nfp_flower_priv *priv = app->priv;
1133 	struct nfp_fl_payload *nfp_flow;
1134 	u32 ctx_id;
1135 
1136 	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
1137 	if (!nfp_flow)
1138 		return -EINVAL;
1139 
1140 	ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);
1141 
1142 	spin_lock_bh(&priv->stats_lock);
1143 	/* If request is for a sub_flow, update stats from merged flows. */
1144 	if (!list_empty(&nfp_flow->linked_flows))
1145 		nfp_flower_update_merge_stats(app, nfp_flow);
1146 
1147 	flow_stats_update(&flow->stats, priv->stats[ctx_id].bytes,
1148 			  priv->stats[ctx_id].pkts, priv->stats[ctx_id].used);
1149 
1150 	priv->stats[ctx_id].pkts = 0;
1151 	priv->stats[ctx_id].bytes = 0;
1152 	spin_unlock_bh(&priv->stats_lock);
1153 
1154 	return 0;
1155 }
1156 
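/* Dispatch a flower classifier request to the add/del/stats handlers. */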
1157 static int
1158 nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
1159 			struct tc_cls_flower_offload *flower)
1160 {
1161 	if (!eth_proto_is_802_3(flower->common.protocol))
1162 		return -EOPNOTSUPP;
1163 
1164 	switch (flower->command) {
1165 	case TC_CLSFLOWER_REPLACE:
1166 		return nfp_flower_add_offload(app, netdev, flower);
1167 	case TC_CLSFLOWER_DESTROY:
1168 		return nfp_flower_del_offload(app, netdev, flower);
1169 	case TC_CLSFLOWER_STATS:
1170 		return nfp_flower_get_stats(app, netdev, flower);
1171 	default:
1172 		return -EOPNOTSUPP;
1173 	}
1174 }
1175 
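/* TC block callback for offload requests on repr netdevs. */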
1176 static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
1177 					void *type_data, void *cb_priv)
1178 {
1179 	struct nfp_repr *repr = cb_priv;
1180 
1181 	if (!tc_cls_can_offload_and_chain0(repr->netdev, type_data))
1182 		return -EOPNOTSUPP;
1183 
1184 	switch (type) {
1185 	case TC_SETUP_CLSFLOWER:
1186 		return nfp_flower_repr_offload(repr->app, repr->netdev,
1187 					       type_data);
1188 	case TC_SETUP_CLSMATCHALL:
1189 		return nfp_flower_setup_qos_offload(repr->app, repr->netdev,
1190 						    type_data);
1191 	default:
1192 		return -EOPNOTSUPP;
1193 	}
1194 }
1195 
1196 static int nfp_flower_setup_tc_block(struct net_device *netdev,
1197 				     struct tc_block_offload *f)
1198 {
1199 	struct nfp_repr *repr = netdev_priv(netdev);
1200 	struct nfp_flower_repr_priv *repr_priv;
1201 
1202 	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
1203 		return -EOPNOTSUPP;
1204 
1205 	repr_priv = repr->app_priv;
1206 	repr_priv->block_shared = tcf_block_shared(f->block);
1207 
1208 	switch (f->command) {
1209 	case TC_BLOCK_BIND:
1210 		return tcf_block_cb_register(f->block,
1211 					     nfp_flower_setup_tc_block_cb,
1212 					     repr, repr, f->extack);
1213 	case TC_BLOCK_UNBIND:
1214 		tcf_block_cb_unregister(f->block,
1215 					nfp_flower_setup_tc_block_cb,
1216 					repr);
1217 		return 0;
1218 	default:
1219 		return -EOPNOTSUPP;
1220 	}
1221 }
1222 
1223 int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
1224 			enum tc_setup_type type, void *type_data)
1225 {
1226 	switch (type) {
1227 	case TC_SETUP_BLOCK:
1228 		return nfp_flower_setup_tc_block(netdev, type_data);
1229 	default:
1230 		return -EOPNOTSUPP;
1231 	}
1232 }
1233 
1234 struct nfp_flower_indr_block_cb_priv {
1235 	struct net_device *netdev;
1236 	struct nfp_app *app;
1237 	struct list_head list;
1238 };
1239 
1240 static struct nfp_flower_indr_block_cb_priv *
1241 nfp_flower_indr_block_cb_priv_lookup(struct nfp_app *app,
1242 				     struct net_device *netdev)
1243 {
1244 	struct nfp_flower_indr_block_cb_priv *cb_priv;
1245 	struct nfp_flower_priv *priv = app->priv;
1246 
1247 	/* All callback list access should be protected by RTNL. */
1248 	ASSERT_RTNL();
1249 
1250 	list_for_each_entry(cb_priv, &priv->indr_block_cb_priv, list)
1251 		if (cb_priv->netdev == netdev)
1252 			return cb_priv;
1253 
1254 	return NULL;
1255 }
1256 
1257 static int nfp_flower_setup_indr_block_cb(enum tc_setup_type type,
1258 					  void *type_data, void *cb_priv)
1259 {
1260 	struct nfp_flower_indr_block_cb_priv *priv = cb_priv;
1261 	struct tc_cls_flower_offload *flower = type_data;
1262 
1263 	if (flower->common.chain_index)
1264 		return -EOPNOTSUPP;
1265 
1266 	switch (type) {
1267 	case TC_SETUP_CLSFLOWER:
1268 		return nfp_flower_repr_offload(priv->app, priv->netdev,
1269 					       type_data);
1270 	default:
1271 		return -EOPNOTSUPP;
1272 	}
1273 }
1274 
1275 static int
1276 nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
1277 			       struct tc_block_offload *f)
1278 {
1279 	struct nfp_flower_indr_block_cb_priv *cb_priv;
1280 	struct nfp_flower_priv *priv = app->priv;
1281 	int err;
1282 
1283 	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
1284 	    !(f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS &&
1285 	      nfp_flower_internal_port_can_offload(app, netdev)))
1286 		return -EOPNOTSUPP;
1287 
1288 	switch (f->command) {
1289 	case TC_BLOCK_BIND:
1290 		cb_priv = kmalloc(sizeof(*cb_priv), GFP_KERNEL);
1291 		if (!cb_priv)
1292 			return -ENOMEM;
1293 
1294 		cb_priv->netdev = netdev;
1295 		cb_priv->app = app;
1296 		list_add(&cb_priv->list, &priv->indr_block_cb_priv);
1297 
1298 		err = tcf_block_cb_register(f->block,
1299 					    nfp_flower_setup_indr_block_cb,
1300 					    cb_priv, cb_priv, f->extack);
1301 		if (err) {
1302 			list_del(&cb_priv->list);
1303 			kfree(cb_priv);
1304 		}
1305 
1306 		return err;
1307 	case TC_BLOCK_UNBIND:
1308 		cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev);
1309 		if (!cb_priv)
1310 			return -ENOENT;
1311 
1312 		tcf_block_cb_unregister(f->block,
1313 					nfp_flower_setup_indr_block_cb,
1314 					cb_priv);
1315 		list_del(&cb_priv->list);
1316 		kfree(cb_priv);
1317 
1318 		return 0;
1319 	default:
1320 		return -EOPNOTSUPP;
1321 	}
1323 }
1324 
1325 static int
1326 nfp_flower_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
1327 			    enum tc_setup_type type, void *type_data)
1328 {
1329 	switch (type) {
1330 	case TC_SETUP_BLOCK:
1331 		return nfp_flower_setup_indr_tc_block(netdev, cb_priv,
1332 						      type_data);
1333 	default:
1334 		return -EOPNOTSUPP;
1335 	}
1336 }
1337 
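/* Register or unregister, based on the netdev event, an indirect TC block
 * callback for netdevs that flower can offload to but does not control.
 */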
1338 int nfp_flower_reg_indir_block_handler(struct nfp_app *app,
1339 				       struct net_device *netdev,
1340 				       unsigned long event)
1341 {
1342 	int err;
1343 
1344 	if (!nfp_fl_is_netdev_to_offload(netdev))
1345 		return NOTIFY_OK;
1346 
1347 	if (event == NETDEV_REGISTER) {
1348 		err = __tc_indr_block_cb_register(netdev, app,
1349 						  nfp_flower_indr_setup_tc_cb,
1350 						  app);
1351 		if (err)
1352 			nfp_flower_cmsg_warn(app,
1353 					     "Indirect block reg failed - %s\n",
1354 					     netdev->name);
1355 	} else if (event == NETDEV_UNREGISTER) {
1356 		__tc_indr_block_cb_unregister(netdev,
1357 					      nfp_flower_indr_setup_tc_cb, app);
1358 	}
1359 
1360 	return NOTIFY_OK;
1361 }
1362