// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/skbuff.h>
#include <net/devlink.h>
#include <net/pkt_cls.h>

#include "cmsg.h"
#include "main.h"
#include "../nfpcore/nfp_cpp.h"
#include "../nfpcore/nfp_nsp.h"
#include "../nfp_app.h"
#include "../nfp_main.h"
#include "../nfp_net.h"
#include "../nfp_port.h"

#define NFP_FLOWER_SUPPORTED_TCPFLAGS \
	(TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST | \
	 TCPHDR_PSH | TCPHDR_URG)

#define NFP_FLOWER_SUPPORTED_CTLFLAGS \
	(FLOW_DIS_IS_FRAGMENT | \
	 FLOW_DIS_FIRST_FRAG)

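/* The flower core reports every dissector key a filter matches on as a
 * bit in flow->dissector->used_keys.  The masks below whitelist the key
 * combinations the firmware key layout can express; anything outside
 * them is rejected with -EOPNOTSUPP in nfp_flower_calculate_key_layers().
 */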
#define NFP_FLOWER_WHITELIST_DISSECTOR \
	(BIT(FLOW_DISSECTOR_KEY_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_BASIC) | \
	 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_TCP) | \
	 BIT(FLOW_DISSECTOR_KEY_PORTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_VLAN) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IP) | \
	 BIT(FLOW_DISSECTOR_KEY_MPLS) | \
	 BIT(FLOW_DISSECTOR_KEY_IP))

#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR \
	(BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IP))

#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R \
	(BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))

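/* Build a flower control message for @nfp_flow of type @mtype (e.g.
 * NFP_FLOWER_CMSG_TYPE_FLOW_ADD) and send it to the firmware over the
 * control channel.
 */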
static int
nfp_flower_xmit_flow(struct net_device *netdev,
		     struct nfp_fl_payload *nfp_flow, u8 mtype)
{
	u32 meta_len, key_len, mask_len, act_len, tot_len;
	struct nfp_repr *priv = netdev_priv(netdev);
	struct sk_buff *skb;
	unsigned char *msg;

	meta_len = sizeof(struct nfp_fl_rule_metadata);
	key_len = nfp_flow->meta.key_len;
	mask_len = nfp_flow->meta.mask_len;
	act_len = nfp_flow->meta.act_len;

	tot_len = meta_len + key_len + mask_len + act_len;

	/* Convert to long words as firmware expects
	 * lengths in units of NFP_FL_LW_SIZ.
	 */
	nfp_flow->meta.key_len >>= NFP_FL_LW_SIZ;
	nfp_flow->meta.mask_len >>= NFP_FL_LW_SIZ;
	nfp_flow->meta.act_len >>= NFP_FL_LW_SIZ;

	skb = nfp_flower_cmsg_alloc(priv->app, tot_len, mtype, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

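	/* The message body is laid out back to back: rule metadata, then
	 * the unmasked key, the mask and finally the action list.
	 */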
	msg = nfp_flower_cmsg_get_data(skb);
	memcpy(msg, &nfp_flow->meta, meta_len);
	memcpy(&msg[meta_len], nfp_flow->unmasked_data, key_len);
	memcpy(&msg[meta_len + key_len], nfp_flow->mask_data, mask_len);
	memcpy(&msg[meta_len + key_len + mask_len],
	       nfp_flow->action_data, act_len);

	/* Convert back to bytes as software expects
	 * lengths in units of bytes.
	 */
	nfp_flow->meta.key_len <<= NFP_FL_LW_SIZ;
	nfp_flow->meta.mask_len <<= NFP_FL_LW_SIZ;
	nfp_flow->meta.act_len <<= NFP_FL_LW_SIZ;

	nfp_ctrl_tx(priv->app->ctrl, skb);

	return 0;
}

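/* Return true if the filter matches on any field above the MAC layer
 * (L3 addresses, L4 ports or ICMP).
 */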
static bool nfp_flower_check_higher_than_mac(struct tc_cls_flower_offload *f)
{
	return dissector_uses_key(f->dissector,
				  FLOW_DISSECTOR_KEY_IPV4_ADDRS) ||
		dissector_uses_key(f->dissector,
				   FLOW_DISSECTOR_KEY_IPV6_ADDRS) ||
		dissector_uses_key(f->dissector,
				   FLOW_DISSECTOR_KEY_PORTS) ||
		dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ICMP);
}

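/* Account for Geneve options in the key layout: options larger than the
 * firmware's option key cannot be offloaded.
 */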
static int
nfp_flower_calc_opt_layer(struct flow_dissector_key_enc_opts *enc_opts,
			  u32 *key_layer_two, int *key_size)
{
	if (enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY)
		return -EOPNOTSUPP;

	if (enc_opts->len > 0) {
		*key_layer_two |= NFP_FLOWER_LAYER2_GENEVE_OP;
		*key_size += sizeof(struct nfp_flower_geneve_options);
	}

	return 0;
}

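/* Walk the dissector keys used by @flow and work out which firmware key
 * layers are needed to represent the match, accumulating the total key
 * size as layers are added.  Returns -EOPNOTSUPP for anything the
 * firmware cannot express.
 */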
static int
nfp_flower_calculate_key_layers(struct nfp_app *app,
				struct nfp_fl_key_ls *ret_key_ls,
				struct tc_cls_flower_offload *flow,
				bool egress,
				enum nfp_flower_tun_type *tun_type)
{
	struct flow_dissector_key_basic *mask_basic = NULL;
	struct flow_dissector_key_basic *key_basic = NULL;
	struct nfp_flower_priv *priv = app->priv;
	u32 key_layer_two;
	u8 key_layer;
	int key_size;
	int err;

	if (flow->dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR)
		return -EOPNOTSUPP;

	/* If any tun dissector is used then the required set must be used. */
	if (flow->dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR &&
	    (flow->dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
	    != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
		return -EOPNOTSUPP;

	key_layer_two = 0;
	key_layer = NFP_FLOWER_LAYER_PORT;
	key_size = sizeof(struct nfp_flower_meta_tci) +
		   sizeof(struct nfp_flower_in_port);

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS) ||
	    dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_MPLS)) {
		key_layer |= NFP_FLOWER_LAYER_MAC;
		key_size += sizeof(struct nfp_flower_mac_mpls);
	}

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *flow_vlan;

		flow_vlan = skb_flow_dissector_target(flow->dissector,
						      FLOW_DISSECTOR_KEY_VLAN,
						      flow->mask);
		if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_PCP) &&
		    flow_vlan->vlan_priority)
			return -EOPNOTSUPP;
	}

	if (dissector_uses_key(flow->dissector,
			       FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_dissector_key_ipv4_addrs *mask_ipv4 = NULL;
		struct flow_dissector_key_ports *mask_enc_ports = NULL;
		struct flow_dissector_key_enc_opts *enc_op = NULL;
		struct flow_dissector_key_ports *enc_ports = NULL;
		struct flow_dissector_key_control *mask_enc_ctl =
			skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_ENC_CONTROL,
						  flow->mask);
		struct flow_dissector_key_control *enc_ctl =
			skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_ENC_CONTROL,
						  flow->key);
		if (!egress)
			return -EOPNOTSUPP;

		if (mask_enc_ctl->addr_type != 0xffff ||
		    enc_ctl->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS)
			return -EOPNOTSUPP;

		/* These fields are already verified as used. */
		mask_ipv4 =
			skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  flow->mask);
		if (mask_ipv4->dst != cpu_to_be32(~0))
			return -EOPNOTSUPP;

		mask_enc_ports =
			skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  flow->mask);
		enc_ports =
			skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  flow->key);

		if (mask_enc_ports->dst != cpu_to_be16(~0))
			return -EOPNOTSUPP;

		if (dissector_uses_key(flow->dissector,
				       FLOW_DISSECTOR_KEY_ENC_OPTS)) {
			enc_op = skb_flow_dissector_target(flow->dissector,
							   FLOW_DISSECTOR_KEY_ENC_OPTS,
							   flow->key);
		}

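		/* The tunnel type is inferred from the fully-masked
		 * destination UDP port of the encapsulation match; only
		 * the well-known VXLAN and Geneve ports are handled.
		 */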
		switch (enc_ports->dst) {
		case htons(NFP_FL_VXLAN_PORT):
			*tun_type = NFP_FL_TUNNEL_VXLAN;
			key_layer |= NFP_FLOWER_LAYER_VXLAN;
			key_size += sizeof(struct nfp_flower_ipv4_udp_tun);

			if (enc_op)
				return -EOPNOTSUPP;
			break;
		case htons(NFP_FL_GENEVE_PORT):
			if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE))
				return -EOPNOTSUPP;
			*tun_type = NFP_FL_TUNNEL_GENEVE;
			key_layer |= NFP_FLOWER_LAYER_EXT_META;
			key_size += sizeof(struct nfp_flower_ext_meta);
			key_layer_two |= NFP_FLOWER_LAYER2_GENEVE;
			key_size += sizeof(struct nfp_flower_ipv4_udp_tun);

			if (!enc_op)
				break;
			if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT))
				return -EOPNOTSUPP;
			err = nfp_flower_calc_opt_layer(enc_op, &key_layer_two,
							&key_size);
			if (err)
				return err;
			break;
		default:
			return -EOPNOTSUPP;
		}
	} else if (egress) {
		/* Reject non-tunnel matches offloaded to egress repr. */
		return -EOPNOTSUPP;
	}

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		mask_basic = skb_flow_dissector_target(flow->dissector,
						       FLOW_DISSECTOR_KEY_BASIC,
						       flow->mask);

		key_basic = skb_flow_dissector_target(flow->dissector,
						      FLOW_DISSECTOR_KEY_BASIC,
						      flow->key);
	}

	if (mask_basic && mask_basic->n_proto) {
		/* Ethernet type is present in the key. */
		switch (key_basic->n_proto) {
		case cpu_to_be16(ETH_P_IP):
			key_layer |= NFP_FLOWER_LAYER_IPV4;
			key_size += sizeof(struct nfp_flower_ipv4);
			break;

		case cpu_to_be16(ETH_P_IPV6):
			key_layer |= NFP_FLOWER_LAYER_IPV6;
			key_size += sizeof(struct nfp_flower_ipv6);
			break;

		/* Currently we do not offload ARP
		 * because we rely on it to get to the host.
		 */
		case cpu_to_be16(ETH_P_ARP):
			return -EOPNOTSUPP;

		case cpu_to_be16(ETH_P_MPLS_UC):
		case cpu_to_be16(ETH_P_MPLS_MC):
			if (!(key_layer & NFP_FLOWER_LAYER_MAC)) {
				key_layer |= NFP_FLOWER_LAYER_MAC;
				key_size += sizeof(struct nfp_flower_mac_mpls);
			}
			break;

		/* Will be included in layer 2. */
		case cpu_to_be16(ETH_P_8021Q):
			break;

		default:
			/* Other ethtype - we need to check the masks for the
			 * remainder of the key to ensure we can offload.
			 */
			if (nfp_flower_check_higher_than_mac(flow))
				return -EOPNOTSUPP;
			break;
		}
	}

	if (mask_basic && mask_basic->ip_proto) {
		/* IP protocol is present in the key. */
		switch (key_basic->ip_proto) {
		case IPPROTO_TCP:
		case IPPROTO_UDP:
		case IPPROTO_SCTP:
		case IPPROTO_ICMP:
		case IPPROTO_ICMPV6:
			key_layer |= NFP_FLOWER_LAYER_TP;
			key_size += sizeof(struct nfp_flower_tp_ports);
			break;
		default:
			/* Other IP proto - we need to check the masks for the
			 * remainder of the key to ensure we can offload.
			 */
			return -EOPNOTSUPP;
		}
	}

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_TCP)) {
		struct flow_dissector_key_tcp *tcp;
		u32 tcp_flags;

		tcp = skb_flow_dissector_target(flow->dissector,
						FLOW_DISSECTOR_KEY_TCP,
						flow->key);
		tcp_flags = be16_to_cpu(tcp->flags);

		if (tcp_flags & ~NFP_FLOWER_SUPPORTED_TCPFLAGS)
			return -EOPNOTSUPP;

		/* We only support PSH and URG flags when either
		 * FIN, SYN or RST is present as well.
		 */
		if ((tcp_flags & (TCPHDR_PSH | TCPHDR_URG)) &&
		    !(tcp_flags & (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST)))
			return -EOPNOTSUPP;

		/* We need to store TCP flags in the IPv4 key space, thus
		 * we need to ensure we include an IPv4 key layer if we have
		 * not done so already.
		 */
		if (!(key_layer & NFP_FLOWER_LAYER_IPV4)) {
			key_layer |= NFP_FLOWER_LAYER_IPV4;
			key_size += sizeof(struct nfp_flower_ipv4);
		}
	}

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key_ctl;

		key_ctl = skb_flow_dissector_target(flow->dissector,
						    FLOW_DISSECTOR_KEY_CONTROL,
						    flow->key);

		if (key_ctl->flags & ~NFP_FLOWER_SUPPORTED_CTLFLAGS)
			return -EOPNOTSUPP;
	}

	ret_key_ls->key_layer = key_layer;
	ret_key_ls->key_layer_two = key_layer_two;
	ret_key_ls->key_size = key_size;

	return 0;
}

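/* Allocate a flow payload sized from the computed key layout: the
 * unmasked key and mask buffers both use the key size, while the action
 * buffer is always allocated at the maximum action list size.
 */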
static struct nfp_fl_payload *
nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer, bool egress)
{
	struct nfp_fl_payload *flow_pay;

	flow_pay = kmalloc(sizeof(*flow_pay), GFP_KERNEL);
	if (!flow_pay)
		return NULL;

	flow_pay->meta.key_len = key_layer->key_size;
	flow_pay->unmasked_data = kmalloc(key_layer->key_size, GFP_KERNEL);
	if (!flow_pay->unmasked_data)
		goto err_free_flow;

	flow_pay->meta.mask_len = key_layer->key_size;
	flow_pay->mask_data = kmalloc(key_layer->key_size, GFP_KERNEL);
	if (!flow_pay->mask_data)
		goto err_free_unmasked;

	flow_pay->action_data = kmalloc(NFP_FL_MAX_A_SIZ, GFP_KERNEL);
	if (!flow_pay->action_data)
		goto err_free_mask;

	flow_pay->nfp_tun_ipv4_addr = 0;
	flow_pay->meta.flags = 0;
	flow_pay->ingress_offload = !egress;

	return flow_pay;

err_free_mask:
	kfree(flow_pay->mask_data);
err_free_unmasked:
	kfree(flow_pay->unmasked_data);
err_free_flow:
	kfree(flow_pay);
	return NULL;
}

/**
 * nfp_flower_add_offload() - Adds a new flow to hardware.
 * @app:	Pointer to the APP handle
 * @netdev:	netdev structure.
 * @flow:	TC flower classifier offload structure.
 * @egress:	NFP netdev is the egress.
 *
 * Adds a new flow to the driver's flow table (rhashtable) and builds its
 * action payload.
 *
 * Return: negative value on error, 0 if configured successfully.
 */
static int
nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
		       struct tc_cls_flower_offload *flow, bool egress)
{
	enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
	struct nfp_port *port = nfp_port_from_netdev(netdev);
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload *flow_pay;
	struct nfp_fl_key_ls *key_layer;
	struct net_device *ingr_dev;
	int err;

	ingr_dev = egress ? NULL : netdev;
	flow_pay = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev,
					      NFP_FL_STATS_CTX_DONT_CARE);
	if (flow_pay) {
		/* Ignore as duplicate if it has been added by a different cb. */
		if (flow_pay->ingress_offload && egress)
			return 0;
		return -EOPNOTSUPP;
	}

	key_layer = kmalloc(sizeof(*key_layer), GFP_KERNEL);
	if (!key_layer)
		return -ENOMEM;

	err = nfp_flower_calculate_key_layers(app, key_layer, flow, egress,
					      &tun_type);
	if (err)
		goto err_free_key_ls;

	flow_pay = nfp_flower_allocate_new(key_layer, egress);
	if (!flow_pay) {
		err = -ENOMEM;
		goto err_free_key_ls;
	}

	flow_pay->ingress_dev = egress ? NULL : netdev;

	err = nfp_flower_compile_flow_match(flow, key_layer, netdev, flow_pay,
					    tun_type);
	if (err)
		goto err_destroy_flow;

	err = nfp_flower_compile_action(app, flow, netdev, flow_pay);
	if (err)
		goto err_destroy_flow;

	err = nfp_compile_flow_metadata(app, flow, flow_pay,
					flow_pay->ingress_dev);
	if (err)
		goto err_destroy_flow;

	flow_pay->tc_flower_cookie = flow->cookie;
	err = rhashtable_insert_fast(&priv->flow_table, &flow_pay->fl_node,
				     nfp_flower_table_params);
	if (err)
		goto err_release_metadata;

	err = nfp_flower_xmit_flow(netdev, flow_pay,
				   NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
	if (err)
		goto err_remove_rhash;

	port->tc_offload_cnt++;

	/* The key layout scratch space is no longer needed; the flow
	 * payload itself is freed when the flower rule is destroyed.
	 */
	kfree(key_layer);

	return 0;

err_remove_rhash:
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
					    &flow_pay->fl_node,
					    nfp_flower_table_params));
err_release_metadata:
	nfp_modify_flow_metadata(app, flow_pay);
err_destroy_flow:
	kfree(flow_pay->action_data);
	kfree(flow_pay->mask_data);
	kfree(flow_pay->unmasked_data);
	kfree(flow_pay);
err_free_key_ls:
	kfree(key_layer);
	return err;
}

/**
 * nfp_flower_del_offload() - Removes a flow from hardware.
 * @app:	Pointer to the APP handle
 * @netdev:	netdev structure.
 * @flow:	TC flower classifier offload structure
 * @egress:	Netdev is the egress dev.
 *
 * Removes a flow from the driver's flow table (rhashtable) and frees the
 * action payload.
 *
 * Return: negative value on error, 0 if removed successfully.
 */
static int
nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
		       struct tc_cls_flower_offload *flow, bool egress)
{
	struct nfp_port *port = nfp_port_from_netdev(netdev);
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload *nfp_flow;
	struct net_device *ingr_dev;
	int err;

	ingr_dev = egress ? NULL : netdev;
	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev,
					      NFP_FL_STATS_CTX_DONT_CARE);
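	/* A lookup miss on the egress path is not an error: the rule may
	 * have been installed via the ingress callback instead.
	 */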
	if (!nfp_flow)
		return egress ? 0 : -ENOENT;

	err = nfp_modify_flow_metadata(app, nfp_flow);
	if (err)
		goto err_free_flow;

	if (nfp_flow->nfp_tun_ipv4_addr)
		nfp_tunnel_del_ipv4_off(app, nfp_flow->nfp_tun_ipv4_addr);

	err = nfp_flower_xmit_flow(netdev, nfp_flow,
				   NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
	if (err)
		goto err_free_flow;

err_free_flow:
	port->tc_offload_cnt--;
	kfree(nfp_flow->action_data);
	kfree(nfp_flow->mask_data);
	kfree(nfp_flow->unmasked_data);
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
					    &nfp_flow->fl_node,
					    nfp_flower_table_params));
	kfree_rcu(nfp_flow, rcu);
	return err;
}

/**
 * nfp_flower_get_stats() - Populates flow stats obtained from hardware.
 * @app:	Pointer to the APP handle
 * @netdev:	Netdev structure.
 * @flow:	TC flower classifier offload structure
 * @egress:	Netdev is the egress dev.
 *
 * Populates a flow statistics structure which corresponds to a
 * specific flow.
 *
 * Return: negative value on error, 0 if stats populated successfully.
 */
static int
nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
		     struct tc_cls_flower_offload *flow, bool egress)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload *nfp_flow;
	struct net_device *ingr_dev;
	u32 ctx_id;

	ingr_dev = egress ? NULL : netdev;
	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev,
					      NFP_FL_STATS_CTX_DONT_CARE);
	if (!nfp_flow)
		return -EINVAL;

	if (nfp_flow->ingress_offload && egress)
		return 0;

	ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);

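	/* Stats are accumulated per host context; report the deltas to TC
	 * and zero the counters under the stats lock.
	 */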
	spin_lock_bh(&priv->stats_lock);
	tcf_exts_stats_update(flow->exts, priv->stats[ctx_id].bytes,
			      priv->stats[ctx_id].pkts,
			      priv->stats[ctx_id].used);

	priv->stats[ctx_id].pkts = 0;
	priv->stats[ctx_id].bytes = 0;
	spin_unlock_bh(&priv->stats_lock);

	return 0;
}

static int
nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
			struct tc_cls_flower_offload *flower, bool egress)
{
	if (!eth_proto_is_802_3(flower->common.protocol))
		return -EOPNOTSUPP;

	switch (flower->command) {
	case TC_CLSFLOWER_REPLACE:
		return nfp_flower_add_offload(app, netdev, flower, egress);
	case TC_CLSFLOWER_DESTROY:
		return nfp_flower_del_offload(app, netdev, flower, egress);
	case TC_CLSFLOWER_STATS:
		return nfp_flower_get_stats(app, netdev, flower, egress);
	default:
		return -EOPNOTSUPP;
	}
}

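/* Callback for rules whose egress device is an NFP repr; offloads are
 * attempted with the egress flag set.
 */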
int nfp_flower_setup_tc_egress_cb(enum tc_setup_type type, void *type_data,
				  void *cb_priv)
{
	struct nfp_repr *repr = cb_priv;

	if (!tc_cls_can_offload_and_chain0(repr->netdev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return nfp_flower_repr_offload(repr->app, repr->netdev,
					       type_data, true);
	default:
		return -EOPNOTSUPP;
	}
}

static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
					void *type_data, void *cb_priv)
{
	struct nfp_repr *repr = cb_priv;

	if (!tc_cls_can_offload_and_chain0(repr->netdev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return nfp_flower_repr_offload(repr->app, repr->netdev,
					       type_data, false);
	default:
		return -EOPNOTSUPP;
	}
}

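/* Only clsact ingress blocks are supported: BIND registers the per-repr
 * flower callback on the block and UNBIND removes it.
 */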
static int nfp_flower_setup_tc_block(struct net_device *netdev,
				     struct tc_block_offload *f)
{
	struct nfp_repr *repr = netdev_priv(netdev);

	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_BLOCK_BIND:
		return tcf_block_cb_register(f->block,
					     nfp_flower_setup_tc_block_cb,
					     repr, repr, f->extack);
	case TC_BLOCK_UNBIND:
		tcf_block_cb_unregister(f->block,
					nfp_flower_setup_tc_block_cb,
					repr);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
			enum tc_setup_type type, void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return nfp_flower_setup_tc_block(netdev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
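
/* Example only, not part of the driver: a flower rule added on an NFP
 * representor's clsact ingress block is what reaches
 * nfp_flower_add_offload() via the callbacks above ($REPR stands for a
 * representor netdev name):
 *
 *   tc qdisc add dev $REPR clsact
 *   tc filter add dev $REPR ingress protocol ip flower \
 *           ip_proto tcp dst_port 80 action drop
 */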