// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/skbuff.h>
#include <net/devlink.h>
#include <net/pkt_cls.h>

#include "cmsg.h"
#include "main.h"
#include "../nfpcore/nfp_cpp.h"
#include "../nfpcore/nfp_nsp.h"
#include "../nfp_app.h"
#include "../nfp_main.h"
#include "../nfp_net.h"
#include "../nfp_port.h"

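/* TCP flag bits that can be offloaded; flows matching on any other flag
 * are rejected in nfp_flower_calculate_key_layers().
 */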
#define NFP_FLOWER_SUPPORTED_TCPFLAGS \
	(TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST | \
	 TCPHDR_PSH | TCPHDR_URG)

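/* Flow control flags that can be offloaded (fragmentation state). */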
#define NFP_FLOWER_SUPPORTED_CTLFLAGS \
	(FLOW_DIS_IS_FRAGMENT | \
	 FLOW_DIS_FIRST_FRAG)

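/* Every flow dissector key the driver understands at all; a flow using
 * any key outside this set cannot be offloaded.
 */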
#define NFP_FLOWER_WHITELIST_DISSECTOR \
	(BIT(FLOW_DISSECTOR_KEY_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_BASIC) | \
	 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_TCP) | \
	 BIT(FLOW_DISSECTOR_KEY_PORTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_VLAN) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IP) | \
	 BIT(FLOW_DISSECTOR_KEY_MPLS) | \
	 BIT(FLOW_DISSECTOR_KEY_IP))

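/* Tunnel (encapsulation) dissector keys the driver understands. */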
#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR \
	(BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IP))

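/* Minimum set of tunnel keys that must accompany any tunnel match. */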
#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R \
	(BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))

static int
nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow,
		     u8 mtype)
{
	u32 meta_len, key_len, mask_len, act_len, tot_len;
	struct sk_buff *skb;
	unsigned char *msg;

	meta_len = sizeof(struct nfp_fl_rule_metadata);
	key_len = nfp_flow->meta.key_len;
	mask_len = nfp_flow->meta.mask_len;
	act_len = nfp_flow->meta.act_len;

	tot_len = meta_len + key_len + mask_len + act_len;

	/* Convert lengths from bytes to long words, as the firmware
	 * expects lengths in long-word units (NFP_FL_LW_SIZ is the shift).
	 */
	nfp_flow->meta.key_len >>= NFP_FL_LW_SIZ;
	nfp_flow->meta.mask_len >>= NFP_FL_LW_SIZ;
	nfp_flow->meta.act_len >>= NFP_FL_LW_SIZ;

	skb = nfp_flower_cmsg_alloc(app, tot_len, mtype, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

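	/* The control message body is laid out as:
	 *
	 *   [ rule metadata | unmasked key | mask | action list ]
	 */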
	msg = nfp_flower_cmsg_get_data(skb);
	memcpy(msg, &nfp_flow->meta, meta_len);
	memcpy(&msg[meta_len], nfp_flow->unmasked_data, key_len);
	memcpy(&msg[meta_len + key_len], nfp_flow->mask_data, mask_len);
	memcpy(&msg[meta_len + key_len + mask_len],
	       nfp_flow->action_data, act_len);

	/* Convert the lengths back to bytes, as the rest of the driver
	 * works with byte lengths.
	 */
	nfp_flow->meta.key_len <<= NFP_FL_LW_SIZ;
	nfp_flow->meta.mask_len <<= NFP_FL_LW_SIZ;
	nfp_flow->meta.act_len <<= NFP_FL_LW_SIZ;

	nfp_ctrl_tx(app->ctrl, skb);

	return 0;
}

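/* Return true if the flow matches on any field above the MAC layer.
 * Used to reject flows whose ethertype we cannot parse but which still
 * try to match on L3/L4 fields.
 */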
static bool nfp_flower_check_higher_than_mac(struct tc_cls_flower_offload *f)
{
	return dissector_uses_key(f->dissector,
				  FLOW_DISSECTOR_KEY_IPV4_ADDRS) ||
		dissector_uses_key(f->dissector,
				   FLOW_DISSECTOR_KEY_IPV6_ADDRS) ||
		dissector_uses_key(f->dissector,
				   FLOW_DISSECTOR_KEY_PORTS) ||
		dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ICMP);
}

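/* Account for tunnel option matches (GENEVE options) in the key layout;
 * at most NFP_FL_MAX_GENEVE_OPT_KEY bytes of option data can be matched.
 */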
static int
nfp_flower_calc_opt_layer(struct flow_dissector_key_enc_opts *enc_opts,
			  u32 *key_layer_two, int *key_size)
{
	if (enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY)
		return -EOPNOTSUPP;

	if (enc_opts->len > 0) {
		*key_layer_two |= NFP_FLOWER_LAYER2_GENEVE_OP;
		*key_size += sizeof(struct nfp_flower_geneve_options);
	}

	return 0;
}

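/**
 * nfp_flower_calculate_key_layers() - Verify a flow and size its HW key.
 * @app:	Pointer to the APP handle
 * @netdev:	netdev structure.
 * @ret_key_ls:	Filled with the key layer flags and total key size.
 * @flow:	TC flower classifier offload structure.
 * @tun_type:	Set to the tunnel type required by the flow.
 *
 * Return: negative value if the flow cannot be offloaded, 0 otherwise.
 */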
static int
nfp_flower_calculate_key_layers(struct nfp_app *app,
				struct net_device *netdev,
				struct nfp_fl_key_ls *ret_key_ls,
				struct tc_cls_flower_offload *flow,
				enum nfp_flower_tun_type *tun_type)
{
	struct flow_dissector_key_basic *mask_basic = NULL;
	struct flow_dissector_key_basic *key_basic = NULL;
	struct nfp_flower_priv *priv = app->priv;
	u32 key_layer_two;
	u8 key_layer;
	int key_size;
	int err;

	if (flow->dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR)
		return -EOPNOTSUPP;

	/* If any tunnel dissector key is used, the required minimum set
	 * must be present as well.
	 */
	if (flow->dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR &&
	    (flow->dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
	    != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
		return -EOPNOTSUPP;

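	/* Every key starts with the metadata/TCI section and the ingress
	 * port; further layers are added as the dissector keys require.
	 */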
	key_layer_two = 0;
	key_layer = NFP_FLOWER_LAYER_PORT;
	key_size = sizeof(struct nfp_flower_meta_tci) +
		   sizeof(struct nfp_flower_in_port);

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS) ||
	    dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_MPLS)) {
		key_layer |= NFP_FLOWER_LAYER_MAC;
		key_size += sizeof(struct nfp_flower_mac_mpls);
	}

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *flow_vlan;

		flow_vlan = skb_flow_dissector_target(flow->dissector,
						      FLOW_DISSECTOR_KEY_VLAN,
						      flow->mask);
		if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_PCP) &&
		    flow_vlan->vlan_priority)
			return -EOPNOTSUPP;
	}

	if (dissector_uses_key(flow->dissector,
			       FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_dissector_key_ipv4_addrs *mask_ipv4 = NULL;
		struct flow_dissector_key_ports *mask_enc_ports = NULL;
		struct flow_dissector_key_enc_opts *enc_op = NULL;
		struct flow_dissector_key_ports *enc_ports = NULL;
		struct flow_dissector_key_control *mask_enc_ctl =
			skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_ENC_CONTROL,
						  flow->mask);
		struct flow_dissector_key_control *enc_ctl =
			skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_ENC_CONTROL,
						  flow->key);

		if (mask_enc_ctl->addr_type != 0xffff ||
		    enc_ctl->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS)
			return -EOPNOTSUPP;

		/* These fields are already verified as used. */
		mask_ipv4 =
			skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  flow->mask);
		if (mask_ipv4->dst != cpu_to_be32(~0))
			return -EOPNOTSUPP;

		mask_enc_ports =
			skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  flow->mask);
		enc_ports =
			skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  flow->key);

		if (mask_enc_ports->dst != cpu_to_be16(~0))
			return -EOPNOTSUPP;

		if (dissector_uses_key(flow->dissector,
				       FLOW_DISSECTOR_KEY_ENC_OPTS)) {
			enc_op = skb_flow_dissector_target(flow->dissector,
							   FLOW_DISSECTOR_KEY_ENC_OPTS,
							   flow->key);
		}

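		/* The tunnel type is identified by the well-known UDP
		 * destination port, which must be matched exactly.
		 */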
		switch (enc_ports->dst) {
		case htons(NFP_FL_VXLAN_PORT):
			*tun_type = NFP_FL_TUNNEL_VXLAN;
			key_layer |= NFP_FLOWER_LAYER_VXLAN;
			key_size += sizeof(struct nfp_flower_ipv4_udp_tun);

			if (enc_op)
				return -EOPNOTSUPP;
			break;
		case htons(NFP_FL_GENEVE_PORT):
			if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE))
				return -EOPNOTSUPP;
			*tun_type = NFP_FL_TUNNEL_GENEVE;
			key_layer |= NFP_FLOWER_LAYER_EXT_META;
			key_size += sizeof(struct nfp_flower_ext_meta);
			key_layer_two |= NFP_FLOWER_LAYER2_GENEVE;
			key_size += sizeof(struct nfp_flower_ipv4_udp_tun);

			if (!enc_op)
				break;
			if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT))
				return -EOPNOTSUPP;
			err = nfp_flower_calc_opt_layer(enc_op, &key_layer_two,
							&key_size);
			if (err)
				return err;
			break;
		default:
			return -EOPNOTSUPP;
		}

		/* Ensure the ingress netdev matches the expected tun type. */
		if (!nfp_fl_netdev_is_tunnel_type(netdev, *tun_type))
			return -EOPNOTSUPP;
	}

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		mask_basic = skb_flow_dissector_target(flow->dissector,
						       FLOW_DISSECTOR_KEY_BASIC,
						       flow->mask);

		key_basic = skb_flow_dissector_target(flow->dissector,
						      FLOW_DISSECTOR_KEY_BASIC,
						      flow->key);
	}

	if (mask_basic && mask_basic->n_proto) {
		/* Ethernet type is present in the key. */
		switch (key_basic->n_proto) {
		case cpu_to_be16(ETH_P_IP):
			key_layer |= NFP_FLOWER_LAYER_IPV4;
			key_size += sizeof(struct nfp_flower_ipv4);
			break;

		case cpu_to_be16(ETH_P_IPV6):
			key_layer |= NFP_FLOWER_LAYER_IPV6;
			key_size += sizeof(struct nfp_flower_ipv6);
			break;

		/* Currently we do not offload ARP
		 * because we rely on it to get to the host.
		 */
		case cpu_to_be16(ETH_P_ARP):
			return -EOPNOTSUPP;

		case cpu_to_be16(ETH_P_MPLS_UC):
		case cpu_to_be16(ETH_P_MPLS_MC):
			if (!(key_layer & NFP_FLOWER_LAYER_MAC)) {
				key_layer |= NFP_FLOWER_LAYER_MAC;
				key_size += sizeof(struct nfp_flower_mac_mpls);
			}
			break;

		/* Will be included in layer 2. */
		case cpu_to_be16(ETH_P_8021Q):
			break;

		default:
			/* Other ethtype - we need to check the masks for
			 * the remainder of the key to ensure we can offload.
			 */
			if (nfp_flower_check_higher_than_mac(flow))
				return -EOPNOTSUPP;
			break;
		}
	}

	if (mask_basic && mask_basic->ip_proto) {
		/* The IP protocol is present in the key. */
		switch (key_basic->ip_proto) {
		case IPPROTO_TCP:
		case IPPROTO_UDP:
		case IPPROTO_SCTP:
		case IPPROTO_ICMP:
		case IPPROTO_ICMPV6:
			key_layer |= NFP_FLOWER_LAYER_TP;
			key_size += sizeof(struct nfp_flower_tp_ports);
			break;
		default:
			/* Other IP protocols cannot be offloaded. */
			return -EOPNOTSUPP;
		}
	}

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_TCP)) {
		struct flow_dissector_key_tcp *tcp;
		u32 tcp_flags;

		tcp = skb_flow_dissector_target(flow->dissector,
						FLOW_DISSECTOR_KEY_TCP,
						flow->key);
		tcp_flags = be16_to_cpu(tcp->flags);

		if (tcp_flags & ~NFP_FLOWER_SUPPORTED_TCPFLAGS)
			return -EOPNOTSUPP;

		/* We only support PSH and URG flags when either
		 * FIN, SYN or RST is present as well.
		 */
		if ((tcp_flags & (TCPHDR_PSH | TCPHDR_URG)) &&
		    !(tcp_flags & (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST)))
			return -EOPNOTSUPP;

		/* We need to store TCP flags in either the IPv4 or IPv6 key
		 * space, thus we need to ensure we include an IPv4/IPv6 key
		 * layer if we have not done so already.
		 */
		if (!key_basic)
			return -EOPNOTSUPP;

		if (!(key_layer & NFP_FLOWER_LAYER_IPV4) &&
		    !(key_layer & NFP_FLOWER_LAYER_IPV6)) {
			switch (key_basic->n_proto) {
			case cpu_to_be16(ETH_P_IP):
				key_layer |= NFP_FLOWER_LAYER_IPV4;
				key_size += sizeof(struct nfp_flower_ipv4);
				break;

			case cpu_to_be16(ETH_P_IPV6):
				key_layer |= NFP_FLOWER_LAYER_IPV6;
				key_size += sizeof(struct nfp_flower_ipv6);
				break;

			default:
				return -EOPNOTSUPP;
			}
		}
	}

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key_ctl;

		key_ctl = skb_flow_dissector_target(flow->dissector,
						    FLOW_DISSECTOR_KEY_CONTROL,
						    flow->key);

		if (key_ctl->flags & ~NFP_FLOWER_SUPPORTED_CTLFLAGS)
			return -EOPNOTSUPP;
	}

	ret_key_ls->key_layer = key_layer;
	ret_key_ls->key_layer_two = key_layer_two;
	ret_key_ls->key_size = key_size;

	return 0;
}

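/**
 * nfp_flower_allocate_new() - Allocate a flow payload and its buffers.
 * @key_layer:	Key layer sizes calculated for the flow.
 *
 * Return: pointer to the new flow payload, or NULL on allocation failure.
 */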
static struct nfp_fl_payload *
nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
{
	struct nfp_fl_payload *flow_pay;

	flow_pay = kmalloc(sizeof(*flow_pay), GFP_KERNEL);
	if (!flow_pay)
		return NULL;

	flow_pay->meta.key_len = key_layer->key_size;
	flow_pay->unmasked_data = kmalloc(key_layer->key_size, GFP_KERNEL);
	if (!flow_pay->unmasked_data)
		goto err_free_flow;

	flow_pay->meta.mask_len = key_layer->key_size;
	flow_pay->mask_data = kmalloc(key_layer->key_size, GFP_KERNEL);
	if (!flow_pay->mask_data)
		goto err_free_unmasked;

	flow_pay->action_data = kmalloc(NFP_FL_MAX_A_SIZ, GFP_KERNEL);
	if (!flow_pay->action_data)
		goto err_free_mask;

	flow_pay->nfp_tun_ipv4_addr = 0;
	flow_pay->meta.flags = 0;

	return flow_pay;

err_free_mask:
	kfree(flow_pay->mask_data);
err_free_unmasked:
	kfree(flow_pay->unmasked_data);
err_free_flow:
	kfree(flow_pay);
	return NULL;
}

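/* Illustrative only: a rule such as the following, installed on a
 * representor netdev, is dissected by the flower classifier and ends up
 * in nfp_flower_add_offload() below ("<repr>" is a placeholder):
 *
 *   tc filter add dev <repr> ingress protocol ip flower \
 *	dst_ip 10.0.0.1 ip_proto tcp action drop
 */
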
/**
 * nfp_flower_add_offload() - Adds a new flow to hardware.
 * @app:	Pointer to the APP handle
 * @netdev:	netdev structure.
 * @flow:	TC flower classifier offload structure.
 *
 * Compiles the flow into match and action payloads, adds it to the
 * flow hash table and offloads it to the firmware.
 *
 * Return: negative value on error, 0 if configured successfully.
 */
static int
nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
		       struct tc_cls_flower_offload *flow)
{
	enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload *flow_pay;
	struct nfp_fl_key_ls *key_layer;
	struct nfp_port *port = NULL;
	int err;

	if (nfp_netdev_is_nfp_repr(netdev))
		port = nfp_port_from_netdev(netdev);

	key_layer = kmalloc(sizeof(*key_layer), GFP_KERNEL);
	if (!key_layer)
		return -ENOMEM;

	err = nfp_flower_calculate_key_layers(app, netdev, key_layer, flow,
					      &tun_type);
	if (err)
		goto err_free_key_ls;

	flow_pay = nfp_flower_allocate_new(key_layer);
	if (!flow_pay) {
		err = -ENOMEM;
		goto err_free_key_ls;
	}

	err = nfp_flower_compile_flow_match(app, flow, key_layer, netdev,
					    flow_pay, tun_type);
	if (err)
		goto err_destroy_flow;

	err = nfp_flower_compile_action(app, flow, netdev, flow_pay);
	if (err)
		goto err_destroy_flow;

	err = nfp_compile_flow_metadata(app, flow, flow_pay, netdev);
	if (err)
		goto err_destroy_flow;

	flow_pay->tc_flower_cookie = flow->cookie;
	err = rhashtable_insert_fast(&priv->flow_table, &flow_pay->fl_node,
				     nfp_flower_table_params);
	if (err)
		goto err_release_metadata;

	err = nfp_flower_xmit_flow(app, flow_pay,
				   NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
	if (err)
		goto err_remove_rhash;

	if (port)
		port->tc_offload_cnt++;

	/* The key layer description is only needed at compile time; the
	 * flow payload itself is deallocated when the rule is destroyed.
	 */
	kfree(key_layer);

	return 0;

err_remove_rhash:
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
					    &flow_pay->fl_node,
					    nfp_flower_table_params));
err_release_metadata:
	nfp_modify_flow_metadata(app, flow_pay);
err_destroy_flow:
	kfree(flow_pay->action_data);
	kfree(flow_pay->mask_data);
	kfree(flow_pay->unmasked_data);
	kfree(flow_pay);
err_free_key_ls:
	kfree(key_layer);
	return err;
}

/**
 * nfp_flower_del_offload() - Removes a flow from hardware.
 * @app:	Pointer to the APP handle
 * @netdev:	netdev structure.
 * @flow:	TC flower classifier offload structure
 *
 * Removes the flow from the flow hash table, tells the firmware to
 * delete it and frees its payloads.
 *
 * Return: negative value on error, 0 if removed successfully.
 */
static int
nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
		       struct tc_cls_flower_offload *flow)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload *nfp_flow;
	struct nfp_port *port = NULL;
	int err;

	if (nfp_netdev_is_nfp_repr(netdev))
		port = nfp_port_from_netdev(netdev);

	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
	if (!nfp_flow)
		return -ENOENT;

	err = nfp_modify_flow_metadata(app, nfp_flow);
	if (err)
		goto err_free_flow;

	if (nfp_flow->nfp_tun_ipv4_addr)
		nfp_tunnel_del_ipv4_off(app, nfp_flow->nfp_tun_ipv4_addr);

	err = nfp_flower_xmit_flow(app, nfp_flow,
				   NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
	if (err)
		goto err_free_flow;

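	/* Fall through: on both the success and error paths the flow is
	 * removed from the table and its buffers are freed.
	 */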
err_free_flow:
	if (port)
		port->tc_offload_cnt--;
	kfree(nfp_flow->action_data);
	kfree(nfp_flow->mask_data);
	kfree(nfp_flow->unmasked_data);
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
					    &nfp_flow->fl_node,
					    nfp_flower_table_params));
	kfree_rcu(nfp_flow, rcu);
	return err;
}

/**
 * nfp_flower_get_stats() - Populates flow stats obtained from hardware.
 * @app:	Pointer to the APP handle
 * @netdev:	Netdev structure.
 * @flow:	TC flower classifier offload structure
 *
 * Populates a flow statistics structure which corresponds to a
 * specific flow.
 *
 * Return: negative value on error, 0 if stats populated successfully.
 */
static int
nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
		     struct tc_cls_flower_offload *flow)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload *nfp_flow;
	u32 ctx_id;

	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
	if (!nfp_flow)
		return -EINVAL;

	ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);

	spin_lock_bh(&priv->stats_lock);
	tcf_exts_stats_update(flow->exts, priv->stats[ctx_id].bytes,
			      priv->stats[ctx_id].pkts,
			      priv->stats[ctx_id].used);

	priv->stats[ctx_id].pkts = 0;
	priv->stats[ctx_id].bytes = 0;
	spin_unlock_bh(&priv->stats_lock);

	return 0;
}

static int
nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
			struct tc_cls_flower_offload *flower)
{
	if (!eth_proto_is_802_3(flower->common.protocol))
		return -EOPNOTSUPP;

	switch (flower->command) {
	case TC_CLSFLOWER_REPLACE:
		return nfp_flower_add_offload(app, netdev, flower);
	case TC_CLSFLOWER_DESTROY:
		return nfp_flower_del_offload(app, netdev, flower);
	case TC_CLSFLOWER_STATS:
		return nfp_flower_get_stats(app, netdev, flower);
	default:
		return -EOPNOTSUPP;
	}
}

static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
					void *type_data, void *cb_priv)
{
	struct nfp_repr *repr = cb_priv;

	if (!tc_cls_can_offload_and_chain0(repr->netdev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return nfp_flower_repr_offload(repr->app, repr->netdev,
					       type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static int nfp_flower_setup_tc_block(struct net_device *netdev,
				     struct tc_block_offload *f)
{
	struct nfp_repr *repr = netdev_priv(netdev);

	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_BLOCK_BIND:
		return tcf_block_cb_register(f->block,
					     nfp_flower_setup_tc_block_cb,
					     repr, repr, f->extack);
	case TC_BLOCK_UNBIND:
		tcf_block_cb_unregister(f->block,
					nfp_flower_setup_tc_block_cb,
					repr);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
			enum tc_setup_type type, void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return nfp_flower_setup_tc_block(netdev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

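/* Indirect TC block offload allows flows installed on netdevs the
 * driver does not own (e.g. tunnel devices) to be offloaded as well.
 */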
struct nfp_flower_indr_block_cb_priv {
	struct net_device *netdev;
	struct nfp_app *app;
	struct list_head list;
};

static struct nfp_flower_indr_block_cb_priv *
nfp_flower_indr_block_cb_priv_lookup(struct nfp_app *app,
				     struct net_device *netdev)
{
	struct nfp_flower_indr_block_cb_priv *cb_priv;
	struct nfp_flower_priv *priv = app->priv;

	/* All callback list access should be protected by RTNL. */
	ASSERT_RTNL();

	list_for_each_entry(cb_priv, &priv->indr_block_cb_priv, list)
		if (cb_priv->netdev == netdev)
			return cb_priv;

	return NULL;
}

static int nfp_flower_setup_indr_block_cb(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	struct nfp_flower_indr_block_cb_priv *priv = cb_priv;
	struct tc_cls_flower_offload *flower = type_data;

	if (flower->common.chain_index)
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return nfp_flower_repr_offload(priv->app, priv->netdev,
					       type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static int
nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
			       struct tc_block_offload *f)
{
	struct nfp_flower_indr_block_cb_priv *cb_priv;
	struct nfp_flower_priv *priv = app->priv;
	int err;

	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_BLOCK_BIND:
		cb_priv = kmalloc(sizeof(*cb_priv), GFP_KERNEL);
		if (!cb_priv)
			return -ENOMEM;

		cb_priv->netdev = netdev;
		cb_priv->app = app;
		list_add(&cb_priv->list, &priv->indr_block_cb_priv);

		err = tcf_block_cb_register(f->block,
					    nfp_flower_setup_indr_block_cb,
					    cb_priv, cb_priv, f->extack);
		if (err) {
			list_del(&cb_priv->list);
			kfree(cb_priv);
		}

		return err;
	case TC_BLOCK_UNBIND:
		cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev);
		if (!cb_priv)
			return -ENOENT;

		tcf_block_cb_unregister(f->block,
					nfp_flower_setup_indr_block_cb,
					cb_priv);
		list_del(&cb_priv->list);
		kfree(cb_priv);

		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int
nfp_flower_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
			    enum tc_setup_type type, void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return nfp_flower_setup_indr_tc_block(netdev, cb_priv,
						      type_data);
	default:
		return -EOPNOTSUPP;
	}
}

int nfp_flower_reg_indir_block_handler(struct nfp_app *app,
				       struct net_device *netdev,
				       unsigned long event)
{
	int err;

	if (!nfp_fl_is_netdev_to_offload(netdev))
		return NOTIFY_OK;

	if (event == NETDEV_REGISTER) {
		err = __tc_indr_block_cb_register(netdev, app,
						  nfp_flower_indr_setup_tc_cb,
						  app);
		if (err)
			nfp_flower_cmsg_warn(app,
					     "Indirect block reg failed - %s\n",
					     netdev->name);
	} else if (event == NETDEV_UNREGISTER) {
		__tc_indr_block_cb_unregister(netdev,
					      nfp_flower_indr_setup_tc_cb, app);
	}

	return NOTIFY_OK;
}