/*
 * Copyright (C) 2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below.  You have the option to license this software under the
 * complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/bitfield.h>
#include <net/geneve.h>
#include <net/pkt_cls.h>
#include <net/switchdev.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>

#include "cmsg.h"
#include "main.h"
#include "../nfp_net_repr.h"

/* The kernel versions of TUNNEL_* are not part of the ABI and are
 * therefore liable to change. Such changes would break our FW ABI.
 */
#define NFP_FL_TUNNEL_CSUM			cpu_to_be16(0x01)
#define NFP_FL_TUNNEL_KEY			cpu_to_be16(0x04)
#define NFP_FL_TUNNEL_GENEVE_OPT		cpu_to_be16(0x0800)
#define NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS	IP_TUNNEL_INFO_TX
#define NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS	(NFP_FL_TUNNEL_CSUM | \
						 NFP_FL_TUNNEL_KEY | \
						 NFP_FL_TUNNEL_GENEVE_OPT)
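
/* These values are required to mirror the kernel's TUNNEL_CSUM,
 * TUNNEL_KEY and TUNNEL_GENEVE_OPT flags; nfp_fl_set_ipv4_udp_tun()
 * below enforces the equivalence at compile time with a BUILD_BUG_ON().
 */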

static void nfp_fl_pop_vlan(struct nfp_fl_pop_vlan *pop_vlan)
{
	size_t act_size = sizeof(struct nfp_fl_pop_vlan);

	pop_vlan->head.jump_id = NFP_FL_ACTION_OPCODE_POP_VLAN;
	pop_vlan->head.len_lw = act_size >> NFP_FL_LW_SIZ;
	pop_vlan->reserved = 0;
}

static void
nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan,
		 const struct tc_action *action)
{
	size_t act_size = sizeof(struct nfp_fl_push_vlan);
	u16 tmp_push_vlan_tci;

	push_vlan->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_VLAN;
	push_vlan->head.len_lw = act_size >> NFP_FL_LW_SIZ;
	push_vlan->reserved = 0;
	push_vlan->vlan_tpid = tcf_vlan_push_proto(action);

	tmp_push_vlan_tci =
		FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, tcf_vlan_push_prio(action)) |
		FIELD_PREP(NFP_FL_PUSH_VLAN_VID, tcf_vlan_push_vid(action)) |
		NFP_FL_PUSH_VLAN_CFI;
	push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci);
}
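
/* Illustrative only (hypothetical interface name): a TC rule of roughly
 * this shape is compiled through the VLAN helpers above:
 *
 *   tc filter add dev nfp_repr0 parent ffff: protocol ip flower \
 *           action vlan push id 100 priority 2
 *
 * The pushed TCI is built from the action's priority and VLAN ID, with
 * NFP_FL_PUSH_VLAN_CFI always set.
 */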

static int
nfp_fl_pre_lag(struct nfp_app *app, const struct tc_action *action,
	       struct nfp_fl_payload *nfp_flow, int act_len)
{
	size_t act_size = sizeof(struct nfp_fl_pre_lag);
	struct nfp_fl_pre_lag *pre_lag;
	struct net_device *out_dev;
	int err;

	out_dev = tcf_mirred_dev(action);
	if (!out_dev || !netif_is_lag_master(out_dev))
		return 0;

	if (act_len + act_size > NFP_FL_MAX_A_SIZ)
		return -EOPNOTSUPP;

	/* The pre_lag action must be first on the action list.
	 * If other actions already exist they need to be pushed forward.
	 */
	if (act_len)
		memmove(nfp_flow->action_data + act_size,
			nfp_flow->action_data, act_len);

	pre_lag = (struct nfp_fl_pre_lag *)nfp_flow->action_data;
	err = nfp_flower_lag_populate_pre_action(app, out_dev, pre_lag);
	if (err)
		return err;

	pre_lag->head.jump_id = NFP_FL_ACTION_OPCODE_PRE_LAG;
	pre_lag->head.len_lw = act_size >> NFP_FL_LW_SIZ;

	nfp_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

	return act_size;
}
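
/* For example: if the action list already holds an output action, the
 * memmove() above shifts it up by sizeof(struct nfp_fl_pre_lag) so the
 * pre_lag action can occupy offset 0 of nfp_flow->action_data.
 */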

static bool nfp_fl_netdev_is_tunnel_type(struct net_device *out_dev,
					 enum nfp_flower_tun_type tun_type)
{
	if (!out_dev->rtnl_link_ops)
		return false;

	if (!strcmp(out_dev->rtnl_link_ops->kind, "vxlan"))
		return tun_type == NFP_FL_TUNNEL_VXLAN;

	if (!strcmp(out_dev->rtnl_link_ops->kind, "geneve"))
		return tun_type == NFP_FL_TUNNEL_GENEVE;

	return false;
}

static int
nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
	      const struct tc_action *action, struct nfp_fl_payload *nfp_flow,
	      bool last, struct net_device *in_dev,
	      enum nfp_flower_tun_type tun_type, int *tun_out_cnt)
{
	size_t act_size = sizeof(struct nfp_fl_output);
	struct nfp_flower_priv *priv = app->priv;
	struct net_device *out_dev;
	u16 tmp_flags;

	output->head.jump_id = NFP_FL_ACTION_OPCODE_OUTPUT;
	output->head.len_lw = act_size >> NFP_FL_LW_SIZ;

	out_dev = tcf_mirred_dev(action);
	if (!out_dev)
		return -EOPNOTSUPP;

	tmp_flags = last ? NFP_FL_OUT_FLAGS_LAST : 0;

	if (tun_type) {
		/* Verify the egress netdev matches the tunnel type. */
		if (!nfp_fl_netdev_is_tunnel_type(out_dev, tun_type))
			return -EOPNOTSUPP;

		if (*tun_out_cnt)
			return -EOPNOTSUPP;
		(*tun_out_cnt)++;

		output->flags = cpu_to_be16(tmp_flags |
					    NFP_FL_OUT_FLAGS_USE_TUN);
		output->port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type);
	} else if (netif_is_lag_master(out_dev) &&
		   priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
		int gid;

		output->flags = cpu_to_be16(tmp_flags);
		gid = nfp_flower_lag_get_output_id(app, out_dev);
		if (gid < 0)
			return gid;
		output->port = cpu_to_be32(NFP_FL_LAG_OUT | gid);
	} else {
		/* Set action output parameters. */
		output->flags = cpu_to_be16(tmp_flags);

		/* Only offload if egress ports are on the same device as the
		 * ingress port.
		 */
		if (!switchdev_port_same_parent_id(in_dev, out_dev))
			return -EOPNOTSUPP;
		if (!nfp_netdev_is_nfp_repr(out_dev))
			return -EOPNOTSUPP;

		output->port = cpu_to_be32(nfp_repr_get_port_id(out_dev));
		if (!output->port)
			return -EOPNOTSUPP;
	}
	nfp_flow->meta.shortcut = output->port;

	return 0;
}
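
/* Illustrative only (hypothetical representor names): a plain redirect
 * between two representors of the same NFP, e.g.
 *
 *   tc filter add dev nfp_repr0 parent ffff: protocol ip flower \
 *           action mirred egress redirect dev nfp_repr1
 *
 * takes the final branch above; tunnel and LAG egress take the first
 * two branches respectively.
 */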

static enum nfp_flower_tun_type
nfp_fl_get_tun_from_act_l4_port(struct nfp_app *app,
				const struct tc_action *action)
{
	struct ip_tunnel_info *tun = tcf_tunnel_info(action);
	struct nfp_flower_priv *priv = app->priv;

	switch (tun->key.tp_dst) {
	case htons(NFP_FL_VXLAN_PORT):
		return NFP_FL_TUNNEL_VXLAN;
	case htons(NFP_FL_GENEVE_PORT):
		if (priv->flower_ext_feats & NFP_FL_FEATS_GENEVE)
			return NFP_FL_TUNNEL_GENEVE;
		/* FALLTHROUGH */
	default:
		return NFP_FL_TUNNEL_NONE;
	}
}
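
/* The tunnel type is derived purely from the destination UDP port of
 * the tunnel_key set action. NFP_FL_VXLAN_PORT and NFP_FL_GENEVE_PORT
 * are defined elsewhere and are assumed to carry the conventional IANA
 * ports (4789 and 6081).
 */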

static struct nfp_fl_pre_tunnel *nfp_fl_pre_tunnel(char *act_data, int act_len)
{
	size_t act_size = sizeof(struct nfp_fl_pre_tunnel);
	struct nfp_fl_pre_tunnel *pre_tun_act;

	/* The pre_tunnel action must be first on the action list.
	 * If other actions already exist they need to be pushed forward.
	 */
	if (act_len)
		memmove(act_data + act_size, act_data, act_len);

	pre_tun_act = (struct nfp_fl_pre_tunnel *)act_data;

	memset(pre_tun_act, 0, act_size);

	pre_tun_act->head.jump_id = NFP_FL_ACTION_OPCODE_PRE_TUNNEL;
	pre_tun_act->head.len_lw = act_size >> NFP_FL_LW_SIZ;

	return pre_tun_act;
}

static int
nfp_fl_push_geneve_options(struct nfp_fl_payload *nfp_fl, int *list_len,
			   const struct tc_action *action)
{
	struct ip_tunnel_info *ip_tun = tcf_tunnel_info(action);
	int opt_len, opt_cnt, act_start, tot_push_len;
	u8 *src = ip_tunnel_info_opts(ip_tun);

	/* The HW needs the options populated in reverse order. Therefore
	 * we walk the options once to count them and compute the total
	 * push size, then populate them in reverse order in the action
	 * list.
	 */
	opt_cnt = 0;
	tot_push_len = 0;
	opt_len = ip_tun->options_len;
	while (opt_len > 0) {
		struct geneve_opt *opt = (struct geneve_opt *)src;

		opt_cnt++;
		if (opt_cnt > NFP_FL_MAX_GENEVE_OPT_CNT)
			return -EOPNOTSUPP;

		tot_push_len += sizeof(struct nfp_fl_push_geneve) +
			       opt->length * 4;
		if (tot_push_len > NFP_FL_MAX_GENEVE_OPT_ACT)
			return -EOPNOTSUPP;

		opt_len -= sizeof(struct geneve_opt) + opt->length * 4;
		src += sizeof(struct geneve_opt) + opt->length * 4;
	}

	if (*list_len + tot_push_len > NFP_FL_MAX_A_SIZ)
		return -EOPNOTSUPP;

	act_start = *list_len;
	*list_len += tot_push_len;
	src = ip_tunnel_info_opts(ip_tun);
	while (opt_cnt) {
		struct geneve_opt *opt = (struct geneve_opt *)src;
		struct nfp_fl_push_geneve *push;
		size_t act_size, len;

		opt_cnt--;
		act_size = sizeof(struct nfp_fl_push_geneve) + opt->length * 4;
		tot_push_len -= act_size;
		len = act_start + tot_push_len;

		push = (struct nfp_fl_push_geneve *)&nfp_fl->action_data[len];
		push->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_GENEVE;
		push->head.len_lw = act_size >> NFP_FL_LW_SIZ;
		push->reserved = 0;
		push->class = opt->opt_class;
		push->type = opt->type;
		push->length = opt->length;
		memcpy(&push->opt_data, opt->opt_data, opt->length * 4);

		src += sizeof(struct geneve_opt) + opt->length * 4;
	}

	return 0;
}
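
/* Worked example: given TC options A then B, the second loop above
 * writes A at the end of the reserved region and B at its start, so the
 * action list carries [push_geneve B][push_geneve A], the reverse of
 * the TC order, as the HW expects.
 */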

static int
nfp_fl_set_ipv4_udp_tun(struct nfp_app *app,
			struct nfp_fl_set_ipv4_udp_tun *set_tun,
			const struct tc_action *action,
			struct nfp_fl_pre_tunnel *pre_tun,
			enum nfp_flower_tun_type tun_type,
			struct net_device *netdev)
{
	size_t act_size = sizeof(struct nfp_fl_set_ipv4_udp_tun);
	struct ip_tunnel_info *ip_tun = tcf_tunnel_info(action);
	struct nfp_flower_priv *priv = app->priv;
	u32 tmp_set_ip_tun_type_index = 0;
	/* Currently only one pre-tunnel is supported, so the index is always 0. */
	int pretun_idx = 0;

	BUILD_BUG_ON(NFP_FL_TUNNEL_CSUM != TUNNEL_CSUM ||
		     NFP_FL_TUNNEL_KEY	!= TUNNEL_KEY ||
		     NFP_FL_TUNNEL_GENEVE_OPT != TUNNEL_GENEVE_OPT);
	if (ip_tun->options_len &&
	    (tun_type != NFP_FL_TUNNEL_GENEVE ||
	    !(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT)))
		return -EOPNOTSUPP;

	set_tun->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL;
	set_tun->head.len_lw = act_size >> NFP_FL_LW_SIZ;

	/* Set tunnel type and pre-tunnel index. */
	tmp_set_ip_tun_type_index |=
		FIELD_PREP(NFP_FL_IPV4_TUNNEL_TYPE, tun_type) |
		FIELD_PREP(NFP_FL_IPV4_PRE_TUN_INDEX, pretun_idx);

	set_tun->tun_type_index = cpu_to_be32(tmp_set_ip_tun_type_index);
	set_tun->tun_id = ip_tun->key.tun_id;

	if (ip_tun->key.ttl) {
		set_tun->ttl = ip_tun->key.ttl;
	} else {
		struct net *net = dev_net(netdev);
		struct flowi4 flow = {};
		struct rtable *rt;
		int err;

		/* Do a route lookup to determine the ttl; if the lookup
		 * fails, fall back to the default. Note that CONFIG_INET
		 * is a requirement of CONFIG_NET_SWITCHDEV so it must be
		 * defined here.
		 */
		flow.daddr = ip_tun->key.u.ipv4.dst;
		flow.flowi4_proto = IPPROTO_UDP;
		rt = ip_route_output_key(net, &flow);
		err = PTR_ERR_OR_ZERO(rt);
		if (!err) {
			set_tun->ttl = ip4_dst_hoplimit(&rt->dst);
			ip_rt_put(rt);
		} else {
			set_tun->ttl = net->ipv4.sysctl_ip_default_ttl;
		}
	}

	set_tun->tos = ip_tun->key.tos;

	if (!(ip_tun->key.tun_flags & NFP_FL_TUNNEL_KEY) ||
	    ip_tun->key.tun_flags & ~NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS)
		return -EOPNOTSUPP;
	set_tun->tun_flags = ip_tun->key.tun_flags;

	if (tun_type == NFP_FL_TUNNEL_GENEVE) {
		set_tun->tun_proto = htons(ETH_P_TEB);
		set_tun->tun_len = ip_tun->options_len / 4;
	}

	/* Complete the pre_tunnel action. */
	pre_tun->ipv4_dst = ip_tun->key.u.ipv4.dst;

	return 0;
}
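
/* Illustrative only (hypothetical addresses and device names): a VXLAN
 * encap rule of roughly this shape exercises the helper above:
 *
 *   tc filter add dev nfp_repr0 parent ffff: protocol ip flower \
 *           action tunnel_key set src_ip 10.0.0.1 dst_ip 10.0.0.2 \
 *                  id 42 dst_port 4789 \
 *           action mirred egress redirect dev vxlan0
 */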

static void nfp_fl_set_helper32(u32 value, u32 mask, u8 *p_exact, u8 *p_mask)
{
	u32 oldvalue = get_unaligned((u32 *)p_exact);
	u32 oldmask = get_unaligned((u32 *)p_mask);

	value &= mask;
	value |= oldvalue & ~mask;

	put_unaligned(oldmask | mask, (u32 *)p_mask);
	put_unaligned(value, (u32 *)p_exact);
}
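
/* Worked example with illustrative values:
 *   old exact = 0x11223344, old mask = 0xffff0000
 *   value     = 0x0000abcd, mask     = 0x0000ffff
 * leaves:
 *   *p_exact  = 0x1122abcd (new bits under the new mask, old bits kept)
 *   *p_mask   = 0xffffffff (union of the old and new masks)
 */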

static int
nfp_fl_set_eth(const struct tc_action *action, int idx, u32 off,
	       struct nfp_fl_set_eth *set_eth)
{
	u32 exact, mask;

	if (off + 4 > ETH_ALEN * 2)
		return -EOPNOTSUPP;

	mask = ~tcf_pedit_mask(action, idx);
	exact = tcf_pedit_val(action, idx);

	if (exact & ~mask)
		return -EOPNOTSUPP;

	nfp_fl_set_helper32(exact, mask, &set_eth->eth_addr_val[off],
			    &set_eth->eth_addr_mask[off]);

	set_eth->reserved = cpu_to_be16(0);
	set_eth->head.jump_id = NFP_FL_ACTION_OPCODE_SET_ETHERNET;
	set_eth->head.len_lw = sizeof(*set_eth) >> NFP_FL_LW_SIZ;

	return 0;
}

static int
nfp_fl_set_ip4(const struct tc_action *action, int idx, u32 off,
	       struct nfp_fl_set_ip4_addrs *set_ip_addr)
{
	__be32 exact, mask;

	/* We expect tcf_pedit to return a big-endian value. */
	mask = (__force __be32)~tcf_pedit_mask(action, idx);
	exact = (__force __be32)tcf_pedit_val(action, idx);

	if (exact & ~mask)
		return -EOPNOTSUPP;

	switch (off) {
	case offsetof(struct iphdr, daddr):
		set_ip_addr->ipv4_dst_mask = mask;
		set_ip_addr->ipv4_dst = exact;
		break;
	case offsetof(struct iphdr, saddr):
		set_ip_addr->ipv4_src_mask = mask;
		set_ip_addr->ipv4_src = exact;
		break;
	default:
		return -EOPNOTSUPP;
	}

	set_ip_addr->reserved = cpu_to_be16(0);
	set_ip_addr->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS;
	set_ip_addr->head.len_lw = sizeof(*set_ip_addr) >> NFP_FL_LW_SIZ;

	return 0;
}

static void
nfp_fl_set_ip6_helper(int opcode_tag, int idx, __be32 exact, __be32 mask,
		      struct nfp_fl_set_ipv6_addr *ip6)
{
	ip6->ipv6[idx % 4].mask = mask;
	ip6->ipv6[idx % 4].exact = exact;

	ip6->reserved = cpu_to_be16(0);
	ip6->head.jump_id = opcode_tag;
	ip6->head.len_lw = sizeof(*ip6) >> NFP_FL_LW_SIZ;
}

static int
nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off,
	       struct nfp_fl_set_ipv6_addr *ip_dst,
	       struct nfp_fl_set_ipv6_addr *ip_src)
{
	__be32 exact, mask;

	/* We expect tcf_pedit to return a big-endian value. */
	mask = (__force __be32)~tcf_pedit_mask(action, idx);
	exact = (__force __be32)tcf_pedit_val(action, idx);

	if (exact & ~mask)
		return -EOPNOTSUPP;

	if (off < offsetof(struct ipv6hdr, saddr))
		return -EOPNOTSUPP;
	else if (off < offsetof(struct ipv6hdr, daddr))
		nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_SRC, idx,
				      exact, mask, ip_src);
	else if (off < offsetof(struct ipv6hdr, daddr) +
		       sizeof(struct in6_addr))
		nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_DST, idx,
				      exact, mask, ip_dst);
	else
		return -EOPNOTSUPP;

	return 0;
}

static int
nfp_fl_set_tport(const struct tc_action *action, int idx, u32 off,
		 struct nfp_fl_set_tport *set_tport, int opcode)
{
	u32 exact, mask;

	if (off)
		return -EOPNOTSUPP;

	mask = ~tcf_pedit_mask(action, idx);
	exact = tcf_pedit_val(action, idx);

	if (exact & ~mask)
		return -EOPNOTSUPP;

	nfp_fl_set_helper32(exact, mask, set_tport->tp_port_val,
			    set_tport->tp_port_mask);

	set_tport->reserved = cpu_to_be16(0);
	set_tport->head.jump_id = opcode;
	set_tport->head.len_lw = sizeof(*set_tport) >> NFP_FL_LW_SIZ;

	return 0;
}

static u32 nfp_fl_csum_l4_to_flag(u8 ip_proto)
{
	switch (ip_proto) {
	case 0:
		/* Filter doesn't force a proto match; both TCP and UDP
		 * will be updated if encountered.
		 */
		return TCA_CSUM_UPDATE_FLAG_TCP | TCA_CSUM_UPDATE_FLAG_UDP;
	case IPPROTO_TCP:
		return TCA_CSUM_UPDATE_FLAG_TCP;
	case IPPROTO_UDP:
		return TCA_CSUM_UPDATE_FLAG_UDP;
	default:
		/* All other protocols will be ignored by the FW. */
		return 0;
	}
}

static int
nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
	     char *nfp_action, int *a_len, u32 *csum_updated)
{
	struct nfp_fl_set_ipv6_addr set_ip6_dst, set_ip6_src;
	struct nfp_fl_set_ip4_addrs set_ip_addr;
	struct nfp_fl_set_tport set_tport;
	struct nfp_fl_set_eth set_eth;
	enum pedit_header_type htype;
	int idx, nkeys, err;
	size_t act_size;
	u32 offset, cmd;
	u8 ip_proto = 0;

	memset(&set_ip6_dst, 0, sizeof(set_ip6_dst));
	memset(&set_ip6_src, 0, sizeof(set_ip6_src));
	memset(&set_ip_addr, 0, sizeof(set_ip_addr));
	memset(&set_tport, 0, sizeof(set_tport));
	memset(&set_eth, 0, sizeof(set_eth));
	nkeys = tcf_pedit_nkeys(action);

	for (idx = 0; idx < nkeys; idx++) {
		cmd = tcf_pedit_cmd(action, idx);
		htype = tcf_pedit_htype(action, idx);
		offset = tcf_pedit_offset(action, idx);

		if (cmd != TCA_PEDIT_KEY_EX_CMD_SET)
			return -EOPNOTSUPP;

		switch (htype) {
		case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
			err = nfp_fl_set_eth(action, idx, offset, &set_eth);
			break;
		case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
			err = nfp_fl_set_ip4(action, idx, offset, &set_ip_addr);
			break;
		case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
			err = nfp_fl_set_ip6(action, idx, offset, &set_ip6_dst,
					     &set_ip6_src);
			break;
		case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
			err = nfp_fl_set_tport(action, idx, offset, &set_tport,
					       NFP_FL_ACTION_OPCODE_SET_TCP);
			break;
		case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
			err = nfp_fl_set_tport(action, idx, offset, &set_tport,
					       NFP_FL_ACTION_OPCODE_SET_UDP);
			break;
		default:
			return -EOPNOTSUPP;
		}
		if (err)
			return err;
	}

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *basic;

		basic = skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  flow->key);
		ip_proto = basic->ip_proto;
	}

	if (set_eth.head.len_lw) {
		act_size = sizeof(set_eth);
		memcpy(nfp_action, &set_eth, act_size);
		*a_len += act_size;
	} else if (set_ip_addr.head.len_lw) {
		act_size = sizeof(set_ip_addr);
		memcpy(nfp_action, &set_ip_addr, act_size);
		*a_len += act_size;

		/* Hardware will automatically fix the IPv4 and TCP/UDP checksums. */
		*csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR |
				nfp_fl_csum_l4_to_flag(ip_proto);
	} else if (set_ip6_dst.head.len_lw && set_ip6_src.head.len_lw) {
		/* TC compiles set src and dst IPv6 address as a single
		 * action; the hardware requires this to be 2 separate
		 * actions.
		 */
		act_size = sizeof(set_ip6_src);
		memcpy(nfp_action, &set_ip6_src, act_size);
		*a_len += act_size;

		act_size = sizeof(set_ip6_dst);
		memcpy(&nfp_action[sizeof(set_ip6_src)], &set_ip6_dst,
		       act_size);
		*a_len += act_size;

		/* Hardware will automatically fix the TCP/UDP checksum. */
		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
	} else if (set_ip6_dst.head.len_lw) {
		act_size = sizeof(set_ip6_dst);
		memcpy(nfp_action, &set_ip6_dst, act_size);
		*a_len += act_size;

		/* Hardware will automatically fix the TCP/UDP checksum. */
		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
	} else if (set_ip6_src.head.len_lw) {
		act_size = sizeof(set_ip6_src);
		memcpy(nfp_action, &set_ip6_src, act_size);
		*a_len += act_size;

		/* Hardware will automatically fix the TCP/UDP checksum. */
		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
	} else if (set_tport.head.len_lw) {
		act_size = sizeof(set_tport);
		memcpy(nfp_action, &set_tport, act_size);
		*a_len += act_size;

		/* Hardware will automatically fix the TCP/UDP checksum. */
		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
	}

	return 0;
}
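
/* Illustrative only (hypothetical address and device names): an extended
 * pedit rewrite of the IPv4 destination such as
 *
 *   tc filter add dev nfp_repr0 parent ffff: protocol ip flower \
 *           action pedit ex munge ip dst set 192.168.1.1 pipe \
 *           action csum iph and tcp \
 *           action mirred egress redirect dev nfp_repr1
 *
 * is compiled by nfp_fl_pedit() above; the csum action is then accepted
 * in nfp_flower_loop_action() because the rewrite already marked those
 * checksums as fixed by the HW.
 */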

static int
nfp_flower_output_action(struct nfp_app *app, const struct tc_action *a,
			 struct nfp_fl_payload *nfp_fl, int *a_len,
			 struct net_device *netdev, bool last,
			 enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
			 int *out_cnt, u32 *csum_updated)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_output *output;
	int err, prelag_size;

	/* If csum_updated has not been reset by now, it means HW will
	 * incorrectly update csums when they are not requested.
	 */
	if (*csum_updated)
		return -EOPNOTSUPP;

	if (*a_len + sizeof(struct nfp_fl_output) > NFP_FL_MAX_A_SIZ)
		return -EOPNOTSUPP;

	output = (struct nfp_fl_output *)&nfp_fl->action_data[*a_len];
	err = nfp_fl_output(app, output, a, nfp_fl, last, netdev, *tun_type,
			    tun_out_cnt);
	if (err)
		return err;

	*a_len += sizeof(struct nfp_fl_output);

	if (priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
		/* nfp_fl_pre_lag returns a negative error code or the size
		 * of the prelag action added. This will be 0 if the flow is
		 * not egressing to a lag dev.
		 */
		prelag_size = nfp_fl_pre_lag(app, a, nfp_fl, *a_len);
		if (prelag_size < 0)
			return prelag_size;
		else if (prelag_size > 0 && (!last || *out_cnt))
			return -EOPNOTSUPP;

		*a_len += prelag_size;
	}
	(*out_cnt)++;

	return 0;
}

static int
nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a,
		       struct tc_cls_flower_offload *flow,
		       struct nfp_fl_payload *nfp_fl, int *a_len,
		       struct net_device *netdev,
		       enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
		       int *out_cnt, u32 *csum_updated)
{
	struct nfp_fl_set_ipv4_udp_tun *set_tun;
	struct nfp_fl_pre_tunnel *pre_tun;
	struct nfp_fl_push_vlan *psh_v;
	struct nfp_fl_pop_vlan *pop_v;
	int err;

	if (is_tcf_gact_shot(a)) {
		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_DROP);
	} else if (is_tcf_mirred_egress_redirect(a)) {
		err = nfp_flower_output_action(app, a, nfp_fl, a_len, netdev,
					       true, tun_type, tun_out_cnt,
					       out_cnt, csum_updated);
		if (err)
			return err;

	} else if (is_tcf_mirred_egress_mirror(a)) {
		err = nfp_flower_output_action(app, a, nfp_fl, a_len, netdev,
					       false, tun_type, tun_out_cnt,
					       out_cnt, csum_updated);
		if (err)
			return err;

	} else if (is_tcf_vlan(a) && tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
		if (*a_len + sizeof(struct nfp_fl_pop_vlan) > NFP_FL_MAX_A_SIZ)
			return -EOPNOTSUPP;

		pop_v = (struct nfp_fl_pop_vlan *)&nfp_fl->action_data[*a_len];
		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_POPV);

		nfp_fl_pop_vlan(pop_v);
		*a_len += sizeof(struct nfp_fl_pop_vlan);
	} else if (is_tcf_vlan(a) && tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
		if (*a_len + sizeof(struct nfp_fl_push_vlan) > NFP_FL_MAX_A_SIZ)
			return -EOPNOTSUPP;

		psh_v = (struct nfp_fl_push_vlan *)&nfp_fl->action_data[*a_len];
		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

		nfp_fl_push_vlan(psh_v, a);
		*a_len += sizeof(struct nfp_fl_push_vlan);
	} else if (is_tcf_tunnel_set(a)) {
		struct ip_tunnel_info *ip_tun = tcf_tunnel_info(a);
		struct nfp_repr *repr = netdev_priv(netdev);

		*tun_type = nfp_fl_get_tun_from_act_l4_port(repr->app, a);
		if (*tun_type == NFP_FL_TUNNEL_NONE)
			return -EOPNOTSUPP;

		if (ip_tun->mode & ~NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS)
			return -EOPNOTSUPP;

		/* A pre-tunnel action is required for tunnel encap.
		 * It checks for next hop entries on the NFP.
		 * If none are found, the packet falls back before other
		 * actions are applied.
		 */
		if (*a_len + sizeof(struct nfp_fl_pre_tunnel) +
		    sizeof(struct nfp_fl_set_ipv4_udp_tun) > NFP_FL_MAX_A_SIZ)
			return -EOPNOTSUPP;

		pre_tun = nfp_fl_pre_tunnel(nfp_fl->action_data, *a_len);
		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
		*a_len += sizeof(struct nfp_fl_pre_tunnel);

		err = nfp_fl_push_geneve_options(nfp_fl, a_len, a);
		if (err)
			return err;

		set_tun = (void *)&nfp_fl->action_data[*a_len];
		err = nfp_fl_set_ipv4_udp_tun(app, set_tun, a, pre_tun,
					      *tun_type, netdev);
		if (err)
			return err;
		*a_len += sizeof(struct nfp_fl_set_ipv4_udp_tun);
	} else if (is_tcf_tunnel_release(a)) {
		/* Tunnel decap is handled by default so accept the action. */
		return 0;
	} else if (is_tcf_pedit(a)) {
		if (nfp_fl_pedit(a, flow, &nfp_fl->action_data[*a_len],
				 a_len, csum_updated))
			return -EOPNOTSUPP;
	} else if (is_tcf_csum(a)) {
		/* Reject if the csum action requests recalc of a csum we
		 * have not fixed.
		 */
		if (tcf_csum_update_flags(a) & ~*csum_updated)
			return -EOPNOTSUPP;
		/* If we will correctly fix the csum we can remove it from
		 * the csum update list, which is later used to check
		 * support.
		 */
		*csum_updated &= ~tcf_csum_update_flags(a);
	} else {
		/* Currently we do not handle any other actions. */
		return -EOPNOTSUPP;
	}

	return 0;
}

int nfp_flower_compile_action(struct nfp_app *app,
			      struct tc_cls_flower_offload *flow,
			      struct net_device *netdev,
			      struct nfp_fl_payload *nfp_flow)
{
	int act_len, act_cnt, err, tun_out_cnt, out_cnt, i;
	enum nfp_flower_tun_type tun_type;
	const struct tc_action *a;
	u32 csum_updated = 0;

	memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ);
	nfp_flow->meta.act_len = 0;
	tun_type = NFP_FL_TUNNEL_NONE;
	act_len = 0;
	act_cnt = 0;
	tun_out_cnt = 0;
	out_cnt = 0;

	tcf_exts_for_each_action(i, a, flow->exts) {
		err = nfp_flower_loop_action(app, a, flow, nfp_flow, &act_len,
					     netdev, &tun_type, &tun_out_cnt,
					     &out_cnt, &csum_updated);
		if (err)
			return err;
		act_cnt++;
	}

	/* We optimise with a shortcut when the action list is small;
	 * unfortunately this is not possible once there is more than
	 * one action in the list.
	 */
	if (act_cnt > 1)
		nfp_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

	nfp_flow->meta.act_len = act_len;

	return 0;
}