// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/bitfield.h>
#include <net/geneve.h>
#include <net/pkt_cls.h>
#include <net/switchdev.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>

#include "cmsg.h"
#include "main.h"
#include "../nfp_net_repr.h"

/* The kernel versions of TUNNEL_* are not ABI and therefore vulnerable
 * to change. Such changes will break our FW ABI.
 */
#define NFP_FL_TUNNEL_CSUM			cpu_to_be16(0x01)
#define NFP_FL_TUNNEL_KEY			cpu_to_be16(0x04)
#define NFP_FL_TUNNEL_GENEVE_OPT		cpu_to_be16(0x0800)
#define NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS	IP_TUNNEL_INFO_TX
#define NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS	(NFP_FL_TUNNEL_CSUM | \
						 NFP_FL_TUNNEL_KEY | \
						 NFP_FL_TUNNEL_GENEVE_OPT)

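/* Build a firmware pop VLAN action: only the opcode and length need to
 * be filled in, the reserved field is cleared for the FW ABI.
 */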
static void nfp_fl_pop_vlan(struct nfp_fl_pop_vlan *pop_vlan)
{
	size_t act_size = sizeof(struct nfp_fl_pop_vlan);

	pop_vlan->head.jump_id = NFP_FL_ACTION_OPCODE_POP_VLAN;
	pop_vlan->head.len_lw = act_size >> NFP_FL_LW_SIZ;
	pop_vlan->reserved = 0;
}

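/* Build a firmware push VLAN action from a TC vlan push action. The TCI
 * is assembled from the TC priority and VLAN ID, with the CFI/DEI bit
 * always set.
 */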
static void
nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan,
		 const struct tc_action *action)
{
	size_t act_size = sizeof(struct nfp_fl_push_vlan);
	u16 tmp_push_vlan_tci;

	push_vlan->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_VLAN;
	push_vlan->head.len_lw = act_size >> NFP_FL_LW_SIZ;
	push_vlan->reserved = 0;
	push_vlan->vlan_tpid = tcf_vlan_push_proto(action);

	tmp_push_vlan_tci =
		FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, tcf_vlan_push_prio(action)) |
		FIELD_PREP(NFP_FL_PUSH_VLAN_VID, tcf_vlan_push_vid(action)) |
		NFP_FL_PUSH_VLAN_CFI;
	push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci);
}

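/* If the action egresses to a LAG master, prepend a pre_lag action to
 * the action list. Returns the size of the action added, 0 if the
 * egress device is not a LAG master, or a negative error code.
 */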
static int
nfp_fl_pre_lag(struct nfp_app *app, const struct tc_action *action,
	       struct nfp_fl_payload *nfp_flow, int act_len)
{
	size_t act_size = sizeof(struct nfp_fl_pre_lag);
	struct nfp_fl_pre_lag *pre_lag;
	struct net_device *out_dev;
	int err;

	out_dev = tcf_mirred_dev(action);
	if (!out_dev || !netif_is_lag_master(out_dev))
		return 0;

	if (act_len + act_size > NFP_FL_MAX_A_SIZ)
		return -EOPNOTSUPP;

	/* Pre_lag action must be first on action list.
	 * If other actions already exist they need to be pushed forward.
	 */
	if (act_len)
		memmove(nfp_flow->action_data + act_size,
			nfp_flow->action_data, act_len);

	pre_lag = (struct nfp_fl_pre_lag *)nfp_flow->action_data;
	err = nfp_flower_lag_populate_pre_action(app, out_dev, pre_lag);
	if (err)
		return err;

	pre_lag->head.jump_id = NFP_FL_ACTION_OPCODE_PRE_LAG;
	pre_lag->head.len_lw = act_size >> NFP_FL_LW_SIZ;

	nfp_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

	return act_size;
}

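/* Check that the kind of the egress netdev matches the tunnel type
 * selected for the flow.
 */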
static bool nfp_fl_netdev_is_tunnel_type(struct net_device *out_dev,
					 enum nfp_flower_tun_type tun_type)
{
	if (!out_dev->rtnl_link_ops)
		return false;

	if (!strcmp(out_dev->rtnl_link_ops->kind, "vxlan"))
		return tun_type == NFP_FL_TUNNEL_VXLAN;

	if (!strcmp(out_dev->rtnl_link_ops->kind, "geneve"))
		return tun_type == NFP_FL_TUNNEL_GENEVE;

	return false;
}

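/* Build a firmware output action. The destination is encoded in the
 * port field as a tunnel type, a LAG group ID or the port ID of an NFP
 * representor, depending on the egress device.
 */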
static int
nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
	      const struct tc_action *action, struct nfp_fl_payload *nfp_flow,
	      bool last, struct net_device *in_dev,
	      enum nfp_flower_tun_type tun_type, int *tun_out_cnt)
{
	size_t act_size = sizeof(struct nfp_fl_output);
	struct nfp_flower_priv *priv = app->priv;
	struct net_device *out_dev;
	u16 tmp_flags;

	output->head.jump_id = NFP_FL_ACTION_OPCODE_OUTPUT;
	output->head.len_lw = act_size >> NFP_FL_LW_SIZ;

	out_dev = tcf_mirred_dev(action);
	if (!out_dev)
		return -EOPNOTSUPP;

	tmp_flags = last ? NFP_FL_OUT_FLAGS_LAST : 0;

	if (tun_type) {
		/* Verify the egress netdev matches the tunnel type. */
		if (!nfp_fl_netdev_is_tunnel_type(out_dev, tun_type))
			return -EOPNOTSUPP;

		if (*tun_out_cnt)
			return -EOPNOTSUPP;
		(*tun_out_cnt)++;

		output->flags = cpu_to_be16(tmp_flags |
					    NFP_FL_OUT_FLAGS_USE_TUN);
		output->port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type);
	} else if (netif_is_lag_master(out_dev) &&
		   priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
		int gid;

		output->flags = cpu_to_be16(tmp_flags);
		gid = nfp_flower_lag_get_output_id(app, out_dev);
		if (gid < 0)
			return gid;
		output->port = cpu_to_be32(NFP_FL_LAG_OUT | gid);
	} else {
		/* Set action output parameters. */
		output->flags = cpu_to_be16(tmp_flags);

		/* Only offload if egress ports are on the same device as the
		 * ingress port.
		 */
		if (!switchdev_port_same_parent_id(in_dev, out_dev))
			return -EOPNOTSUPP;
		if (!nfp_netdev_is_nfp_repr(out_dev))
			return -EOPNOTSUPP;

		output->port = cpu_to_be32(nfp_repr_get_port_id(out_dev));
		if (!output->port)
			return -EOPNOTSUPP;
	}
	nfp_flow->meta.shortcut = output->port;

	return 0;
}

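/* Select the tunnel type based on the destination UDP port of the
 * tunnel_key set action. Geneve is only selected if the firmware
 * advertises support for it.
 */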
static enum nfp_flower_tun_type
nfp_fl_get_tun_from_act_l4_port(struct nfp_app *app,
				const struct tc_action *action)
{
	struct ip_tunnel_info *tun = tcf_tunnel_info(action);
	struct nfp_flower_priv *priv = app->priv;

	switch (tun->key.tp_dst) {
	case htons(NFP_FL_VXLAN_PORT):
		return NFP_FL_TUNNEL_VXLAN;
	case htons(NFP_FL_GENEVE_PORT):
		if (priv->flower_ext_feats & NFP_FL_FEATS_GENEVE)
			return NFP_FL_TUNNEL_GENEVE;
		/* FALLTHROUGH */
	default:
		return NFP_FL_TUNNEL_NONE;
	}
}

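/* Prepend a pre_tunnel action to the action list. Its ipv4_dst field is
 * filled in later by nfp_fl_set_ipv4_udp_tun().
 */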
static struct nfp_fl_pre_tunnel *nfp_fl_pre_tunnel(char *act_data, int act_len)
{
	size_t act_size = sizeof(struct nfp_fl_pre_tunnel);
	struct nfp_fl_pre_tunnel *pre_tun_act;

	/* Pre_tunnel action must be first on action list.
	 * If other actions already exist they need to be pushed forward.
	 */
	if (act_len)
		memmove(act_data + act_size, act_data, act_len);

	pre_tun_act = (struct nfp_fl_pre_tunnel *)act_data;

	memset(pre_tun_act, 0, act_size);

	pre_tun_act->head.jump_id = NFP_FL_ACTION_OPCODE_PRE_TUNNEL;
	pre_tun_act->head.len_lw = act_size >> NFP_FL_LW_SIZ;

	return pre_tun_act;
}

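/* Convert the geneve options of a tunnel_key set action into
 * push_geneve firmware actions. Note that opt->length counts 4-byte
 * words of option data, so e.g. an option carrying 8 bytes of data
 * (opt->length == 2) takes up sizeof(struct nfp_fl_push_geneve) + 8
 * bytes of action list space.
 */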
static int
nfp_fl_push_geneve_options(struct nfp_fl_payload *nfp_fl, int *list_len,
			   const struct tc_action *action)
{
	struct ip_tunnel_info *ip_tun = tcf_tunnel_info(action);
	int opt_len, opt_cnt, act_start, tot_push_len;
	u8 *src = ip_tunnel_info_opts(ip_tun);

	/* We need to populate the options in reverse order for HW.
	 * Therefore we go through the options, calculating the
	 * number of options and the total size, then we populate
	 * them in reverse order in the action list.
	 */
	opt_cnt = 0;
	tot_push_len = 0;
	opt_len = ip_tun->options_len;
	while (opt_len > 0) {
		struct geneve_opt *opt = (struct geneve_opt *)src;

		opt_cnt++;
		if (opt_cnt > NFP_FL_MAX_GENEVE_OPT_CNT)
			return -EOPNOTSUPP;

		tot_push_len += sizeof(struct nfp_fl_push_geneve) +
				opt->length * 4;
		if (tot_push_len > NFP_FL_MAX_GENEVE_OPT_ACT)
			return -EOPNOTSUPP;

		opt_len -= sizeof(struct geneve_opt) + opt->length * 4;
		src += sizeof(struct geneve_opt) + opt->length * 4;
	}

	if (*list_len + tot_push_len > NFP_FL_MAX_A_SIZ)
		return -EOPNOTSUPP;

	act_start = *list_len;
	*list_len += tot_push_len;
	src = ip_tunnel_info_opts(ip_tun);
	while (opt_cnt) {
		struct geneve_opt *opt = (struct geneve_opt *)src;
		struct nfp_fl_push_geneve *push;
		size_t act_size, len;

		opt_cnt--;
		act_size = sizeof(struct nfp_fl_push_geneve) + opt->length * 4;
		tot_push_len -= act_size;
		len = act_start + tot_push_len;

		push = (struct nfp_fl_push_geneve *)&nfp_fl->action_data[len];
		push->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_GENEVE;
		push->head.len_lw = act_size >> NFP_FL_LW_SIZ;
		push->reserved = 0;
		push->class = opt->opt_class;
		push->type = opt->type;
		push->length = opt->length;
		memcpy(&push->opt_data, opt->opt_data, opt->length * 4);

		src += sizeof(struct geneve_opt) + opt->length * 4;
	}

	return 0;
}

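/* Build the set_ipv4_udp_tun firmware action from a tunnel_key set
 * action. The TTL is taken from the tunnel key if given, otherwise it
 * is derived from a route lookup on the tunnel destination.
 */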
static int
nfp_fl_set_ipv4_udp_tun(struct nfp_app *app,
			struct nfp_fl_set_ipv4_udp_tun *set_tun,
			const struct tc_action *action,
			struct nfp_fl_pre_tunnel *pre_tun,
			enum nfp_flower_tun_type tun_type,
			struct net_device *netdev)
{
	size_t act_size = sizeof(struct nfp_fl_set_ipv4_udp_tun);
	struct ip_tunnel_info *ip_tun = tcf_tunnel_info(action);
	struct nfp_flower_priv *priv = app->priv;
	u32 tmp_set_ip_tun_type_index = 0;
	/* Currently support one pre-tunnel so index is always 0. */
	int pretun_idx = 0;

	BUILD_BUG_ON(NFP_FL_TUNNEL_CSUM != TUNNEL_CSUM ||
		     NFP_FL_TUNNEL_KEY != TUNNEL_KEY ||
		     NFP_FL_TUNNEL_GENEVE_OPT != TUNNEL_GENEVE_OPT);
	if (ip_tun->options_len &&
	    (tun_type != NFP_FL_TUNNEL_GENEVE ||
	    !(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT)))
		return -EOPNOTSUPP;

	set_tun->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL;
	set_tun->head.len_lw = act_size >> NFP_FL_LW_SIZ;

	/* Set tunnel type and pre-tunnel index. */
	tmp_set_ip_tun_type_index |=
		FIELD_PREP(NFP_FL_IPV4_TUNNEL_TYPE, tun_type) |
		FIELD_PREP(NFP_FL_IPV4_PRE_TUN_INDEX, pretun_idx);

	set_tun->tun_type_index = cpu_to_be32(tmp_set_ip_tun_type_index);
	set_tun->tun_id = ip_tun->key.tun_id;

	if (ip_tun->key.ttl) {
		set_tun->ttl = ip_tun->key.ttl;
	} else {
		struct net *net = dev_net(netdev);
		struct flowi4 flow = {};
		struct rtable *rt;
		int err;

		/* Do a route lookup to determine ttl - if it fails then use
		 * the default. Note that CONFIG_INET is a requirement of
		 * CONFIG_NET_SWITCHDEV so it must be defined here.
		 */
		flow.daddr = ip_tun->key.u.ipv4.dst;
		flow.flowi4_proto = IPPROTO_UDP;
		rt = ip_route_output_key(net, &flow);
		err = PTR_ERR_OR_ZERO(rt);
		if (!err) {
			set_tun->ttl = ip4_dst_hoplimit(&rt->dst);
			ip_rt_put(rt);
		} else {
			set_tun->ttl = net->ipv4.sysctl_ip_default_ttl;
		}
	}

	set_tun->tos = ip_tun->key.tos;

	if (!(ip_tun->key.tun_flags & NFP_FL_TUNNEL_KEY) ||
	    ip_tun->key.tun_flags & ~NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS)
		return -EOPNOTSUPP;
	set_tun->tun_flags = ip_tun->key.tun_flags;

	if (tun_type == NFP_FL_TUNNEL_GENEVE) {
		set_tun->tun_proto = htons(ETH_P_TEB);
		set_tun->tun_len = ip_tun->options_len / 4;
	}

	/* Complete pre_tunnel action. */
	pre_tun->ipv4_dst = ip_tun->key.u.ipv4.dst;

	return 0;
}

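/* Merge a 32-bit masked write into the value/mask pair accumulated at
 * p_exact/p_mask, preserving any bits already set by earlier pedit
 * keys.
 */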
static void nfp_fl_set_helper32(u32 value, u32 mask, u8 *p_exact, u8 *p_mask)
{
	u32 oldvalue = get_unaligned((u32 *)p_exact);
	u32 oldmask = get_unaligned((u32 *)p_mask);

	value &= mask;
	value |= oldvalue & ~mask;

	put_unaligned(oldmask | mask, (u32 *)p_mask);
	put_unaligned(value, (u32 *)p_exact);
}

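/* Translate a pedit key on the ethernet header into a set_eth action.
 * The 4-byte write must fall within the 12 bytes of destination and
 * source MAC addresses.
 */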
static int
nfp_fl_set_eth(const struct tc_action *action, int idx, u32 off,
	       struct nfp_fl_set_eth *set_eth)
{
	u32 exact, mask;

	if (off + 4 > ETH_ALEN * 2)
		return -EOPNOTSUPP;

	mask = ~tcf_pedit_mask(action, idx);
	exact = tcf_pedit_val(action, idx);

	if (exact & ~mask)
		return -EOPNOTSUPP;

	nfp_fl_set_helper32(exact, mask, &set_eth->eth_addr_val[off],
			    &set_eth->eth_addr_mask[off]);

	set_eth->reserved = cpu_to_be16(0);
	set_eth->head.jump_id = NFP_FL_ACTION_OPCODE_SET_ETHERNET;
	set_eth->head.len_lw = sizeof(*set_eth) >> NFP_FL_LW_SIZ;

	return 0;
}

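/* Translate a pedit key on the IPv4 header into a set_ip4_addrs action.
 * Only writes to the source and destination addresses are supported.
 */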
static int
nfp_fl_set_ip4(const struct tc_action *action, int idx, u32 off,
	       struct nfp_fl_set_ip4_addrs *set_ip_addr)
{
	__be32 exact, mask;

	/* We are expecting tcf_pedit to return a big endian value */
	mask = (__force __be32)~tcf_pedit_mask(action, idx);
	exact = (__force __be32)tcf_pedit_val(action, idx);

	if (exact & ~mask)
		return -EOPNOTSUPP;

	switch (off) {
	case offsetof(struct iphdr, daddr):
		set_ip_addr->ipv4_dst_mask |= mask;
		set_ip_addr->ipv4_dst &= ~mask;
		set_ip_addr->ipv4_dst |= exact & mask;
		break;
	case offsetof(struct iphdr, saddr):
		set_ip_addr->ipv4_src_mask |= mask;
		set_ip_addr->ipv4_src &= ~mask;
		set_ip_addr->ipv4_src |= exact & mask;
		break;
	default:
		return -EOPNOTSUPP;
	}

	set_ip_addr->reserved = cpu_to_be16(0);
	set_ip_addr->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS;
	set_ip_addr->head.len_lw = sizeof(*set_ip_addr) >> NFP_FL_LW_SIZ;

	return 0;
}

static void
nfp_fl_set_ip6_helper(int opcode_tag, u8 word, __be32 exact, __be32 mask,
		      struct nfp_fl_set_ipv6_addr *ip6)
{
	ip6->ipv6[word].mask |= mask;
	ip6->ipv6[word].exact &= ~mask;
	ip6->ipv6[word].exact |= exact & mask;

	ip6->reserved = cpu_to_be16(0);
	ip6->head.jump_id = opcode_tag;
	ip6->head.len_lw = sizeof(*ip6) >> NFP_FL_LW_SIZ;
}

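/* Translate a pedit key on the IPv6 header into set_ipv6_addr actions.
 * The 128-bit addresses are handled as four 32-bit words; the pedit
 * offset selects the word within the source or destination address.
 */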
static int
nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off,
	       struct nfp_fl_set_ipv6_addr *ip_dst,
	       struct nfp_fl_set_ipv6_addr *ip_src)
{
	__be32 exact, mask;
	u8 word;

	/* We are expecting tcf_pedit to return a big endian value */
	mask = (__force __be32)~tcf_pedit_mask(action, idx);
	exact = (__force __be32)tcf_pedit_val(action, idx);

	if (exact & ~mask)
		return -EOPNOTSUPP;

	if (off < offsetof(struct ipv6hdr, saddr)) {
		return -EOPNOTSUPP;
	} else if (off < offsetof(struct ipv6hdr, daddr)) {
		word = (off - offsetof(struct ipv6hdr, saddr)) / sizeof(exact);
		nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_SRC, word,
				      exact, mask, ip_src);
	} else if (off < offsetof(struct ipv6hdr, daddr) +
		       sizeof(struct in6_addr)) {
		word = (off - offsetof(struct ipv6hdr, daddr)) / sizeof(exact);
		nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_DST, word,
				      exact, mask, ip_dst);
	} else {
		return -EOPNOTSUPP;
	}

	return 0;
}

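/* Translate a pedit key on the TCP/UDP header into a set_tport action.
 * Only offset 0, which covers the source and destination ports, is
 * supported.
 */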
static int
nfp_fl_set_tport(const struct tc_action *action, int idx, u32 off,
		 struct nfp_fl_set_tport *set_tport, int opcode)
{
	u32 exact, mask;

	if (off)
		return -EOPNOTSUPP;

	mask = ~tcf_pedit_mask(action, idx);
	exact = tcf_pedit_val(action, idx);

	if (exact & ~mask)
		return -EOPNOTSUPP;

	nfp_fl_set_helper32(exact, mask, set_tport->tp_port_val,
			    set_tport->tp_port_mask);

	set_tport->reserved = cpu_to_be16(0);
	set_tport->head.jump_id = opcode;
	set_tport->head.len_lw = sizeof(*set_tport) >> NFP_FL_LW_SIZ;

	return 0;
}

static u32 nfp_fl_csum_l4_to_flag(u8 ip_proto)
{
	switch (ip_proto) {
	case 0:
		/* Filter doesn't force proto match;
		 * both TCP and UDP will be updated if encountered.
		 */
		return TCA_CSUM_UPDATE_FLAG_TCP | TCA_CSUM_UPDATE_FLAG_UDP;
	case IPPROTO_TCP:
		return TCA_CSUM_UPDATE_FLAG_TCP;
	case IPPROTO_UDP:
		return TCA_CSUM_UPDATE_FLAG_UDP;
	default:
		/* All other protocols will be ignored by FW */
		return 0;
	}
}

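/* Walk all keys of a pedit action, accumulating them into per-header
 * set actions, then append the populated actions to the action list and
 * record which checksums the hardware will update as a side effect.
 */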
static int
nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
	     char *nfp_action, int *a_len, u32 *csum_updated)
{
	struct nfp_fl_set_ipv6_addr set_ip6_dst, set_ip6_src;
	struct nfp_fl_set_ip4_addrs set_ip_addr;
	struct nfp_fl_set_tport set_tport;
	struct nfp_fl_set_eth set_eth;
	enum pedit_header_type htype;
	int idx, nkeys, err;
	size_t act_size = 0;
	u32 offset, cmd;
	u8 ip_proto = 0;

	memset(&set_ip6_dst, 0, sizeof(set_ip6_dst));
	memset(&set_ip6_src, 0, sizeof(set_ip6_src));
	memset(&set_ip_addr, 0, sizeof(set_ip_addr));
	memset(&set_tport, 0, sizeof(set_tport));
	memset(&set_eth, 0, sizeof(set_eth));
	nkeys = tcf_pedit_nkeys(action);

	for (idx = 0; idx < nkeys; idx++) {
		cmd = tcf_pedit_cmd(action, idx);
		htype = tcf_pedit_htype(action, idx);
		offset = tcf_pedit_offset(action, idx);

		if (cmd != TCA_PEDIT_KEY_EX_CMD_SET)
			return -EOPNOTSUPP;

		switch (htype) {
		case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
			err = nfp_fl_set_eth(action, idx, offset, &set_eth);
			break;
		case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
			err = nfp_fl_set_ip4(action, idx, offset, &set_ip_addr);
			break;
		case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
			err = nfp_fl_set_ip6(action, idx, offset, &set_ip6_dst,
					     &set_ip6_src);
			break;
		case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
			err = nfp_fl_set_tport(action, idx, offset, &set_tport,
					       NFP_FL_ACTION_OPCODE_SET_TCP);
			break;
		case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
			err = nfp_fl_set_tport(action, idx, offset, &set_tport,
					       NFP_FL_ACTION_OPCODE_SET_UDP);
			break;
		default:
			return -EOPNOTSUPP;
		}
		if (err)
			return err;
	}

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *basic;

		basic = skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  flow->key);
		ip_proto = basic->ip_proto;
	}

	if (set_eth.head.len_lw) {
		act_size = sizeof(set_eth);
		memcpy(nfp_action, &set_eth, act_size);
		*a_len += act_size;
	}
	if (set_ip_addr.head.len_lw) {
		nfp_action += act_size;
		act_size = sizeof(set_ip_addr);
		memcpy(nfp_action, &set_ip_addr, act_size);
		*a_len += act_size;

		/* Hardware will automatically fix IPv4 and TCP/UDP checksum. */
		*csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR |
				nfp_fl_csum_l4_to_flag(ip_proto);
	}
	if (set_ip6_dst.head.len_lw && set_ip6_src.head.len_lw) {
		/* TC compiles set src and dst IPv6 address as a single action,
		 * but the hardware requires this to be 2 separate actions.
		 */
		nfp_action += act_size;
		act_size = sizeof(set_ip6_src);
		memcpy(nfp_action, &set_ip6_src, act_size);
		*a_len += act_size;

		act_size = sizeof(set_ip6_dst);
		memcpy(&nfp_action[sizeof(set_ip6_src)], &set_ip6_dst,
		       act_size);
		*a_len += act_size;

		/* Hardware will automatically fix TCP/UDP checksum. */
		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
	} else if (set_ip6_dst.head.len_lw) {
		nfp_action += act_size;
		act_size = sizeof(set_ip6_dst);
		memcpy(nfp_action, &set_ip6_dst, act_size);
		*a_len += act_size;

		/* Hardware will automatically fix TCP/UDP checksum. */
		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
	} else if (set_ip6_src.head.len_lw) {
		nfp_action += act_size;
		act_size = sizeof(set_ip6_src);
		memcpy(nfp_action, &set_ip6_src, act_size);
		*a_len += act_size;

		/* Hardware will automatically fix TCP/UDP checksum. */
		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
	}
	if (set_tport.head.len_lw) {
		nfp_action += act_size;
		act_size = sizeof(set_tport);
		memcpy(nfp_action, &set_tport, act_size);
		*a_len += act_size;

		/* Hardware will automatically fix TCP/UDP checksum. */
		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
	}

	return 0;
}

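/* Append an output action for a mirred redirect or mirror action. If
 * the firmware supports LAG, a pre_lag action may also be prepended to
 * the action list.
 */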
static int
nfp_flower_output_action(struct nfp_app *app, const struct tc_action *a,
			 struct nfp_fl_payload *nfp_fl, int *a_len,
			 struct net_device *netdev, bool last,
			 enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
			 int *out_cnt, u32 *csum_updated)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_output *output;
	int err, prelag_size;

	/* If csum_updated has not been reset by now, it means HW will
	 * incorrectly update csums when they are not requested.
	 */
	if (*csum_updated)
		return -EOPNOTSUPP;

	if (*a_len + sizeof(struct nfp_fl_output) > NFP_FL_MAX_A_SIZ)
		return -EOPNOTSUPP;

	output = (struct nfp_fl_output *)&nfp_fl->action_data[*a_len];
	err = nfp_fl_output(app, output, a, nfp_fl, last, netdev, *tun_type,
			    tun_out_cnt);
	if (err)
		return err;

	*a_len += sizeof(struct nfp_fl_output);

	if (priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
		/* nfp_fl_pre_lag returns -err or size of prelag action added.
		 * This will be 0 if it is not egressing to a lag dev.
		 */
		prelag_size = nfp_fl_pre_lag(app, a, nfp_fl, *a_len);
		if (prelag_size < 0)
			return prelag_size;
		else if (prelag_size > 0 && (!last || *out_cnt))
			return -EOPNOTSUPP;

		*a_len += prelag_size;
	}
	(*out_cnt)++;

	return 0;
}

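/* Translate a single TC action into its firmware action(s), appending
 * them to nfp_fl->action_data and updating the running action length,
 * tunnel state, output counts and checksum-updated flags.
 */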
static int
nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a,
		       struct tc_cls_flower_offload *flow,
		       struct nfp_fl_payload *nfp_fl, int *a_len,
		       struct net_device *netdev,
		       enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
		       int *out_cnt, u32 *csum_updated)
{
	struct nfp_fl_set_ipv4_udp_tun *set_tun;
	struct nfp_fl_pre_tunnel *pre_tun;
	struct nfp_fl_push_vlan *psh_v;
	struct nfp_fl_pop_vlan *pop_v;
	int err;

	if (is_tcf_gact_shot(a)) {
		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_DROP);
	} else if (is_tcf_mirred_egress_redirect(a)) {
		err = nfp_flower_output_action(app, a, nfp_fl, a_len, netdev,
					       true, tun_type, tun_out_cnt,
					       out_cnt, csum_updated);
		if (err)
			return err;

	} else if (is_tcf_mirred_egress_mirror(a)) {
		err = nfp_flower_output_action(app, a, nfp_fl, a_len, netdev,
					       false, tun_type, tun_out_cnt,
					       out_cnt, csum_updated);
		if (err)
			return err;

	} else if (is_tcf_vlan(a) && tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
		if (*a_len + sizeof(struct nfp_fl_pop_vlan) > NFP_FL_MAX_A_SIZ)
			return -EOPNOTSUPP;

		pop_v = (struct nfp_fl_pop_vlan *)&nfp_fl->action_data[*a_len];
		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_POPV);

		nfp_fl_pop_vlan(pop_v);
		*a_len += sizeof(struct nfp_fl_pop_vlan);
	} else if (is_tcf_vlan(a) && tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
		if (*a_len + sizeof(struct nfp_fl_push_vlan) > NFP_FL_MAX_A_SIZ)
			return -EOPNOTSUPP;

		psh_v = (struct nfp_fl_push_vlan *)&nfp_fl->action_data[*a_len];
		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

		nfp_fl_push_vlan(psh_v, a);
		*a_len += sizeof(struct nfp_fl_push_vlan);
	} else if (is_tcf_tunnel_set(a)) {
		struct ip_tunnel_info *ip_tun = tcf_tunnel_info(a);
		struct nfp_repr *repr = netdev_priv(netdev);

		*tun_type = nfp_fl_get_tun_from_act_l4_port(repr->app, a);
		if (*tun_type == NFP_FL_TUNNEL_NONE)
			return -EOPNOTSUPP;

		if (ip_tun->mode & ~NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS)
			return -EOPNOTSUPP;

		/* Pre-tunnel action is required for tunnel encap.
		 * This checks for next hop entries on NFP.
		 * If none, the packet falls back before applying other actions.
		 */
		if (*a_len + sizeof(struct nfp_fl_pre_tunnel) +
		    sizeof(struct nfp_fl_set_ipv4_udp_tun) > NFP_FL_MAX_A_SIZ)
			return -EOPNOTSUPP;

		pre_tun = nfp_fl_pre_tunnel(nfp_fl->action_data, *a_len);
		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
		*a_len += sizeof(struct nfp_fl_pre_tunnel);

		err = nfp_fl_push_geneve_options(nfp_fl, a_len, a);
		if (err)
			return err;

		set_tun = (void *)&nfp_fl->action_data[*a_len];
		err = nfp_fl_set_ipv4_udp_tun(app, set_tun, a, pre_tun,
					      *tun_type, netdev);
		if (err)
			return err;
		*a_len += sizeof(struct nfp_fl_set_ipv4_udp_tun);
	} else if (is_tcf_tunnel_release(a)) {
		/* Tunnel decap is handled by default so accept action. */
		return 0;
	} else if (is_tcf_pedit(a)) {
		if (nfp_fl_pedit(a, flow, &nfp_fl->action_data[*a_len],
				 a_len, csum_updated))
			return -EOPNOTSUPP;
	} else if (is_tcf_csum(a)) {
		/* csum action requests recalc of something we have not fixed */
		if (tcf_csum_update_flags(a) & ~*csum_updated)
			return -EOPNOTSUPP;
		/* If we will correctly fix the csum we can remove it from the
		 * csum update list, which will later be used to check support.
		 */
		*csum_updated &= ~tcf_csum_update_flags(a);
	} else {
		/* Currently we do not handle any other actions. */
		return -EOPNOTSUPP;
	}

	return 0;
}

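/* Entry point for action compilation: translate the TC action list of a
 * flower rule into the firmware action list of nfp_flow. For example, a
 * rule added with (hypothetical interface names):
 *   tc filter add dev nfp_p0 ingress flower \
 *     action vlan push id 100 \
 *     action mirred egress redirect dev nfp_p1
 * compiles to a push_vlan action followed by an output action.
 */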
int nfp_flower_compile_action(struct nfp_app *app,
			      struct tc_cls_flower_offload *flow,
			      struct net_device *netdev,
			      struct nfp_fl_payload *nfp_flow)
{
	int act_len, act_cnt, err, tun_out_cnt, out_cnt, i;
	enum nfp_flower_tun_type tun_type;
	const struct tc_action *a;
	u32 csum_updated = 0;

	memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ);
	nfp_flow->meta.act_len = 0;
	tun_type = NFP_FL_TUNNEL_NONE;
	act_len = 0;
	act_cnt = 0;
	tun_out_cnt = 0;
	out_cnt = 0;

	tcf_exts_for_each_action(i, a, flow->exts) {
		err = nfp_flower_loop_action(app, a, flow, nfp_flow, &act_len,
					     netdev, &tun_type, &tun_out_cnt,
					     &out_cnt, &csum_updated);
		if (err)
			return err;
		act_cnt++;
	}

	/* We optimise when the action list is small; this unfortunately
	 * cannot happen once there is more than one action in the list.
	 */
	if (act_cnt > 1)
		nfp_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

	nfp_flow->meta.act_len = act_len;

	return 0;
}