/*
 * Copyright (C) 2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below.  You have the option to license this software under the
 * complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/bitfield.h>
#include <net/geneve.h>
#include <net/pkt_cls.h>
#include <net/switchdev.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>

#include "cmsg.h"
#include "main.h"
#include "../nfp_net_repr.h"

/* The kernel versions of TUNNEL_* are not ABI and therefore vulnerable
 * to change. Such changes will break our FW ABI.
 */
#define NFP_FL_TUNNEL_CSUM			cpu_to_be16(0x01)
#define NFP_FL_TUNNEL_KEY			cpu_to_be16(0x04)
#define NFP_FL_TUNNEL_GENEVE_OPT		cpu_to_be16(0x0800)
#define NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS	(NFP_FL_TUNNEL_CSUM | \
						 NFP_FL_TUNNEL_KEY | \
						 NFP_FL_TUNNEL_GENEVE_OPT)
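
/* These values mirror TUNNEL_CSUM, TUNNEL_KEY and TUNNEL_GENEVE_OPT from
 * <net/ip_tunnels.h>; nfp_fl_set_ipv4_udp_tun() contains a BUILD_BUG_ON()
 * that catches the two sets of definitions drifting apart.
 */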

static void nfp_fl_pop_vlan(struct nfp_fl_pop_vlan *pop_vlan)
{
	size_t act_size = sizeof(struct nfp_fl_pop_vlan);

	pop_vlan->head.jump_id = NFP_FL_ACTION_OPCODE_POP_VLAN;
	pop_vlan->head.len_lw = act_size >> NFP_FL_LW_SIZ;
	pop_vlan->reserved = 0;
}

static void
nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan,
		 const struct tc_action *action)
{
	size_t act_size = sizeof(struct nfp_fl_push_vlan);
	u16 tmp_push_vlan_tci;

	push_vlan->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_VLAN;
	push_vlan->head.len_lw = act_size >> NFP_FL_LW_SIZ;
	push_vlan->reserved = 0;
	push_vlan->vlan_tpid = tcf_vlan_push_proto(action);

	tmp_push_vlan_tci =
		FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, tcf_vlan_push_prio(action)) |
		FIELD_PREP(NFP_FL_PUSH_VLAN_VID, tcf_vlan_push_vid(action)) |
		NFP_FL_PUSH_VLAN_CFI;
	push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci);
}
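
/* Illustrative example, assuming the NFP_FL_PUSH_VLAN_* masks in main.h
 * follow the standard 802.1Q TCI layout (PCP in bits 15-13, CFI/DEI in
 * bit 12, VID in bits 11-0): a push of prio 2, VID 100 yields
 * vlan_tci = (2 << 13) | BIT(12) | 100 = 0x5064, big endian on the wire.
 */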

static int
nfp_fl_pre_lag(struct nfp_app *app, const struct tc_action *action,
	       struct nfp_fl_payload *nfp_flow, int act_len)
{
	size_t act_size = sizeof(struct nfp_fl_pre_lag);
	struct nfp_fl_pre_lag *pre_lag;
	struct net_device *out_dev;
	int err;

	out_dev = tcf_mirred_dev(action);
	if (!out_dev || !netif_is_lag_master(out_dev))
		return 0;

	if (act_len + act_size > NFP_FL_MAX_A_SIZ)
		return -EOPNOTSUPP;

	/* Pre_lag action must be first on action list.
	 * If other actions already exist they need to be pushed forward.
	 */
	if (act_len)
		memmove(nfp_flow->action_data + act_size,
			nfp_flow->action_data, act_len);

	pre_lag = (struct nfp_fl_pre_lag *)nfp_flow->action_data;
	err = nfp_flower_lag_populate_pre_action(app, out_dev, pre_lag);
	if (err)
		return err;

	pre_lag->head.jump_id = NFP_FL_ACTION_OPCODE_PRE_LAG;
	pre_lag->head.len_lw = act_size >> NFP_FL_LW_SIZ;

	nfp_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

	return act_size;
}
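
/* After nfp_fl_pre_lag() succeeds, the action list layout is
 *   [pre_lag][previously compiled actions ...]
 * i.e. the pre_lag action always occupies offset 0 of action_data.
 */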

static bool nfp_fl_netdev_is_tunnel_type(struct net_device *out_dev,
					 enum nfp_flower_tun_type tun_type)
{
	if (!out_dev->rtnl_link_ops)
		return false;

	if (!strcmp(out_dev->rtnl_link_ops->kind, "vxlan"))
		return tun_type == NFP_FL_TUNNEL_VXLAN;

	if (!strcmp(out_dev->rtnl_link_ops->kind, "geneve"))
		return tun_type == NFP_FL_TUNNEL_GENEVE;

	return false;
}

static int
nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
	      const struct tc_action *action, struct nfp_fl_payload *nfp_flow,
	      bool last, struct net_device *in_dev,
	      enum nfp_flower_tun_type tun_type, int *tun_out_cnt)
{
	size_t act_size = sizeof(struct nfp_fl_output);
	struct nfp_flower_priv *priv = app->priv;
	struct net_device *out_dev;
	u16 tmp_flags;

	output->head.jump_id = NFP_FL_ACTION_OPCODE_OUTPUT;
	output->head.len_lw = act_size >> NFP_FL_LW_SIZ;

	out_dev = tcf_mirred_dev(action);
	if (!out_dev)
		return -EOPNOTSUPP;

	tmp_flags = last ? NFP_FL_OUT_FLAGS_LAST : 0;

	if (tun_type) {
		/* Verify the egress netdev matches the tunnel type. */
		if (!nfp_fl_netdev_is_tunnel_type(out_dev, tun_type))
			return -EOPNOTSUPP;

		if (*tun_out_cnt)
			return -EOPNOTSUPP;
		(*tun_out_cnt)++;

		output->flags = cpu_to_be16(tmp_flags |
					    NFP_FL_OUT_FLAGS_USE_TUN);
		output->port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type);
	} else if (netif_is_lag_master(out_dev) &&
		   priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
		int gid;

		output->flags = cpu_to_be16(tmp_flags);
		gid = nfp_flower_lag_get_output_id(app, out_dev);
		if (gid < 0)
			return gid;
		output->port = cpu_to_be32(NFP_FL_LAG_OUT | gid);
	} else {
		/* Set action output parameters. */
		output->flags = cpu_to_be16(tmp_flags);

		/* Only offload if egress ports are on the same device as the
		 * ingress port.
		 */
		if (!switchdev_port_same_parent_id(in_dev, out_dev))
			return -EOPNOTSUPP;
		if (!nfp_netdev_is_nfp_repr(out_dev))
			return -EOPNOTSUPP;

		output->port = cpu_to_be32(nfp_repr_get_port_id(out_dev));
		if (!output->port)
			return -EOPNOTSUPP;
	}
	nfp_flow->meta.shortcut = output->port;

	return 0;
}

static enum nfp_flower_tun_type
nfp_fl_get_tun_from_act_l4_port(struct nfp_app *app,
				const struct tc_action *action)
{
	struct ip_tunnel_info *tun = tcf_tunnel_info(action);
	struct nfp_flower_priv *priv = app->priv;

	switch (tun->key.tp_dst) {
	case htons(NFP_FL_VXLAN_PORT):
		return NFP_FL_TUNNEL_VXLAN;
	case htons(NFP_FL_GENEVE_PORT):
		if (priv->flower_ext_feats & NFP_FL_FEATS_GENEVE)
			return NFP_FL_TUNNEL_GENEVE;
		/* FALLTHROUGH */
	default:
		return NFP_FL_TUNNEL_NONE;
	}
}
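
/* NFP_FL_VXLAN_PORT and NFP_FL_GENEVE_PORT (defined in main.h) are expected
 * to be the IANA default destination ports, 4789 and 6081. Tunnels set to
 * any other L4 port fall through to NFP_FL_TUNNEL_NONE and are rejected by
 * the caller.
 */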

static struct nfp_fl_pre_tunnel *nfp_fl_pre_tunnel(char *act_data, int act_len)
{
	size_t act_size = sizeof(struct nfp_fl_pre_tunnel);
	struct nfp_fl_pre_tunnel *pre_tun_act;

	/* Pre_tunnel action must be first on action list.
	 * If other actions already exist they need to be pushed forward.
	 */
	if (act_len)
		memmove(act_data + act_size, act_data, act_len);

	pre_tun_act = (struct nfp_fl_pre_tunnel *)act_data;

	memset(pre_tun_act, 0, act_size);

	pre_tun_act->head.jump_id = NFP_FL_ACTION_OPCODE_PRE_TUNNEL;
	pre_tun_act->head.len_lw = act_size >> NFP_FL_LW_SIZ;

	return pre_tun_act;
}
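
/* The returned action is only partially built here; nfp_fl_set_ipv4_udp_tun()
 * later completes it by filling in the tunnel destination (ipv4_dst).
 */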

static int
nfp_fl_push_geneve_options(struct nfp_fl_payload *nfp_fl, int *list_len,
			   const struct tc_action *action)
{
	struct ip_tunnel_info *ip_tun = tcf_tunnel_info(action);
	int opt_len, opt_cnt, act_start, tot_push_len;
	u8 *src = ip_tunnel_info_opts(ip_tun);

	/* We need to populate the options in reverse order for HW.
	 * Therefore we go through the options, calculating the
	 * number of options and the total size, then we populate
	 * them in reverse order in the action list.
	 */
	opt_cnt = 0;
	tot_push_len = 0;
	opt_len = ip_tun->options_len;
	while (opt_len > 0) {
		struct geneve_opt *opt = (struct geneve_opt *)src;

		opt_cnt++;
		if (opt_cnt > NFP_FL_MAX_GENEVE_OPT_CNT)
			return -EOPNOTSUPP;

		tot_push_len += sizeof(struct nfp_fl_push_geneve) +
				opt->length * 4;
		if (tot_push_len > NFP_FL_MAX_GENEVE_OPT_ACT)
			return -EOPNOTSUPP;

		opt_len -= sizeof(struct geneve_opt) + opt->length * 4;
		src += sizeof(struct geneve_opt) + opt->length * 4;
	}

	if (*list_len + tot_push_len > NFP_FL_MAX_A_SIZ)
		return -EOPNOTSUPP;

	act_start = *list_len;
	*list_len += tot_push_len;
	src = ip_tunnel_info_opts(ip_tun);
	while (opt_cnt) {
		struct geneve_opt *opt = (struct geneve_opt *)src;
		struct nfp_fl_push_geneve *push;
		size_t act_size, len;

		opt_cnt--;
		act_size = sizeof(struct nfp_fl_push_geneve) + opt->length * 4;
		tot_push_len -= act_size;
		len = act_start + tot_push_len;

		push = (struct nfp_fl_push_geneve *)&nfp_fl->action_data[len];
		push->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_GENEVE;
		push->head.len_lw = act_size >> NFP_FL_LW_SIZ;
		push->reserved = 0;
		push->class = opt->opt_class;
		push->type = opt->type;
		push->length = opt->length;
		memcpy(&push->opt_data, opt->opt_data, opt->length * 4);

		src += sizeof(struct geneve_opt) + opt->length * 4;
	}

	return 0;
}
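
/* Worked example: for TC options O1, O2, O3 the first pass counts
 * opt_cnt = 3 and sums tot_push_len; the second pass then writes each
 * option at a descending offset, so the action list carries them as
 * O3, O2, O1 - the reverse order the firmware expects.
 */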

static int
nfp_fl_set_ipv4_udp_tun(struct nfp_app *app,
			struct nfp_fl_set_ipv4_udp_tun *set_tun,
			const struct tc_action *action,
			struct nfp_fl_pre_tunnel *pre_tun,
			enum nfp_flower_tun_type tun_type,
			struct net_device *netdev)
{
	size_t act_size = sizeof(struct nfp_fl_set_ipv4_udp_tun);
	struct ip_tunnel_info *ip_tun = tcf_tunnel_info(action);
	struct nfp_flower_priv *priv = app->priv;
	u32 tmp_set_ip_tun_type_index = 0;
	/* Currently we support one pre-tunnel, so the index is always 0. */
	int pretun_idx = 0;

	BUILD_BUG_ON(NFP_FL_TUNNEL_CSUM != TUNNEL_CSUM ||
		     NFP_FL_TUNNEL_KEY != TUNNEL_KEY ||
		     NFP_FL_TUNNEL_GENEVE_OPT != TUNNEL_GENEVE_OPT);
	if (ip_tun->options_len &&
	    (tun_type != NFP_FL_TUNNEL_GENEVE ||
	    !(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT)))
		return -EOPNOTSUPP;

	set_tun->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL;
	set_tun->head.len_lw = act_size >> NFP_FL_LW_SIZ;

	/* Set tunnel type and pre-tunnel index. */
	tmp_set_ip_tun_type_index |=
		FIELD_PREP(NFP_FL_IPV4_TUNNEL_TYPE, tun_type) |
		FIELD_PREP(NFP_FL_IPV4_PRE_TUN_INDEX, pretun_idx);

	set_tun->tun_type_index = cpu_to_be32(tmp_set_ip_tun_type_index);
	set_tun->tun_id = ip_tun->key.tun_id;

	if (ip_tun->key.ttl) {
		set_tun->ttl = ip_tun->key.ttl;
	} else {
		struct net *net = dev_net(netdev);
		struct flowi4 flow = {};
		struct rtable *rt;
		int err;

		/* Do a route lookup to determine the ttl; if the lookup
		 * fails, fall back to the default. Note that CONFIG_INET is
		 * a requirement of CONFIG_NET_SWITCHDEV so it must be
		 * defined here.
		 */
		flow.daddr = ip_tun->key.u.ipv4.dst;
		flow.flowi4_proto = IPPROTO_UDP;
		rt = ip_route_output_key(net, &flow);
		err = PTR_ERR_OR_ZERO(rt);
		if (!err) {
			set_tun->ttl = ip4_dst_hoplimit(&rt->dst);
			ip_rt_put(rt);
		} else {
			set_tun->ttl = net->ipv4.sysctl_ip_default_ttl;
		}
	}

	set_tun->tos = ip_tun->key.tos;

	if (!(ip_tun->key.tun_flags & NFP_FL_TUNNEL_KEY) ||
	    ip_tun->key.tun_flags & ~NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS)
		return -EOPNOTSUPP;
	set_tun->tun_flags = ip_tun->key.tun_flags;

	if (tun_type == NFP_FL_TUNNEL_GENEVE) {
		set_tun->tun_proto = htons(ETH_P_TEB);
		set_tun->tun_len = ip_tun->options_len / 4;
	}

	/* Complete the pre_tunnel action. */
	pre_tun->ipv4_dst = ip_tun->key.u.ipv4.dst;

	return 0;
}
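
/* Illustrative encoding, assuming the GENMASK definitions in main.h place
 * the tunnel type in bits 7-4 and the pre-tunnel index in bits 2-0: a
 * tunnel type value of 2 with pre-tunnel index 0 packs to
 * tun_type_index = 0x20 before the cpu_to_be32() conversion.
 */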

static void nfp_fl_set_helper32(u32 value, u32 mask, u8 *p_exact, u8 *p_mask)
{
	u32 oldvalue = get_unaligned((u32 *)p_exact);
	u32 oldmask = get_unaligned((u32 *)p_mask);

	value &= mask;
	value |= oldvalue & ~mask;

	put_unaligned(oldmask | mask, (u32 *)p_mask);
	put_unaligned(value, (u32 *)p_exact);
}
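
/* Worked example: with a previously recorded write of exact = 0x11000000,
 * mask = 0xFF000000, merging value = 0xDD, mask = 0x000000FF leaves
 * exact = 0x110000DD and mask = 0xFF0000FF - successive pedit keys
 * accumulate into a single firmware action.
 */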

static int
nfp_fl_set_eth(const struct tc_action *action, int idx, u32 off,
	       struct nfp_fl_set_eth *set_eth)
{
	u32 exact, mask;

	if (off + 4 > ETH_ALEN * 2)
		return -EOPNOTSUPP;

	mask = ~tcf_pedit_mask(action, idx);
	exact = tcf_pedit_val(action, idx);

	if (exact & ~mask)
		return -EOPNOTSUPP;

	nfp_fl_set_helper32(exact, mask, &set_eth->eth_addr_val[off],
			    &set_eth->eth_addr_mask[off]);

	set_eth->reserved = cpu_to_be16(0);
	set_eth->head.jump_id = NFP_FL_ACTION_OPCODE_SET_ETHERNET;
	set_eth->head.len_lw = sizeof(*set_eth) >> NFP_FL_LW_SIZ;

	return 0;
}

static int
nfp_fl_set_ip4(const struct tc_action *action, int idx, u32 off,
	       struct nfp_fl_set_ip4_addrs *set_ip_addr)
{
	__be32 exact, mask;

	/* We are expecting tcf_pedit to return a big-endian value. */
	mask = (__force __be32)~tcf_pedit_mask(action, idx);
	exact = (__force __be32)tcf_pedit_val(action, idx);

	if (exact & ~mask)
		return -EOPNOTSUPP;

	switch (off) {
	case offsetof(struct iphdr, daddr):
		set_ip_addr->ipv4_dst_mask = mask;
		set_ip_addr->ipv4_dst = exact;
		break;
	case offsetof(struct iphdr, saddr):
		set_ip_addr->ipv4_src_mask = mask;
		set_ip_addr->ipv4_src = exact;
		break;
	default:
		return -EOPNOTSUPP;
	}

	set_ip_addr->reserved = cpu_to_be16(0);
	set_ip_addr->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS;
	set_ip_addr->head.len_lw = sizeof(*set_ip_addr) >> NFP_FL_LW_SIZ;

	return 0;
}

static void
nfp_fl_set_ip6_helper(int opcode_tag, int idx, __be32 exact, __be32 mask,
		      struct nfp_fl_set_ipv6_addr *ip6)
{
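	/* A 128-bit IPv6 address arrives from pedit as 32-bit keys;
	 * idx % 4 selects which word of the address this key writes.
	 */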
	ip6->ipv6[idx % 4].mask = mask;
	ip6->ipv6[idx % 4].exact = exact;

	ip6->reserved = cpu_to_be16(0);
	ip6->head.jump_id = opcode_tag;
	ip6->head.len_lw = sizeof(*ip6) >> NFP_FL_LW_SIZ;
}

static int
nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off,
	       struct nfp_fl_set_ipv6_addr *ip_dst,
	       struct nfp_fl_set_ipv6_addr *ip_src)
{
	__be32 exact, mask;

	/* We are expecting tcf_pedit to return a big-endian value. */
	mask = (__force __be32)~tcf_pedit_mask(action, idx);
	exact = (__force __be32)tcf_pedit_val(action, idx);

	if (exact & ~mask)
		return -EOPNOTSUPP;

	if (off < offsetof(struct ipv6hdr, saddr))
		return -EOPNOTSUPP;
	else if (off < offsetof(struct ipv6hdr, daddr))
		nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_SRC, idx,
				      exact, mask, ip_src);
	else if (off < offsetof(struct ipv6hdr, daddr) +
		       sizeof(struct in6_addr))
		nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_DST, idx,
				      exact, mask, ip_dst);
	else
		return -EOPNOTSUPP;

	return 0;
}

static int
nfp_fl_set_tport(const struct tc_action *action, int idx, u32 off,
		 struct nfp_fl_set_tport *set_tport, int opcode)
{
	u32 exact, mask;

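	/* Source and destination ports occupy the first 4 bytes of the
	 * TCP/UDP header - the only region this action carries - so any
	 * non-zero offset is unsupported.
	 */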
	if (off)
		return -EOPNOTSUPP;

	mask = ~tcf_pedit_mask(action, idx);
	exact = tcf_pedit_val(action, idx);

	if (exact & ~mask)
		return -EOPNOTSUPP;

	nfp_fl_set_helper32(exact, mask, set_tport->tp_port_val,
			    set_tport->tp_port_mask);

	set_tport->reserved = cpu_to_be16(0);
	set_tport->head.jump_id = opcode;
	set_tport->head.len_lw = sizeof(*set_tport) >> NFP_FL_LW_SIZ;

	return 0;
}

static u32 nfp_fl_csum_l4_to_flag(u8 ip_proto)
{
	switch (ip_proto) {
	case 0:
		/* Filter doesn't force a protocol match, so both TCP and
		 * UDP checksums will be updated if encountered.
		 */
		return TCA_CSUM_UPDATE_FLAG_TCP | TCA_CSUM_UPDATE_FLAG_UDP;
	case IPPROTO_TCP:
		return TCA_CSUM_UPDATE_FLAG_TCP;
	case IPPROTO_UDP:
		return TCA_CSUM_UPDATE_FLAG_UDP;
	default:
		/* All other protocols will be ignored by FW */
		return 0;
	}
}

static int
nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
	     char *nfp_action, int *a_len, u32 *csum_updated)
{
	struct nfp_fl_set_ipv6_addr set_ip6_dst, set_ip6_src;
	struct nfp_fl_set_ip4_addrs set_ip_addr;
	struct nfp_fl_set_tport set_tport;
	struct nfp_fl_set_eth set_eth;
	enum pedit_header_type htype;
	int idx, nkeys, err;
	size_t act_size;
	u32 offset, cmd;
	u8 ip_proto = 0;

	memset(&set_ip6_dst, 0, sizeof(set_ip6_dst));
	memset(&set_ip6_src, 0, sizeof(set_ip6_src));
	memset(&set_ip_addr, 0, sizeof(set_ip_addr));
	memset(&set_tport, 0, sizeof(set_tport));
	memset(&set_eth, 0, sizeof(set_eth));
	nkeys = tcf_pedit_nkeys(action);

	for (idx = 0; idx < nkeys; idx++) {
		cmd = tcf_pedit_cmd(action, idx);
		htype = tcf_pedit_htype(action, idx);
		offset = tcf_pedit_offset(action, idx);

		if (cmd != TCA_PEDIT_KEY_EX_CMD_SET)
			return -EOPNOTSUPP;

		switch (htype) {
		case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
			err = nfp_fl_set_eth(action, idx, offset, &set_eth);
			break;
		case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
			err = nfp_fl_set_ip4(action, idx, offset, &set_ip_addr);
			break;
		case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
			err = nfp_fl_set_ip6(action, idx, offset, &set_ip6_dst,
					     &set_ip6_src);
			break;
		case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
			err = nfp_fl_set_tport(action, idx, offset, &set_tport,
					       NFP_FL_ACTION_OPCODE_SET_TCP);
			break;
		case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
			err = nfp_fl_set_tport(action, idx, offset, &set_tport,
					       NFP_FL_ACTION_OPCODE_SET_UDP);
			break;
		default:
			return -EOPNOTSUPP;
		}
		if (err)
			return err;
	}

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *basic;

		basic = skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  flow->key);
		ip_proto = basic->ip_proto;
	}

	if (set_eth.head.len_lw) {
		act_size = sizeof(set_eth);
		memcpy(nfp_action, &set_eth, act_size);
		*a_len += act_size;
	} else if (set_ip_addr.head.len_lw) {
		act_size = sizeof(set_ip_addr);
		memcpy(nfp_action, &set_ip_addr, act_size);
		*a_len += act_size;

		/* Hardware will automatically fix IPv4 and TCP/UDP checksum. */
		*csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR |
				nfp_fl_csum_l4_to_flag(ip_proto);
	} else if (set_ip6_dst.head.len_lw && set_ip6_src.head.len_lw) {
		/* TC compiles the IPv6 src and dst address sets into a single
		 * pedit action, but the hardware requires two separate
		 * actions.
		 */
		act_size = sizeof(set_ip6_src);
		memcpy(nfp_action, &set_ip6_src, act_size);
		*a_len += act_size;

		act_size = sizeof(set_ip6_dst);
		memcpy(&nfp_action[sizeof(set_ip6_src)], &set_ip6_dst,
		       act_size);
		*a_len += act_size;

		/* Hardware will automatically fix TCP/UDP checksum. */
		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
	} else if (set_ip6_dst.head.len_lw) {
		act_size = sizeof(set_ip6_dst);
		memcpy(nfp_action, &set_ip6_dst, act_size);
		*a_len += act_size;

		/* Hardware will automatically fix TCP/UDP checksum. */
		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
	} else if (set_ip6_src.head.len_lw) {
		act_size = sizeof(set_ip6_src);
		memcpy(nfp_action, &set_ip6_src, act_size);
		*a_len += act_size;

		/* Hardware will automatically fix TCP/UDP checksum. */
		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
	} else if (set_tport.head.len_lw) {
		act_size = sizeof(set_tport);
		memcpy(nfp_action, &set_tport, act_size);
		*a_len += act_size;

		/* Hardware will automatically fix TCP/UDP checksum. */
		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
	}

	return 0;
}

static int
nfp_flower_output_action(struct nfp_app *app, const struct tc_action *a,
			 struct nfp_fl_payload *nfp_fl, int *a_len,
			 struct net_device *netdev, bool last,
			 enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
			 int *out_cnt, u32 *csum_updated)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_output *output;
	int err, prelag_size;

	/* If csum_updated has not been cleared by now, the HW would update
	 * checksums that were never requested.
	 */
	if (*csum_updated)
		return -EOPNOTSUPP;

	if (*a_len + sizeof(struct nfp_fl_output) > NFP_FL_MAX_A_SIZ)
		return -EOPNOTSUPP;

	output = (struct nfp_fl_output *)&nfp_fl->action_data[*a_len];
	err = nfp_fl_output(app, output, a, nfp_fl, last, netdev, *tun_type,
			    tun_out_cnt);
	if (err)
		return err;

	*a_len += sizeof(struct nfp_fl_output);

	if (priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
		/* nfp_fl_pre_lag() returns a negative error or the size of
		 * the prelag action added; this is 0 if the flow does not
		 * egress to a LAG device.
		 */
		prelag_size = nfp_fl_pre_lag(app, a, nfp_fl, *a_len);
		if (prelag_size < 0)
			return prelag_size;
		else if (prelag_size > 0 && (!last || *out_cnt))
			return -EOPNOTSUPP;

		*a_len += prelag_size;
	}
	(*out_cnt)++;

	return 0;
}

static int
nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a,
		       struct tc_cls_flower_offload *flow,
		       struct nfp_fl_payload *nfp_fl, int *a_len,
		       struct net_device *netdev,
		       enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
		       int *out_cnt, u32 *csum_updated)
{
	struct nfp_fl_set_ipv4_udp_tun *set_tun;
	struct nfp_fl_pre_tunnel *pre_tun;
	struct nfp_fl_push_vlan *psh_v;
	struct nfp_fl_pop_vlan *pop_v;
	int err;

	if (is_tcf_gact_shot(a)) {
		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_DROP);
	} else if (is_tcf_mirred_egress_redirect(a)) {
		err = nfp_flower_output_action(app, a, nfp_fl, a_len, netdev,
					       true, tun_type, tun_out_cnt,
					       out_cnt, csum_updated);
		if (err)
			return err;

	} else if (is_tcf_mirred_egress_mirror(a)) {
		err = nfp_flower_output_action(app, a, nfp_fl, a_len, netdev,
					       false, tun_type, tun_out_cnt,
					       out_cnt, csum_updated);
		if (err)
			return err;

	} else if (is_tcf_vlan(a) && tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
		if (*a_len + sizeof(struct nfp_fl_pop_vlan) > NFP_FL_MAX_A_SIZ)
			return -EOPNOTSUPP;

		pop_v = (struct nfp_fl_pop_vlan *)&nfp_fl->action_data[*a_len];
		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_POPV);

		nfp_fl_pop_vlan(pop_v);
		*a_len += sizeof(struct nfp_fl_pop_vlan);
	} else if (is_tcf_vlan(a) && tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
		if (*a_len + sizeof(struct nfp_fl_push_vlan) > NFP_FL_MAX_A_SIZ)
			return -EOPNOTSUPP;

		psh_v = (struct nfp_fl_push_vlan *)&nfp_fl->action_data[*a_len];
		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

		nfp_fl_push_vlan(psh_v, a);
		*a_len += sizeof(struct nfp_fl_push_vlan);
	} else if (is_tcf_tunnel_set(a)) {
		struct nfp_repr *repr = netdev_priv(netdev);

		*tun_type = nfp_fl_get_tun_from_act_l4_port(repr->app, a);
		if (*tun_type == NFP_FL_TUNNEL_NONE)
			return -EOPNOTSUPP;

		/* Pre-tunnel action is required for tunnel encap.
		 * This checks for next hop entries on NFP.
		 * If none, the packet falls back before applying other actions.
		 */
		if (*a_len + sizeof(struct nfp_fl_pre_tunnel) +
		    sizeof(struct nfp_fl_set_ipv4_udp_tun) > NFP_FL_MAX_A_SIZ)
			return -EOPNOTSUPP;

		pre_tun = nfp_fl_pre_tunnel(nfp_fl->action_data, *a_len);
		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
		*a_len += sizeof(struct nfp_fl_pre_tunnel);

		err = nfp_fl_push_geneve_options(nfp_fl, a_len, a);
		if (err)
			return err;

		set_tun = (void *)&nfp_fl->action_data[*a_len];
		err = nfp_fl_set_ipv4_udp_tun(app, set_tun, a, pre_tun,
					      *tun_type, netdev);
		if (err)
			return err;
		*a_len += sizeof(struct nfp_fl_set_ipv4_udp_tun);
	} else if (is_tcf_tunnel_release(a)) {
		/* Tunnel decap is handled by default, so accept the action. */
		return 0;
	} else if (is_tcf_pedit(a)) {
		if (nfp_fl_pedit(a, flow, &nfp_fl->action_data[*a_len],
				 a_len, csum_updated))
			return -EOPNOTSUPP;
	} else if (is_tcf_csum(a)) {
		/* The csum action requests recalculation of a checksum we
		 * have not fixed.
		 */
		if (tcf_csum_update_flags(a) & ~*csum_updated)
			return -EOPNOTSUPP;
		/* If we will correctly fix the csum, we can remove it from
		 * the csum update list, which is later used to check support.
		 */
		*csum_updated &= ~tcf_csum_update_flags(a);
	} else {
		/* Currently we do not handle any other actions. */
		return -EOPNOTSUPP;
	}

	return 0;
}
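
/* Illustrative result: for a filter compiled from
 *   action tunnel_key set ... action mirred egress redirect dev vxlan0
 * the action list ends up as [pre_tunnel][set_ipv4_udp_tun][output], with
 * any Geneve push-option actions between pre_tunnel and set_ipv4_udp_tun.
 */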

int nfp_flower_compile_action(struct nfp_app *app,
			      struct tc_cls_flower_offload *flow,
			      struct net_device *netdev,
			      struct nfp_fl_payload *nfp_flow)
{
	int act_len, act_cnt, err, tun_out_cnt, out_cnt, i;
	enum nfp_flower_tun_type tun_type;
	const struct tc_action *a;
	u32 csum_updated = 0;

	memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ);
	nfp_flow->meta.act_len = 0;
	tun_type = NFP_FL_TUNNEL_NONE;
	act_len = 0;
	act_cnt = 0;
	tun_out_cnt = 0;
	out_cnt = 0;

	tcf_exts_for_each_action(i, a, flow->exts) {
		err = nfp_flower_loop_action(app, a, flow, nfp_flow, &act_len,
					     netdev, &tun_type, &tun_out_cnt,
					     &out_cnt, &csum_updated);
		if (err)
			return err;
		act_cnt++;
	}

	/* We optimise with the shortcut when the action list is small;
	 * unfortunately this is not possible once there is more than one
	 * action in the list.
	 */
	if (act_cnt > 1)
		nfp_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

	nfp_flow->meta.act_len = act_len;

	return 0;
}