xref: /openbmc/linux/drivers/net/ethernet/netronome/nfp/flower/action.c (revision 45cc842d5b75ba8f9a958f2dd12b95c6dd0452bd)
/*
 * Copyright (C) 2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree or the BSD 2-Clause License provided below.  You have the
 * option to license this software under the complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/bitfield.h>
#include <net/pkt_cls.h>
#include <net/switchdev.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>

#include "cmsg.h"
#include "main.h"
#include "../nfp_net_repr.h"

static void nfp_fl_pop_vlan(struct nfp_fl_pop_vlan *pop_vlan)
{
	size_t act_size = sizeof(struct nfp_fl_pop_vlan);

	pop_vlan->head.jump_id = NFP_FL_ACTION_OPCODE_POP_VLAN;
	pop_vlan->head.len_lw = act_size >> NFP_FL_LW_SIZ;
	pop_vlan->reserved = 0;
}

static void
nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan,
		 const struct tc_action *action)
{
	size_t act_size = sizeof(struct nfp_fl_push_vlan);
	u16 tmp_push_vlan_tci;

	push_vlan->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_VLAN;
	push_vlan->head.len_lw = act_size >> NFP_FL_LW_SIZ;
	push_vlan->reserved = 0;
	push_vlan->vlan_tpid = tcf_vlan_push_proto(action);

	tmp_push_vlan_tci =
		FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, tcf_vlan_push_prio(action)) |
		FIELD_PREP(NFP_FL_PUSH_VLAN_VID, tcf_vlan_push_vid(action)) |
		NFP_FL_PUSH_VLAN_CFI;
	push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci);
}
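
/* Illustrative note (not from the original source): assuming the
 * NFP_FL_PUSH_VLAN_* masks mirror the 802.1Q TCI layout (PCP in bits 15-13,
 * CFI/DEI in bit 12, VID in bits 11-0), a push of priority 5 and VLAN id 100
 * would be encoded as:
 *
 *	FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, 5)  -> 0xa000
 *	NFP_FL_PUSH_VLAN_CFI                  -> 0x1000
 *	FIELD_PREP(NFP_FL_PUSH_VLAN_VID, 100) -> 0x0064
 *	tmp_push_vlan_tci                      = 0xb064, written big endian.
 */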

static bool nfp_fl_netdev_is_tunnel_type(struct net_device *out_dev,
					 enum nfp_flower_tun_type tun_type)
{
	if (!out_dev->rtnl_link_ops)
		return false;

	if (!strcmp(out_dev->rtnl_link_ops->kind, "vxlan"))
		return tun_type == NFP_FL_TUNNEL_VXLAN;

	if (!strcmp(out_dev->rtnl_link_ops->kind, "geneve"))
		return tun_type == NFP_FL_TUNNEL_GENEVE;

	return false;
}

static int
nfp_fl_output(struct nfp_fl_output *output, const struct tc_action *action,
	      struct nfp_fl_payload *nfp_flow, bool last,
	      struct net_device *in_dev, enum nfp_flower_tun_type tun_type,
	      int *tun_out_cnt)
{
	size_t act_size = sizeof(struct nfp_fl_output);
	struct net_device *out_dev;
	u16 tmp_flags;

	output->head.jump_id = NFP_FL_ACTION_OPCODE_OUTPUT;
	output->head.len_lw = act_size >> NFP_FL_LW_SIZ;

	out_dev = tcf_mirred_dev(action);
	if (!out_dev)
		return -EOPNOTSUPP;

	tmp_flags = last ? NFP_FL_OUT_FLAGS_LAST : 0;

	if (tun_type) {
		/* Verify the egress netdev matches the tunnel type. */
		if (!nfp_fl_netdev_is_tunnel_type(out_dev, tun_type))
			return -EOPNOTSUPP;

		if (*tun_out_cnt)
			return -EOPNOTSUPP;
		(*tun_out_cnt)++;

		output->flags = cpu_to_be16(tmp_flags |
					    NFP_FL_OUT_FLAGS_USE_TUN);
		output->port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type);
	} else {
		/* Set action output parameters. */
		output->flags = cpu_to_be16(tmp_flags);

		/* Only offload if egress ports are on the same device as the
		 * ingress port.
		 */
		if (!switchdev_port_same_parent_id(in_dev, out_dev))
			return -EOPNOTSUPP;
		if (!nfp_netdev_is_nfp_repr(out_dev))
			return -EOPNOTSUPP;

		output->port = cpu_to_be32(nfp_repr_get_port_id(out_dev));
		if (!output->port)
			return -EOPNOTSUPP;
	}
	nfp_flow->meta.shortcut = output->port;

	return 0;
}

static enum nfp_flower_tun_type
nfp_fl_get_tun_from_act_l4_port(struct nfp_app *app,
				const struct tc_action *action)
{
	struct ip_tunnel_info *tun = tcf_tunnel_info(action);
	struct nfp_flower_priv *priv = app->priv;

	switch (tun->key.tp_dst) {
	case htons(NFP_FL_VXLAN_PORT):
		return NFP_FL_TUNNEL_VXLAN;
	case htons(NFP_FL_GENEVE_PORT):
		if (priv->flower_ext_feats & NFP_FL_FEATS_GENEVE)
			return NFP_FL_TUNNEL_GENEVE;
		/* FALLTHROUGH */
	default:
		return NFP_FL_TUNNEL_NONE;
	}
}
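
/* Illustrative note (not from the original source): NFP_FL_VXLAN_PORT and
 * NFP_FL_GENEVE_PORT are expected to be the IANA defaults (4789 and 6081).
 * An encap rule such as the following untested example, using dst_port 4789
 * and a vxlan egress device, would therefore select NFP_FL_TUNNEL_VXLAN:
 *
 *	tc filter add dev nfp_p0 ingress protocol ip flower \
 *		action tunnel_key set id 123 \
 *			src_ip 10.0.0.1 dst_ip 10.0.0.2 dst_port 4789 \
 *		action mirred egress redirect dev vxlan0
 *
 * (nfp_p0 and vxlan0 are hypothetical interface names.)
 */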

static struct nfp_fl_pre_tunnel *nfp_fl_pre_tunnel(char *act_data, int act_len)
{
	size_t act_size = sizeof(struct nfp_fl_pre_tunnel);
	struct nfp_fl_pre_tunnel *pre_tun_act;

	/* The pre_tunnel action must be first in the action list.
	 * If other actions already exist, they need to be pushed forward.
	 */
	if (act_len)
		memmove(act_data + act_size, act_data, act_len);

	pre_tun_act = (struct nfp_fl_pre_tunnel *)act_data;

	memset(pre_tun_act, 0, act_size);

	pre_tun_act->head.jump_id = NFP_FL_ACTION_OPCODE_PRE_TUNNEL;
	pre_tun_act->head.len_lw = act_size >> NFP_FL_LW_SIZ;

	return pre_tun_act;
}

static int
nfp_fl_set_ipv4_udp_tun(struct nfp_fl_set_ipv4_udp_tun *set_tun,
			const struct tc_action *action,
			struct nfp_fl_pre_tunnel *pre_tun,
			enum nfp_flower_tun_type tun_type)
{
	size_t act_size = sizeof(struct nfp_fl_set_ipv4_udp_tun);
	struct ip_tunnel_info *ip_tun = tcf_tunnel_info(action);
	u32 tmp_set_ip_tun_type_index = 0;
	/* Currently we support one pre-tunnel, so the index is always 0. */
	int pretun_idx = 0;

	if (ip_tun->options_len)
		return -EOPNOTSUPP;

	set_tun->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL;
	set_tun->head.len_lw = act_size >> NFP_FL_LW_SIZ;

	/* Set tunnel type and pre-tunnel index. */
	tmp_set_ip_tun_type_index |=
		FIELD_PREP(NFP_FL_IPV4_TUNNEL_TYPE, tun_type) |
		FIELD_PREP(NFP_FL_IPV4_PRE_TUN_INDEX, pretun_idx);

	set_tun->tun_type_index = cpu_to_be32(tmp_set_ip_tun_type_index);
	set_tun->tun_id = ip_tun->key.tun_id;

	/* Complete the pre_tunnel action. */
	pre_tun->ipv4_dst = ip_tun->key.u.ipv4.dst;

	return 0;
}
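
/* Illustrative note (not from the original source): tunnel encap is emitted
 * as a pair of actions.  nfp_fl_pre_tunnel() reserves the first slot and
 * carries the outer destination IP so the firmware can check its next hop
 * entries, while this action supplies the tunnel type, the pre-tunnel index
 * and tun_id (for example the VXLAN VNI).
 */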

static void nfp_fl_set_helper32(u32 value, u32 mask, u8 *p_exact, u8 *p_mask)
{
	u32 oldvalue = get_unaligned((u32 *)p_exact);
	u32 oldmask = get_unaligned((u32 *)p_mask);

	value &= mask;
	value |= oldvalue & ~mask;

	put_unaligned(oldmask | mask, (u32 *)p_mask);
	put_unaligned(value, (u32 *)p_exact);
}
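
/* Illustrative note (not from the original source): the helper merges a new
 * 32-bit set into whatever earlier pedit keys already wrote.  For example,
 * with *p_exact = 0x11220000, *p_mask = 0xffff0000, value = 0x0000abcd and
 * mask = 0x0000ffff, the calls above leave *p_exact = 0x1122abcd and
 * *p_mask = 0xffffffff.
 */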

static int
nfp_fl_set_eth(const struct tc_action *action, int idx, u32 off,
	       struct nfp_fl_set_eth *set_eth)
{
	u32 exact, mask;

	if (off + 4 > ETH_ALEN * 2)
		return -EOPNOTSUPP;

	mask = ~tcf_pedit_mask(action, idx);
	exact = tcf_pedit_val(action, idx);

	if (exact & ~mask)
		return -EOPNOTSUPP;

	nfp_fl_set_helper32(exact, mask, &set_eth->eth_addr_val[off],
			    &set_eth->eth_addr_mask[off]);

	set_eth->reserved = cpu_to_be16(0);
	set_eth->head.jump_id = NFP_FL_ACTION_OPCODE_SET_ETHERNET;
	set_eth->head.len_lw = sizeof(*set_eth) >> NFP_FL_LW_SIZ;

	return 0;
}

static int
nfp_fl_set_ip4(const struct tc_action *action, int idx, u32 off,
	       struct nfp_fl_set_ip4_addrs *set_ip_addr)
{
	__be32 exact, mask;

	/* We expect tcf_pedit to return a big-endian value. */
	mask = (__force __be32)~tcf_pedit_mask(action, idx);
	exact = (__force __be32)tcf_pedit_val(action, idx);

	if (exact & ~mask)
		return -EOPNOTSUPP;

	switch (off) {
	case offsetof(struct iphdr, daddr):
		set_ip_addr->ipv4_dst_mask = mask;
		set_ip_addr->ipv4_dst = exact;
		break;
	case offsetof(struct iphdr, saddr):
		set_ip_addr->ipv4_src_mask = mask;
		set_ip_addr->ipv4_src = exact;
		break;
	default:
		return -EOPNOTSUPP;
	}

	set_ip_addr->reserved = cpu_to_be16(0);
	set_ip_addr->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS;
	set_ip_addr->head.len_lw = sizeof(*set_ip_addr) >> NFP_FL_LW_SIZ;

	return 0;
}

static void
nfp_fl_set_ip6_helper(int opcode_tag, int idx, __be32 exact, __be32 mask,
		      struct nfp_fl_set_ipv6_addr *ip6)
{
	ip6->ipv6[idx % 4].mask = mask;
	ip6->ipv6[idx % 4].exact = exact;

	ip6->reserved = cpu_to_be16(0);
	ip6->head.jump_id = opcode_tag;
	ip6->head.len_lw = sizeof(*ip6) >> NFP_FL_LW_SIZ;
}

static int
nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off,
	       struct nfp_fl_set_ipv6_addr *ip_dst,
	       struct nfp_fl_set_ipv6_addr *ip_src)
{
	__be32 exact, mask;

	/* We expect tcf_pedit to return a big-endian value. */
	mask = (__force __be32)~tcf_pedit_mask(action, idx);
	exact = (__force __be32)tcf_pedit_val(action, idx);

	if (exact & ~mask)
		return -EOPNOTSUPP;

	if (off < offsetof(struct ipv6hdr, saddr))
		return -EOPNOTSUPP;
	else if (off < offsetof(struct ipv6hdr, daddr))
		nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_SRC, idx,
				      exact, mask, ip_src);
	else if (off < offsetof(struct ipv6hdr, daddr) +
		       sizeof(struct in6_addr))
		nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_DST, idx,
				      exact, mask, ip_dst);
	else
		return -EOPNOTSUPP;

	return 0;
}
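
/* Illustrative note (not from the original source): pedit hands a 128-bit
 * IPv6 address to the driver as four consecutive 32-bit keys.  With the
 * standard struct ipv6hdr layout, saddr starts at offset 8 and daddr at
 * offset 24, so keys at offsets 8/12/16/20 accumulate in ip_src and keys at
 * 24/28/32/36 in ip_dst, the helper storing each key in ip6->ipv6[idx % 4].
 */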

static int
nfp_fl_set_tport(const struct tc_action *action, int idx, u32 off,
		 struct nfp_fl_set_tport *set_tport, int opcode)
{
	u32 exact, mask;

	if (off)
		return -EOPNOTSUPP;

	mask = ~tcf_pedit_mask(action, idx);
	exact = tcf_pedit_val(action, idx);

	if (exact & ~mask)
		return -EOPNOTSUPP;

	nfp_fl_set_helper32(exact, mask, set_tport->tp_port_val,
			    set_tport->tp_port_mask);

	set_tport->reserved = cpu_to_be16(0);
	set_tport->head.jump_id = opcode;
	set_tport->head.len_lw = sizeof(*set_tport) >> NFP_FL_LW_SIZ;

	return 0;
}

static int
nfp_fl_pedit(const struct tc_action *action, char *nfp_action, int *a_len)
{
	struct nfp_fl_set_ipv6_addr set_ip6_dst, set_ip6_src;
	struct nfp_fl_set_ip4_addrs set_ip_addr;
	struct nfp_fl_set_tport set_tport;
	struct nfp_fl_set_eth set_eth;
	enum pedit_header_type htype;
	int idx, nkeys, err;
	size_t act_size;
	u32 offset, cmd;

	memset(&set_ip6_dst, 0, sizeof(set_ip6_dst));
	memset(&set_ip6_src, 0, sizeof(set_ip6_src));
	memset(&set_ip_addr, 0, sizeof(set_ip_addr));
	memset(&set_tport, 0, sizeof(set_tport));
	memset(&set_eth, 0, sizeof(set_eth));
	nkeys = tcf_pedit_nkeys(action);

	for (idx = 0; idx < nkeys; idx++) {
		cmd = tcf_pedit_cmd(action, idx);
		htype = tcf_pedit_htype(action, idx);
		offset = tcf_pedit_offset(action, idx);

		if (cmd != TCA_PEDIT_KEY_EX_CMD_SET)
			return -EOPNOTSUPP;

		switch (htype) {
		case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
			err = nfp_fl_set_eth(action, idx, offset, &set_eth);
			break;
		case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
			err = nfp_fl_set_ip4(action, idx, offset, &set_ip_addr);
			break;
		case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
			err = nfp_fl_set_ip6(action, idx, offset, &set_ip6_dst,
					     &set_ip6_src);
			break;
		case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
			err = nfp_fl_set_tport(action, idx, offset, &set_tport,
					       NFP_FL_ACTION_OPCODE_SET_TCP);
			break;
		case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
			err = nfp_fl_set_tport(action, idx, offset, &set_tport,
					       NFP_FL_ACTION_OPCODE_SET_UDP);
			break;
		default:
			return -EOPNOTSUPP;
		}
		if (err)
			return err;
	}

	if (set_eth.head.len_lw) {
		act_size = sizeof(set_eth);
		memcpy(nfp_action, &set_eth, act_size);
		*a_len += act_size;
	} else if (set_ip_addr.head.len_lw) {
		act_size = sizeof(set_ip_addr);
		memcpy(nfp_action, &set_ip_addr, act_size);
		*a_len += act_size;
	} else if (set_ip6_dst.head.len_lw && set_ip6_src.head.len_lw) {
		/* TC compiles the set src and dst IPv6 addresses into a
		 * single action, but the hardware requires these to be two
		 * separate actions.
		 */
		act_size = sizeof(set_ip6_src);
		memcpy(nfp_action, &set_ip6_src, act_size);
		*a_len += act_size;

		act_size = sizeof(set_ip6_dst);
		memcpy(&nfp_action[sizeof(set_ip6_src)], &set_ip6_dst,
		       act_size);
		*a_len += act_size;
	} else if (set_ip6_dst.head.len_lw) {
		act_size = sizeof(set_ip6_dst);
		memcpy(nfp_action, &set_ip6_dst, act_size);
		*a_len += act_size;
	} else if (set_ip6_src.head.len_lw) {
		act_size = sizeof(set_ip6_src);
		memcpy(nfp_action, &set_ip6_src, act_size);
		*a_len += act_size;
	} else if (set_tport.head.len_lw) {
		act_size = sizeof(set_tport);
		memcpy(nfp_action, &set_tport, act_size);
		*a_len += act_size;
	}

	return 0;
}
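
/* Illustrative note (not from the original source): an extended pedit rule
 * such as the following untested example would reach this path with a single
 * TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 set key for the destination address:
 *
 *	tc filter add dev nfp_p0 ingress protocol ip flower \
 *		action pedit ex munge ip dst set 10.0.0.2 pipe \
 *		action mirred egress redirect dev nfp_p1
 *
 * (nfp_p0 and nfp_p1 are hypothetical representor names.)
 */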

static int
nfp_flower_loop_action(const struct tc_action *a,
		       struct nfp_fl_payload *nfp_fl, int *a_len,
		       struct net_device *netdev,
		       enum nfp_flower_tun_type *tun_type, int *tun_out_cnt)
{
	struct nfp_fl_set_ipv4_udp_tun *set_tun;
	struct nfp_fl_pre_tunnel *pre_tun;
	struct nfp_fl_push_vlan *psh_v;
	struct nfp_fl_pop_vlan *pop_v;
	struct nfp_fl_output *output;
	int err;

	if (is_tcf_gact_shot(a)) {
		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_DROP);
	} else if (is_tcf_mirred_egress_redirect(a)) {
		if (*a_len + sizeof(struct nfp_fl_output) > NFP_FL_MAX_A_SIZ)
			return -EOPNOTSUPP;

		output = (struct nfp_fl_output *)&nfp_fl->action_data[*a_len];
		err = nfp_fl_output(output, a, nfp_fl, true, netdev, *tun_type,
				    tun_out_cnt);
		if (err)
			return err;

		*a_len += sizeof(struct nfp_fl_output);
	} else if (is_tcf_mirred_egress_mirror(a)) {
		if (*a_len + sizeof(struct nfp_fl_output) > NFP_FL_MAX_A_SIZ)
			return -EOPNOTSUPP;

		output = (struct nfp_fl_output *)&nfp_fl->action_data[*a_len];
		err = nfp_fl_output(output, a, nfp_fl, false, netdev, *tun_type,
				    tun_out_cnt);
		if (err)
			return err;

		*a_len += sizeof(struct nfp_fl_output);
	} else if (is_tcf_vlan(a) && tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
		if (*a_len + sizeof(struct nfp_fl_pop_vlan) > NFP_FL_MAX_A_SIZ)
			return -EOPNOTSUPP;

		pop_v = (struct nfp_fl_pop_vlan *)&nfp_fl->action_data[*a_len];
		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_POPV);

		nfp_fl_pop_vlan(pop_v);
		*a_len += sizeof(struct nfp_fl_pop_vlan);
	} else if (is_tcf_vlan(a) && tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
		if (*a_len + sizeof(struct nfp_fl_push_vlan) > NFP_FL_MAX_A_SIZ)
			return -EOPNOTSUPP;

		psh_v = (struct nfp_fl_push_vlan *)&nfp_fl->action_data[*a_len];
		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

		nfp_fl_push_vlan(psh_v, a);
		*a_len += sizeof(struct nfp_fl_push_vlan);
	} else if (is_tcf_tunnel_set(a)) {
		struct nfp_repr *repr = netdev_priv(netdev);

		*tun_type = nfp_fl_get_tun_from_act_l4_port(repr->app, a);
		if (*tun_type == NFP_FL_TUNNEL_NONE)
			return -EOPNOTSUPP;

		/* Pre-tunnel action is required for tunnel encap.
		 * This checks for next hop entries on NFP.
		 * If none, the packet falls back before applying other actions.
		 */
		if (*a_len + sizeof(struct nfp_fl_pre_tunnel) +
		    sizeof(struct nfp_fl_set_ipv4_udp_tun) > NFP_FL_MAX_A_SIZ)
			return -EOPNOTSUPP;

		pre_tun = nfp_fl_pre_tunnel(nfp_fl->action_data, *a_len);
		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
		*a_len += sizeof(struct nfp_fl_pre_tunnel);

		set_tun = (void *)&nfp_fl->action_data[*a_len];
		err = nfp_fl_set_ipv4_udp_tun(set_tun, a, pre_tun, *tun_type);
		if (err)
			return err;
		*a_len += sizeof(struct nfp_fl_set_ipv4_udp_tun);
	} else if (is_tcf_tunnel_release(a)) {
		/* Tunnel decap is handled by default so accept the action. */
		return 0;
	} else if (is_tcf_pedit(a)) {
		if (nfp_fl_pedit(a, &nfp_fl->action_data[*a_len], a_len))
			return -EOPNOTSUPP;
	} else {
		/* Currently we do not handle any other actions. */
		return -EOPNOTSUPP;
	}

	return 0;
}

int nfp_flower_compile_action(struct tc_cls_flower_offload *flow,
			      struct net_device *netdev,
			      struct nfp_fl_payload *nfp_flow)
{
	int act_len, act_cnt, err, tun_out_cnt;
	enum nfp_flower_tun_type tun_type;
	const struct tc_action *a;
	LIST_HEAD(actions);

	memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ);
	nfp_flow->meta.act_len = 0;
	tun_type = NFP_FL_TUNNEL_NONE;
	act_len = 0;
	act_cnt = 0;
	tun_out_cnt = 0;

	tcf_exts_to_list(flow->exts, &actions);
	list_for_each_entry(a, &actions, list) {
		err = nfp_flower_loop_action(a, nfp_flow, &act_len, netdev,
					     &tun_type, &tun_out_cnt);
		if (err)
			return err;
		act_cnt++;
	}

	/* We optimise when the action list is small; unfortunately this
	 * cannot be done once there is more than one action in the list.
	 */
	if (act_cnt > 1)
		nfp_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

	nfp_flow->meta.act_len = act_len;

	return 0;
}
567