// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/bitfield.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>

#include "cmsg.h"
#include "main.h"
#include "../nfp_net_repr.h"

/* The kernel versions of TUNNEL_* are not part of the ABI and are
 * therefore subject to change. Such changes would break our FW ABI.
 */
#define NFP_FL_TUNNEL_CSUM			cpu_to_be16(0x01)
#define NFP_FL_TUNNEL_KEY			cpu_to_be16(0x04)
#define NFP_FL_TUNNEL_GENEVE_OPT		cpu_to_be16(0x0800)
#define NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS	IP_TUNNEL_INFO_TX
#define NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS	(NFP_FL_TUNNEL_CSUM | \
						 NFP_FL_TUNNEL_KEY | \
						 NFP_FL_TUNNEL_GENEVE_OPT)
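
/* Illustrative sketch (hypothetical tc invocation): a tunnel key set by
 * "tc ... action tunnel_key set id 123 dst_port 4789" typically carries
 * TUNNEL_KEY | TUNNEL_CSUM and so passes the mask above, while a key
 * carrying any flag outside the mask is rejected with -EOPNOTSUPP in
 * nfp_fl_set_ipv4_udp_tun() below.
 */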

static void nfp_fl_pop_vlan(struct nfp_fl_pop_vlan *pop_vlan)
{
	size_t act_size = sizeof(struct nfp_fl_pop_vlan);

	pop_vlan->head.jump_id = NFP_FL_ACTION_OPCODE_POP_VLAN;
	pop_vlan->head.len_lw = act_size >> NFP_FL_LW_SIZ;
	pop_vlan->reserved = 0;
}

static void
nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan,
		 const struct flow_action_entry *act)
{
	size_t act_size = sizeof(struct nfp_fl_push_vlan);
	u16 tmp_push_vlan_tci;

	push_vlan->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_VLAN;
	push_vlan->head.len_lw = act_size >> NFP_FL_LW_SIZ;
	push_vlan->reserved = 0;
	push_vlan->vlan_tpid = act->vlan.proto;

	tmp_push_vlan_tci =
		FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, act->vlan.prio) |
		FIELD_PREP(NFP_FL_PUSH_VLAN_VID, act->vlan.vid) |
		NFP_FL_PUSH_VLAN_CFI;
	push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci);
}
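
/* Worked example (illustrative): for "tc ... action vlan push id 100
 * prio 5", and assuming the NFP_FL_PUSH_VLAN_* masks in main.h follow
 * the usual 802.1Q TCI layout (PCP bits 15-13, CFI/DEI bit 12, VID
 * bits 11-0), the TCI composed above is:
 *
 *	FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, 5)	-> 0xa000
 *	FIELD_PREP(NFP_FL_PUSH_VLAN_VID, 100)	-> 0x0064
 *	NFP_FL_PUSH_VLAN_CFI			-> 0x1000
 *
 * i.e. vlan_tci = cpu_to_be16(0xb064).
 */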

static int
nfp_fl_pre_lag(struct nfp_app *app, const struct flow_action_entry *act,
	       struct nfp_fl_payload *nfp_flow, int act_len)
{
	size_t act_size = sizeof(struct nfp_fl_pre_lag);
	struct nfp_fl_pre_lag *pre_lag;
	struct net_device *out_dev;
	int err;

	out_dev = act->dev;
	if (!out_dev || !netif_is_lag_master(out_dev))
		return 0;

	if (act_len + act_size > NFP_FL_MAX_A_SIZ)
		return -EOPNOTSUPP;

	/* Pre_lag action must be first on action list.
	 * If other actions already exist they need to be pushed forward.
	 */
	if (act_len)
		memmove(nfp_flow->action_data + act_size,
			nfp_flow->action_data, act_len);

	pre_lag = (struct nfp_fl_pre_lag *)nfp_flow->action_data;
	err = nfp_flower_lag_populate_pre_action(app, out_dev, pre_lag);
	if (err)
		return err;

	pre_lag->head.jump_id = NFP_FL_ACTION_OPCODE_PRE_LAG;
	pre_lag->head.len_lw = act_size >> NFP_FL_LW_SIZ;

	nfp_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

	return act_size;
}
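
/* Action list layout after nfp_fl_pre_lag() (sketch): any actions
 * compiled so far are shifted up so the pre-lag action is always first:
 *
 *	before:	| act0 | act1 | ...
 *	after:	| pre_lag | act0 | act1 | ...
 *
 * The shortcut is reset to NFP_FL_SC_ACT_NULL since no single-action
 * shortcut applies once a pre-lag action is present.
 */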

static int
nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
	      const struct flow_action_entry *act,
	      struct nfp_fl_payload *nfp_flow,
	      bool last, struct net_device *in_dev,
	      enum nfp_flower_tun_type tun_type, int *tun_out_cnt)
{
	size_t act_size = sizeof(struct nfp_fl_output);
	struct nfp_flower_priv *priv = app->priv;
	struct net_device *out_dev;
	u16 tmp_flags;

	output->head.jump_id = NFP_FL_ACTION_OPCODE_OUTPUT;
	output->head.len_lw = act_size >> NFP_FL_LW_SIZ;

	out_dev = act->dev;
	if (!out_dev)
		return -EOPNOTSUPP;

	tmp_flags = last ? NFP_FL_OUT_FLAGS_LAST : 0;

	if (tun_type) {
		/* Verify the egress netdev matches the tunnel type. */
		if (!nfp_fl_netdev_is_tunnel_type(out_dev, tun_type))
			return -EOPNOTSUPP;

		if (*tun_out_cnt)
			return -EOPNOTSUPP;
		(*tun_out_cnt)++;

		output->flags = cpu_to_be16(tmp_flags |
					    NFP_FL_OUT_FLAGS_USE_TUN);
		output->port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type);
	} else if (netif_is_lag_master(out_dev) &&
		   priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
		int gid;

		output->flags = cpu_to_be16(tmp_flags);
		gid = nfp_flower_lag_get_output_id(app, out_dev);
		if (gid < 0)
			return gid;
		output->port = cpu_to_be32(NFP_FL_LAG_OUT | gid);
	} else {
		/* Set action output parameters. */
		output->flags = cpu_to_be16(tmp_flags);

		if (nfp_netdev_is_nfp_repr(in_dev)) {
			/* Confirm ingress and egress are on the same device. */
			if (!netdev_port_same_parent_id(in_dev, out_dev))
				return -EOPNOTSUPP;
		}

		if (!nfp_netdev_is_nfp_repr(out_dev))
			return -EOPNOTSUPP;

		output->port = cpu_to_be32(nfp_repr_get_port_id(out_dev));
		if (!output->port)
			return -EOPNOTSUPP;
	}
	nfp_flow->meta.shortcut = output->port;

	return 0;
}

static enum nfp_flower_tun_type
nfp_fl_get_tun_from_act_l4_port(struct nfp_app *app,
				const struct flow_action_entry *act)
{
	const struct ip_tunnel_info *tun = act->tunnel;
	struct nfp_flower_priv *priv = app->priv;

	switch (tun->key.tp_dst) {
	case htons(NFP_FL_VXLAN_PORT):
		return NFP_FL_TUNNEL_VXLAN;
	case htons(NFP_FL_GENEVE_PORT):
		if (priv->flower_ext_feats & NFP_FL_FEATS_GENEVE)
			return NFP_FL_TUNNEL_GENEVE;
		/* FALLTHROUGH */
	default:
		return NFP_FL_TUNNEL_NONE;
	}
}
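
/* Example mapping (assuming the IANA-assigned defaults for
 * NFP_FL_VXLAN_PORT and NFP_FL_GENEVE_PORT in main.h):
 *
 *	tp_dst 4789 -> NFP_FL_TUNNEL_VXLAN
 *	tp_dst 6081 -> NFP_FL_TUNNEL_GENEVE (needs NFP_FL_FEATS_GENEVE)
 *	other       -> NFP_FL_TUNNEL_NONE
 */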

static struct nfp_fl_pre_tunnel *nfp_fl_pre_tunnel(char *act_data, int act_len)
{
	size_t act_size = sizeof(struct nfp_fl_pre_tunnel);
	struct nfp_fl_pre_tunnel *pre_tun_act;

	/* Pre_tunnel action must be first on action list.
	 * If other actions already exist they need to be pushed forward.
	 */
	if (act_len)
		memmove(act_data + act_size, act_data, act_len);

	pre_tun_act = (struct nfp_fl_pre_tunnel *)act_data;

	memset(pre_tun_act, 0, act_size);

	pre_tun_act->head.jump_id = NFP_FL_ACTION_OPCODE_PRE_TUNNEL;
	pre_tun_act->head.len_lw = act_size >> NFP_FL_LW_SIZ;

	return pre_tun_act;
}

static int
nfp_fl_push_geneve_options(struct nfp_fl_payload *nfp_fl, int *list_len,
			   const struct flow_action_entry *act)
{
	struct ip_tunnel_info *ip_tun = (struct ip_tunnel_info *)act->tunnel;
	int opt_len, opt_cnt, act_start, tot_push_len;
	u8 *src = ip_tunnel_info_opts(ip_tun);

	/* We need to populate the options in reverse order for HW.
	 * Therefore we go through the options, calculating the
	 * number of options and the total size, then we populate
	 * them in reverse order in the action list.
	 */
	opt_cnt = 0;
	tot_push_len = 0;
	opt_len = ip_tun->options_len;
	while (opt_len > 0) {
		struct geneve_opt *opt = (struct geneve_opt *)src;

		opt_cnt++;
		if (opt_cnt > NFP_FL_MAX_GENEVE_OPT_CNT)
			return -EOPNOTSUPP;

		tot_push_len += sizeof(struct nfp_fl_push_geneve) +
				opt->length * 4;
		if (tot_push_len > NFP_FL_MAX_GENEVE_OPT_ACT)
			return -EOPNOTSUPP;

		opt_len -= sizeof(struct geneve_opt) + opt->length * 4;
		src += sizeof(struct geneve_opt) + opt->length * 4;
	}

	if (*list_len + tot_push_len > NFP_FL_MAX_A_SIZ)
		return -EOPNOTSUPP;

	act_start = *list_len;
	*list_len += tot_push_len;
	src = ip_tunnel_info_opts(ip_tun);
	while (opt_cnt) {
		struct geneve_opt *opt = (struct geneve_opt *)src;
		struct nfp_fl_push_geneve *push;
		size_t act_size, len;

		opt_cnt--;
		act_size = sizeof(struct nfp_fl_push_geneve) + opt->length * 4;
		tot_push_len -= act_size;
		len = act_start + tot_push_len;

		push = (struct nfp_fl_push_geneve *)&nfp_fl->action_data[len];
		push->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_GENEVE;
		push->head.len_lw = act_size >> NFP_FL_LW_SIZ;
		push->reserved = 0;
		push->class = opt->opt_class;
		push->type = opt->type;
		push->length = opt->length;
		memcpy(&push->opt_data, opt->opt_data, opt->length * 4);

		src += sizeof(struct geneve_opt) + opt->length * 4;
	}

	return 0;
}
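
/* Worked example (illustrative): for tunnel metadata carrying two
 * geneve options, A followed by B, the first pass counts opt_cnt = 2
 * and tot_push_len = len(A') + len(B'), where A'/B' are the
 * nfp_fl_push_geneve encodings. The second pass walks A then B again
 * but counts tot_push_len down, so A lands at act_start + len(B') and
 * B at act_start, i.e. in reverse order as the HW expects:
 *
 *	action_data: ... | push_geneve(B') | push_geneve(A') | ...
 */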

static int
nfp_fl_set_ipv4_udp_tun(struct nfp_app *app,
			struct nfp_fl_set_ipv4_udp_tun *set_tun,
			const struct flow_action_entry *act,
			struct nfp_fl_pre_tunnel *pre_tun,
			enum nfp_flower_tun_type tun_type,
			struct net_device *netdev)
{
	size_t act_size = sizeof(struct nfp_fl_set_ipv4_udp_tun);
	const struct ip_tunnel_info *ip_tun = act->tunnel;
	struct nfp_flower_priv *priv = app->priv;
	u32 tmp_set_ip_tun_type_index = 0;
	/* Currently support one pre-tunnel so index is always 0. */
	int pretun_idx = 0;

	BUILD_BUG_ON(NFP_FL_TUNNEL_CSUM != TUNNEL_CSUM ||
		     NFP_FL_TUNNEL_KEY != TUNNEL_KEY ||
		     NFP_FL_TUNNEL_GENEVE_OPT != TUNNEL_GENEVE_OPT);
	if (ip_tun->options_len &&
	    (tun_type != NFP_FL_TUNNEL_GENEVE ||
	     !(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT)))
		return -EOPNOTSUPP;

	set_tun->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL;
	set_tun->head.len_lw = act_size >> NFP_FL_LW_SIZ;

	/* Set tunnel type and pre-tunnel index. */
	tmp_set_ip_tun_type_index |=
		FIELD_PREP(NFP_FL_IPV4_TUNNEL_TYPE, tun_type) |
		FIELD_PREP(NFP_FL_IPV4_PRE_TUN_INDEX, pretun_idx);

	set_tun->tun_type_index = cpu_to_be32(tmp_set_ip_tun_type_index);
	set_tun->tun_id = ip_tun->key.tun_id;

	if (ip_tun->key.ttl) {
		set_tun->ttl = ip_tun->key.ttl;
	} else {
		struct net *net = dev_net(netdev);
		struct flowi4 flow = {};
		struct rtable *rt;
		int err;

		/* Do a route lookup to determine ttl - if it fails then use
		 * the default. Note that CONFIG_INET is a requirement of
		 * CONFIG_NET_SWITCHDEV so must be defined here.
		 */
		flow.daddr = ip_tun->key.u.ipv4.dst;
		flow.flowi4_proto = IPPROTO_UDP;
		rt = ip_route_output_key(net, &flow);
		err = PTR_ERR_OR_ZERO(rt);
		if (!err) {
			set_tun->ttl = ip4_dst_hoplimit(&rt->dst);
			ip_rt_put(rt);
		} else {
			set_tun->ttl = net->ipv4.sysctl_ip_default_ttl;
		}
	}

	set_tun->tos = ip_tun->key.tos;

	if (!(ip_tun->key.tun_flags & NFP_FL_TUNNEL_KEY) ||
	    ip_tun->key.tun_flags & ~NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS)
		return -EOPNOTSUPP;
	set_tun->tun_flags = ip_tun->key.tun_flags;

	if (tun_type == NFP_FL_TUNNEL_GENEVE) {
		set_tun->tun_proto = htons(ETH_P_TEB);
		set_tun->tun_len = ip_tun->options_len / 4;
	}

	/* Complete pre_tunnel action. */
	pre_tun->ipv4_dst = ip_tun->key.u.ipv4.dst;

	return 0;
}

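/* Merge a new masked 32-bit write into an existing exact/mask pair.
 * Worked example (illustrative): with *p_exact = 0x00000000 and
 * *p_mask = 0x0000ffff already recorded, applying value = 0x12340000
 * with mask = 0xffff0000 yields *p_exact = 0x12340000 and
 * *p_mask = 0xffffffff.
 */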
static void nfp_fl_set_helper32(u32 value, u32 mask, u8 *p_exact, u8 *p_mask)
{
	u32 oldvalue = get_unaligned((u32 *)p_exact);
	u32 oldmask = get_unaligned((u32 *)p_mask);

	value &= mask;
	value |= oldvalue & ~mask;

	put_unaligned(oldmask | mask, (u32 *)p_mask);
	put_unaligned(value, (u32 *)p_exact);
}

static int
nfp_fl_set_eth(const struct flow_action_entry *act, u32 off,
	       struct nfp_fl_set_eth *set_eth)
{
	u32 exact, mask;

	if (off + 4 > ETH_ALEN * 2)
		return -EOPNOTSUPP;

	mask = ~act->mangle.mask;
	exact = act->mangle.val;

	if (exact & ~mask)
		return -EOPNOTSUPP;

	nfp_fl_set_helper32(exact, mask, &set_eth->eth_addr_val[off],
			    &set_eth->eth_addr_mask[off]);

	set_eth->reserved = cpu_to_be16(0);
	set_eth->head.jump_id = NFP_FL_ACTION_OPCODE_SET_ETHERNET;
	set_eth->head.len_lw = sizeof(*set_eth) >> NFP_FL_LW_SIZ;

	return 0;
}

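/* Overlay for the 32-bit pedit word at offset 8 of struct iphdr: it
 * spans the ttl, protocol and check fields, letting the code below
 * verify that only the ttl byte is being mangled.
 */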
struct ipv4_ttl_word {
	__u8	ttl;
	__u8	protocol;
	__sum16	check;
};

static int
nfp_fl_set_ip4(const struct flow_action_entry *act, u32 off,
	       struct nfp_fl_set_ip4_addrs *set_ip_addr,
	       struct nfp_fl_set_ip4_ttl_tos *set_ip_ttl_tos)
{
	struct ipv4_ttl_word *ttl_word_mask;
	struct ipv4_ttl_word *ttl_word;
	struct iphdr *tos_word_mask;
	struct iphdr *tos_word;
	__be32 exact, mask;

	/* We are expecting tcf_pedit to return a big-endian value */
	mask = (__force __be32)~act->mangle.mask;
	exact = (__force __be32)act->mangle.val;

	if (exact & ~mask)
		return -EOPNOTSUPP;

	switch (off) {
	case offsetof(struct iphdr, daddr):
		set_ip_addr->ipv4_dst_mask |= mask;
		set_ip_addr->ipv4_dst &= ~mask;
		set_ip_addr->ipv4_dst |= exact & mask;
		set_ip_addr->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS;
		set_ip_addr->head.len_lw = sizeof(*set_ip_addr) >>
					   NFP_FL_LW_SIZ;
		break;
	case offsetof(struct iphdr, saddr):
		set_ip_addr->ipv4_src_mask |= mask;
		set_ip_addr->ipv4_src &= ~mask;
		set_ip_addr->ipv4_src |= exact & mask;
		set_ip_addr->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS;
		set_ip_addr->head.len_lw = sizeof(*set_ip_addr) >>
					   NFP_FL_LW_SIZ;
		break;
	case offsetof(struct iphdr, ttl):
		ttl_word_mask = (struct ipv4_ttl_word *)&mask;
		ttl_word = (struct ipv4_ttl_word *)&exact;

		if (ttl_word_mask->protocol || ttl_word_mask->check)
			return -EOPNOTSUPP;

		set_ip_ttl_tos->ipv4_ttl_mask |= ttl_word_mask->ttl;
		set_ip_ttl_tos->ipv4_ttl &= ~ttl_word_mask->ttl;
		set_ip_ttl_tos->ipv4_ttl |= ttl_word->ttl & ttl_word_mask->ttl;
		set_ip_ttl_tos->head.jump_id =
			NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS;
		set_ip_ttl_tos->head.len_lw = sizeof(*set_ip_ttl_tos) >>
					      NFP_FL_LW_SIZ;
		break;
	case round_down(offsetof(struct iphdr, tos), 4):
		tos_word_mask = (struct iphdr *)&mask;
		tos_word = (struct iphdr *)&exact;

		if (tos_word_mask->version || tos_word_mask->ihl ||
		    tos_word_mask->tot_len)
			return -EOPNOTSUPP;

		set_ip_ttl_tos->ipv4_tos_mask |= tos_word_mask->tos;
		set_ip_ttl_tos->ipv4_tos &= ~tos_word_mask->tos;
		set_ip_ttl_tos->ipv4_tos |= tos_word->tos & tos_word_mask->tos;
		set_ip_ttl_tos->head.jump_id =
			NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS;
		set_ip_ttl_tos->head.len_lw = sizeof(*set_ip_ttl_tos) >>
					      NFP_FL_LW_SIZ;
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
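
/* Usage sketch (hypothetical tc invocation): "tc ... action pedit ex
 * munge ip ttl set 64" arrives here as FLOW_ACTION_MANGLE with an IPv4
 * htype and offset 8 (offsetof(struct iphdr, ttl)); after inverting
 * act->mangle.mask, only the ttl byte of the overlaid ipv4_ttl_word
 * carries mask bits, so the protocol/check guard above passes.
 */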

static void
nfp_fl_set_ip6_helper(int opcode_tag, u8 word, __be32 exact, __be32 mask,
		      struct nfp_fl_set_ipv6_addr *ip6)
{
	ip6->ipv6[word].mask |= mask;
	ip6->ipv6[word].exact &= ~mask;
	ip6->ipv6[word].exact |= exact & mask;

	ip6->reserved = cpu_to_be16(0);
	ip6->head.jump_id = opcode_tag;
	ip6->head.len_lw = sizeof(*ip6) >> NFP_FL_LW_SIZ;
}

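/* Overlay for the 32-bit pedit word at offset 4 of struct ipv6hdr: it
 * spans payload_len, nexthdr and hop_limit, letting the code below
 * verify that only the hop limit byte is being mangled.
 */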
struct ipv6_hop_limit_word {
	__be16 payload_len;
	u8 nexthdr;
	u8 hop_limit;
};

static int
nfp_fl_set_ip6_hop_limit_flow_label(u32 off, __be32 exact, __be32 mask,
				    struct nfp_fl_set_ipv6_tc_hl_fl *ip_hl_fl)
{
	struct ipv6_hop_limit_word *fl_hl_mask;
	struct ipv6_hop_limit_word *fl_hl;

	switch (off) {
	case offsetof(struct ipv6hdr, payload_len):
		fl_hl_mask = (struct ipv6_hop_limit_word *)&mask;
		fl_hl = (struct ipv6_hop_limit_word *)&exact;

		if (fl_hl_mask->nexthdr || fl_hl_mask->payload_len)
			return -EOPNOTSUPP;

		ip_hl_fl->ipv6_hop_limit_mask |= fl_hl_mask->hop_limit;
		ip_hl_fl->ipv6_hop_limit &= ~fl_hl_mask->hop_limit;
		ip_hl_fl->ipv6_hop_limit |= fl_hl->hop_limit &
					    fl_hl_mask->hop_limit;
		break;
	case round_down(offsetof(struct ipv6hdr, flow_lbl), 4):
		if (mask & ~IPV6_FLOW_LABEL_MASK ||
		    exact & ~IPV6_FLOW_LABEL_MASK)
			return -EOPNOTSUPP;

		ip_hl_fl->ipv6_label_mask |= mask;
		ip_hl_fl->ipv6_label &= ~mask;
		ip_hl_fl->ipv6_label |= exact & mask;
		break;
	}

	ip_hl_fl->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV6_TC_HL_FL;
	ip_hl_fl->head.len_lw = sizeof(*ip_hl_fl) >> NFP_FL_LW_SIZ;

	return 0;
}

static int
nfp_fl_set_ip6(const struct flow_action_entry *act, u32 off,
	       struct nfp_fl_set_ipv6_addr *ip_dst,
	       struct nfp_fl_set_ipv6_addr *ip_src,
	       struct nfp_fl_set_ipv6_tc_hl_fl *ip_hl_fl)
{
	__be32 exact, mask;
	int err = 0;
	u8 word;

	/* We are expecting tcf_pedit to return a big-endian value */
	mask = (__force __be32)~act->mangle.mask;
	exact = (__force __be32)act->mangle.val;

	if (exact & ~mask)
		return -EOPNOTSUPP;

	if (off < offsetof(struct ipv6hdr, saddr)) {
		err = nfp_fl_set_ip6_hop_limit_flow_label(off, exact, mask,
							  ip_hl_fl);
	} else if (off < offsetof(struct ipv6hdr, daddr)) {
		word = (off - offsetof(struct ipv6hdr, saddr)) / sizeof(exact);
		nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_SRC, word,
				      exact, mask, ip_src);
	} else if (off < offsetof(struct ipv6hdr, daddr) +
		   sizeof(struct in6_addr)) {
		word = (off - offsetof(struct ipv6hdr, daddr)) / sizeof(exact);
		nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_DST, word,
				      exact, mask, ip_dst);
	} else {
		return -EOPNOTSUPP;
	}

	return err;
}
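
/* Worked example (illustrative): a pedit of the third 32-bit word of
 * the IPv6 source address arrives with off = 16; since
 * offsetof(struct ipv6hdr, saddr) is 8, word = (16 - 8) / 4 = 2 and
 * the write is folded into ip_src->ipv6[2] above.
 */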

static int
nfp_fl_set_tport(const struct flow_action_entry *act, u32 off,
		 struct nfp_fl_set_tport *set_tport, int opcode)
{
	u32 exact, mask;

	if (off)
		return -EOPNOTSUPP;

	mask = ~act->mangle.mask;
	exact = act->mangle.val;

	if (exact & ~mask)
		return -EOPNOTSUPP;

	nfp_fl_set_helper32(exact, mask, set_tport->tp_port_val,
			    set_tport->tp_port_mask);

	set_tport->reserved = cpu_to_be16(0);
	set_tport->head.jump_id = opcode;
	set_tport->head.len_lw = sizeof(*set_tport) >> NFP_FL_LW_SIZ;

	return 0;
}

static u32 nfp_fl_csum_l4_to_flag(u8 ip_proto)
{
	switch (ip_proto) {
	case 0:
		/* Filter doesn't force proto match;
		 * both TCP and UDP will be updated if encountered.
		 */
		return TCA_CSUM_UPDATE_FLAG_TCP | TCA_CSUM_UPDATE_FLAG_UDP;
	case IPPROTO_TCP:
		return TCA_CSUM_UPDATE_FLAG_TCP;
	case IPPROTO_UDP:
		return TCA_CSUM_UPDATE_FLAG_UDP;
	default:
		/* All other protocols will be ignored by FW */
		return 0;
	}
}

static int
nfp_fl_pedit(const struct flow_action_entry *act,
	     struct tc_cls_flower_offload *flow,
	     char *nfp_action, int *a_len, u32 *csum_updated)
{
	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
	struct nfp_fl_set_ipv6_addr set_ip6_dst, set_ip6_src;
	struct nfp_fl_set_ipv6_tc_hl_fl set_ip6_tc_hl_fl;
	struct nfp_fl_set_ip4_ttl_tos set_ip_ttl_tos;
	struct nfp_fl_set_ip4_addrs set_ip_addr;
	enum flow_action_mangle_base htype;
	struct nfp_fl_set_tport set_tport;
	struct nfp_fl_set_eth set_eth;
	size_t act_size = 0;
	u8 ip_proto = 0;
	u32 offset;
	int err;

	memset(&set_ip6_tc_hl_fl, 0, sizeof(set_ip6_tc_hl_fl));
	memset(&set_ip_ttl_tos, 0, sizeof(set_ip_ttl_tos));
	memset(&set_ip6_dst, 0, sizeof(set_ip6_dst));
	memset(&set_ip6_src, 0, sizeof(set_ip6_src));
	memset(&set_ip_addr, 0, sizeof(set_ip_addr));
	memset(&set_tport, 0, sizeof(set_tport));
	memset(&set_eth, 0, sizeof(set_eth));

	htype = act->mangle.htype;
	offset = act->mangle.offset;

	switch (htype) {
	case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
		err = nfp_fl_set_eth(act, offset, &set_eth);
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
		err = nfp_fl_set_ip4(act, offset, &set_ip_addr,
				     &set_ip_ttl_tos);
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
		err = nfp_fl_set_ip6(act, offset, &set_ip6_dst,
				     &set_ip6_src, &set_ip6_tc_hl_fl);
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
		err = nfp_fl_set_tport(act, offset, &set_tport,
				       NFP_FL_ACTION_OPCODE_SET_TCP);
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
		err = nfp_fl_set_tport(act, offset, &set_tport,
				       NFP_FL_ACTION_OPCODE_SET_UDP);
		break;
	default:
		return -EOPNOTSUPP;
	}
	if (err)
		return err;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		ip_proto = match.key->ip_proto;
	}

	if (set_eth.head.len_lw) {
		act_size = sizeof(set_eth);
		memcpy(nfp_action, &set_eth, act_size);
		*a_len += act_size;
	}
	if (set_ip_ttl_tos.head.len_lw) {
		nfp_action += act_size;
		act_size = sizeof(set_ip_ttl_tos);
		memcpy(nfp_action, &set_ip_ttl_tos, act_size);
		*a_len += act_size;

		/* Hardware will automatically fix IPv4 and TCP/UDP checksum. */
		*csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR |
				 nfp_fl_csum_l4_to_flag(ip_proto);
	}
	if (set_ip_addr.head.len_lw) {
		nfp_action += act_size;
		act_size = sizeof(set_ip_addr);
		memcpy(nfp_action, &set_ip_addr, act_size);
		*a_len += act_size;

		/* Hardware will automatically fix IPv4 and TCP/UDP checksum. */
		*csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR |
				 nfp_fl_csum_l4_to_flag(ip_proto);
	}
	if (set_ip6_tc_hl_fl.head.len_lw) {
		nfp_action += act_size;
		act_size = sizeof(set_ip6_tc_hl_fl);
		memcpy(nfp_action, &set_ip6_tc_hl_fl, act_size);
		*a_len += act_size;

		/* Hardware will automatically fix TCP/UDP checksum. */
		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
	}
	if (set_ip6_dst.head.len_lw && set_ip6_src.head.len_lw) {
		/* TC compiles set src and dst IPv6 address as a single action,
		 * but the hardware requires two separate actions.
		 */
		nfp_action += act_size;
		act_size = sizeof(set_ip6_src);
		memcpy(nfp_action, &set_ip6_src, act_size);
		*a_len += act_size;

		act_size = sizeof(set_ip6_dst);
		memcpy(&nfp_action[sizeof(set_ip6_src)], &set_ip6_dst,
		       act_size);
		*a_len += act_size;

		/* Hardware will automatically fix TCP/UDP checksum. */
		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
	} else if (set_ip6_dst.head.len_lw) {
		nfp_action += act_size;
		act_size = sizeof(set_ip6_dst);
		memcpy(nfp_action, &set_ip6_dst, act_size);
		*a_len += act_size;

		/* Hardware will automatically fix TCP/UDP checksum. */
		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
	} else if (set_ip6_src.head.len_lw) {
		nfp_action += act_size;
		act_size = sizeof(set_ip6_src);
		memcpy(nfp_action, &set_ip6_src, act_size);
		*a_len += act_size;

		/* Hardware will automatically fix TCP/UDP checksum. */
		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
	}
	if (set_tport.head.len_lw) {
		nfp_action += act_size;
		act_size = sizeof(set_tport);
		memcpy(nfp_action, &set_tport, act_size);
		*a_len += act_size;

		/* Hardware will automatically fix TCP/UDP checksum. */
		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
	}

	return 0;
}
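
/* Note: each FLOW_ACTION_MANGLE entry populates exactly one of the
 * temporary structs above, and populated actions are appended in a
 * fixed order: ethernet, IPv4 ttl/tos, IPv4 addresses, IPv6
 * tc/hoplimit/flowlabel, IPv6 src/dst, then transport ports. Checksum
 * fix-ups the HW performs implicitly accumulate in *csum_updated for
 * later validation against any explicit csum action.
 */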

static int
nfp_flower_output_action(struct nfp_app *app,
			 const struct flow_action_entry *act,
			 struct nfp_fl_payload *nfp_fl, int *a_len,
			 struct net_device *netdev, bool last,
			 enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
			 int *out_cnt, u32 *csum_updated)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_output *output;
	int err, prelag_size;

	/* If csum_updated has not been reset by now, it means HW will
	 * incorrectly update csums when they are not requested.
	 */
	if (*csum_updated)
		return -EOPNOTSUPP;

	if (*a_len + sizeof(struct nfp_fl_output) > NFP_FL_MAX_A_SIZ)
		return -EOPNOTSUPP;

	output = (struct nfp_fl_output *)&nfp_fl->action_data[*a_len];
	err = nfp_fl_output(app, output, act, nfp_fl, last, netdev, *tun_type,
			    tun_out_cnt);
	if (err)
		return err;

	*a_len += sizeof(struct nfp_fl_output);

	if (priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
		/* nfp_fl_pre_lag returns -err or the size of the pre-lag
		 * action added. This will be 0 if it is not egressing to a
		 * lag dev.
		 */
		prelag_size = nfp_fl_pre_lag(app, act, nfp_fl, *a_len);
		if (prelag_size < 0)
			return prelag_size;
		else if (prelag_size > 0 && (!last || *out_cnt))
			return -EOPNOTSUPP;

		*a_len += prelag_size;
	}
	(*out_cnt)++;

	return 0;
}

static int
nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
		       struct tc_cls_flower_offload *flow,
		       struct nfp_fl_payload *nfp_fl, int *a_len,
		       struct net_device *netdev,
		       enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
		       int *out_cnt, u32 *csum_updated)
{
	struct nfp_fl_set_ipv4_udp_tun *set_tun;
	struct nfp_fl_pre_tunnel *pre_tun;
	struct nfp_fl_push_vlan *psh_v;
	struct nfp_fl_pop_vlan *pop_v;
	int err;

	switch (act->id) {
	case FLOW_ACTION_DROP:
		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_DROP);
		break;
	case FLOW_ACTION_REDIRECT:
		err = nfp_flower_output_action(app, act, nfp_fl, a_len, netdev,
					       true, tun_type, tun_out_cnt,
					       out_cnt, csum_updated);
		if (err)
			return err;
		break;
	case FLOW_ACTION_MIRRED:
		err = nfp_flower_output_action(app, act, nfp_fl, a_len, netdev,
					       false, tun_type, tun_out_cnt,
					       out_cnt, csum_updated);
		if (err)
			return err;
		break;
	case FLOW_ACTION_VLAN_POP:
		if (*a_len + sizeof(struct nfp_fl_pop_vlan) > NFP_FL_MAX_A_SIZ)
			return -EOPNOTSUPP;

		pop_v = (struct nfp_fl_pop_vlan *)&nfp_fl->action_data[*a_len];
		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_POPV);

		nfp_fl_pop_vlan(pop_v);
		*a_len += sizeof(struct nfp_fl_pop_vlan);
		break;
	case FLOW_ACTION_VLAN_PUSH:
		if (*a_len + sizeof(struct nfp_fl_push_vlan) > NFP_FL_MAX_A_SIZ)
			return -EOPNOTSUPP;

		psh_v = (struct nfp_fl_push_vlan *)&nfp_fl->action_data[*a_len];
		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

		nfp_fl_push_vlan(psh_v, act);
		*a_len += sizeof(struct nfp_fl_push_vlan);
		break;
	case FLOW_ACTION_TUNNEL_ENCAP: {
		const struct ip_tunnel_info *ip_tun = act->tunnel;

		*tun_type = nfp_fl_get_tun_from_act_l4_port(app, act);
		if (*tun_type == NFP_FL_TUNNEL_NONE)
			return -EOPNOTSUPP;

		if (ip_tun->mode & ~NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS)
			return -EOPNOTSUPP;

		/* Pre-tunnel action is required for tunnel encap.
		 * This checks for next hop entries on NFP.
		 * If none, the packet falls back before applying other actions.
		 */
		if (*a_len + sizeof(struct nfp_fl_pre_tunnel) +
		    sizeof(struct nfp_fl_set_ipv4_udp_tun) > NFP_FL_MAX_A_SIZ)
			return -EOPNOTSUPP;

		pre_tun = nfp_fl_pre_tunnel(nfp_fl->action_data, *a_len);
		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
		*a_len += sizeof(struct nfp_fl_pre_tunnel);

		err = nfp_fl_push_geneve_options(nfp_fl, a_len, act);
		if (err)
			return err;

		set_tun = (void *)&nfp_fl->action_data[*a_len];
		err = nfp_fl_set_ipv4_udp_tun(app, set_tun, act, pre_tun,
					      *tun_type, netdev);
		if (err)
			return err;
		*a_len += sizeof(struct nfp_fl_set_ipv4_udp_tun);
		}
		break;
	case FLOW_ACTION_TUNNEL_DECAP:
		/* Tunnel decap is handled by default so accept action. */
		return 0;
	case FLOW_ACTION_MANGLE:
		if (nfp_fl_pedit(act, flow, &nfp_fl->action_data[*a_len],
				 a_len, csum_updated))
			return -EOPNOTSUPP;
		break;
	case FLOW_ACTION_CSUM:
		/* csum action requests recalc of something we have not fixed */
		if (act->csum_flags & ~*csum_updated)
			return -EOPNOTSUPP;
		/* If we will correctly fix the csum we can remove it from the
		 * csum update list, which will later be used to check support.
		 */
		*csum_updated &= ~act->csum_flags;
		break;
	default:
		/* Currently we do not handle any other actions. */
		return -EOPNOTSUPP;
	}

	return 0;
}

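/* Compile the flow_action list of @flow into nfp_flow->action_data.
 * For a simple encap + redirect rule the compiled list ends up as
 * (sketch, assuming the encap is the first action in the TC list):
 *
 *	| pre_tunnel | push_geneve... | set_ipv4_udp_tun | output |
 *
 * with pre_tunnel moved to the front by nfp_fl_pre_tunnel() regardless
 * of where the encap action sat in the TC action list.
 */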
int nfp_flower_compile_action(struct nfp_app *app,
			      struct tc_cls_flower_offload *flow,
			      struct net_device *netdev,
			      struct nfp_fl_payload *nfp_flow)
{
	int act_len, act_cnt, err, tun_out_cnt, out_cnt, i;
	enum nfp_flower_tun_type tun_type;
	struct flow_action_entry *act;
	u32 csum_updated = 0;

	memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ);
	nfp_flow->meta.act_len = 0;
	tun_type = NFP_FL_TUNNEL_NONE;
	act_len = 0;
	act_cnt = 0;
	tun_out_cnt = 0;
	out_cnt = 0;

	flow_action_for_each(i, act, &flow->rule->action) {
		err = nfp_flower_loop_action(app, act, flow, nfp_flow, &act_len,
					     netdev, &tun_type, &tun_out_cnt,
					     &out_cnt, &csum_updated);
		if (err)
			return err;
		act_cnt++;
	}

	/* The shortcut is an optimisation for small action lists; it
	 * unfortunately cannot be used once the list holds more than
	 * one action.
	 */
	if (act_cnt > 1)
		nfp_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

	nfp_flow->meta.act_len = act_len;

	return 0;
}