1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2 /* Copyright (C) 2017-2018 Netronome Systems, Inc. */
3 
4 #include <linux/bitfield.h>
5 #include <net/pkt_cls.h>
6 
7 #include "cmsg.h"
8 #include "main.h"
9 
10 static void
11 nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *ext,
12 			    struct nfp_flower_meta_tci *msk,
13 			    struct tc_cls_flower_offload *flow, u8 key_type)
14 {
15 	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
16 	u16 tmp_tci;
17 
18 	memset(ext, 0, sizeof(struct nfp_flower_meta_tci));
19 	memset(msk, 0, sizeof(struct nfp_flower_meta_tci));
20 
21 	/* Populate the metadata frame. */
22 	ext->nfp_flow_key_layer = key_type;
23 	ext->mask_id = ~0;
24 
25 	msk->nfp_flow_key_layer = key_type;
26 	msk->mask_id = ~0;
27 
28 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
29 		struct flow_match_vlan match;
30 
31 		flow_rule_match_vlan(rule, &match);
32 		/* Populate the tci field. */
33 		if (match.key->vlan_id || match.key->vlan_priority) {
34 			tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
35 					     match.key->vlan_priority) |
36 				  FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
37 					     match.key->vlan_id) |
38 				  NFP_FLOWER_MASK_VLAN_CFI;
39 			ext->tci = cpu_to_be16(tmp_tci);
40 			tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
41 					     match.mask->vlan_priority) |
42 				  FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
43 					     match.mask->vlan_id) |
44 				  NFP_FLOWER_MASK_VLAN_CFI;
45 			msk->tci = cpu_to_be16(tmp_tci);
46 		}
47 	}
48 }
49 
/* Write the second-level key-layer bitmap (@key_ext) into the extended
 * metadata field, converting to firmware (big-endian) byte order.
 */
static void
nfp_flower_compile_ext_meta(struct nfp_flower_ext_meta *frame, u32 key_ext)
{
	frame->nfp_flow_key_layer2 = cpu_to_be32(key_ext);
}
55 
56 static int
57 nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port,
58 			bool mask_version, enum nfp_flower_tun_type tun_type)
59 {
60 	if (mask_version) {
61 		frame->in_port = cpu_to_be32(~0);
62 		return 0;
63 	}
64 
65 	if (tun_type) {
66 		frame->in_port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type);
67 	} else {
68 		if (!cmsg_port)
69 			return -EOPNOTSUPP;
70 		frame->in_port = cpu_to_be32(cmsg_port);
71 	}
72 
73 	return 0;
74 }
75 
76 static void
77 nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext,
78 		       struct nfp_flower_mac_mpls *msk,
79 		       struct tc_cls_flower_offload *flow)
80 {
81 	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
82 
83 	memset(ext, 0, sizeof(struct nfp_flower_mac_mpls));
84 	memset(msk, 0, sizeof(struct nfp_flower_mac_mpls));
85 
86 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
87 		struct flow_match_eth_addrs match;
88 
89 		flow_rule_match_eth_addrs(rule, &match);
90 		/* Populate mac frame. */
91 		ether_addr_copy(ext->mac_dst, &match.key->dst[0]);
92 		ether_addr_copy(ext->mac_src, &match.key->src[0]);
93 		ether_addr_copy(msk->mac_dst, &match.mask->dst[0]);
94 		ether_addr_copy(msk->mac_src, &match.mask->src[0]);
95 	}
96 
97 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) {
98 		struct flow_match_mpls match;
99 		u32 t_mpls;
100 
101 		flow_rule_match_mpls(rule, &match);
102 		t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB, match.key->mpls_label) |
103 			 FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC, match.key->mpls_tc) |
104 			 FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS, match.key->mpls_bos) |
105 			 NFP_FLOWER_MASK_MPLS_Q;
106 		ext->mpls_lse = cpu_to_be32(t_mpls);
107 		t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB, match.mask->mpls_label) |
108 			 FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC, match.mask->mpls_tc) |
109 			 FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS, match.mask->mpls_bos) |
110 			 NFP_FLOWER_MASK_MPLS_Q;
111 		msk->mpls_lse = cpu_to_be32(t_mpls);
112 	} else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
113 		/* Check for mpls ether type and set NFP_FLOWER_MASK_MPLS_Q
114 		 * bit, which indicates an mpls ether type but without any
115 		 * mpls fields.
116 		 */
117 		struct flow_match_basic match;
118 
119 		flow_rule_match_basic(rule, &match);
120 		if (match.key->n_proto == cpu_to_be16(ETH_P_MPLS_UC) ||
121 		    match.key->n_proto == cpu_to_be16(ETH_P_MPLS_MC)) {
122 			ext->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
123 			msk->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
124 		}
125 	}
126 }
127 
128 static void
129 nfp_flower_compile_tport(struct nfp_flower_tp_ports *ext,
130 			 struct nfp_flower_tp_ports *msk,
131 			 struct tc_cls_flower_offload *flow)
132 {
133 	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
134 
135 	memset(ext, 0, sizeof(struct nfp_flower_tp_ports));
136 	memset(msk, 0, sizeof(struct nfp_flower_tp_ports));
137 
138 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
139 		struct flow_match_ports match;
140 
141 		flow_rule_match_ports(rule, &match);
142 		ext->port_src = match.key->src;
143 		ext->port_dst = match.key->dst;
144 		msk->port_src = match.mask->src;
145 		msk->port_dst = match.mask->dst;
146 	}
147 }
148 
149 static void
150 nfp_flower_compile_ip_ext(struct nfp_flower_ip_ext *ext,
151 			  struct nfp_flower_ip_ext *msk,
152 			  struct tc_cls_flower_offload *flow)
153 {
154 	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
155 
156 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
157 		struct flow_match_basic match;
158 
159 		flow_rule_match_basic(rule, &match);
160 		ext->proto = match.key->ip_proto;
161 		msk->proto = match.mask->ip_proto;
162 	}
163 
164 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
165 		struct flow_match_ip match;
166 
167 		flow_rule_match_ip(rule, &match);
168 		ext->tos = match.key->tos;
169 		ext->ttl = match.key->ttl;
170 		msk->tos = match.mask->tos;
171 		msk->ttl = match.mask->ttl;
172 	}
173 
174 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
175 		u16 tcp_flags, tcp_flags_mask;
176 		struct flow_match_tcp match;
177 
178 		flow_rule_match_tcp(rule, &match);
179 		tcp_flags = be16_to_cpu(match.key->flags);
180 		tcp_flags_mask = be16_to_cpu(match.mask->flags);
181 
182 		if (tcp_flags & TCPHDR_FIN)
183 			ext->flags |= NFP_FL_TCP_FLAG_FIN;
184 		if (tcp_flags_mask & TCPHDR_FIN)
185 			msk->flags |= NFP_FL_TCP_FLAG_FIN;
186 
187 		if (tcp_flags & TCPHDR_SYN)
188 			ext->flags |= NFP_FL_TCP_FLAG_SYN;
189 		if (tcp_flags_mask & TCPHDR_SYN)
190 			msk->flags |= NFP_FL_TCP_FLAG_SYN;
191 
192 		if (tcp_flags & TCPHDR_RST)
193 			ext->flags |= NFP_FL_TCP_FLAG_RST;
194 		if (tcp_flags_mask & TCPHDR_RST)
195 			msk->flags |= NFP_FL_TCP_FLAG_RST;
196 
197 		if (tcp_flags & TCPHDR_PSH)
198 			ext->flags |= NFP_FL_TCP_FLAG_PSH;
199 		if (tcp_flags_mask & TCPHDR_PSH)
200 			msk->flags |= NFP_FL_TCP_FLAG_PSH;
201 
202 		if (tcp_flags & TCPHDR_URG)
203 			ext->flags |= NFP_FL_TCP_FLAG_URG;
204 		if (tcp_flags_mask & TCPHDR_URG)
205 			msk->flags |= NFP_FL_TCP_FLAG_URG;
206 	}
207 
208 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
209 		struct flow_match_control match;
210 
211 		flow_rule_match_control(rule, &match);
212 		if (match.key->flags & FLOW_DIS_IS_FRAGMENT)
213 			ext->flags |= NFP_FL_IP_FRAGMENTED;
214 		if (match.mask->flags & FLOW_DIS_IS_FRAGMENT)
215 			msk->flags |= NFP_FL_IP_FRAGMENTED;
216 		if (match.key->flags & FLOW_DIS_FIRST_FRAG)
217 			ext->flags |= NFP_FL_IP_FRAG_FIRST;
218 		if (match.mask->flags & FLOW_DIS_FIRST_FRAG)
219 			msk->flags |= NFP_FL_IP_FRAG_FIRST;
220 	}
221 }
222 
223 static void
224 nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *ext,
225 			struct nfp_flower_ipv4 *msk,
226 			struct tc_cls_flower_offload *flow)
227 {
228 	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
229 	struct flow_match_ipv4_addrs match;
230 
231 	memset(ext, 0, sizeof(struct nfp_flower_ipv4));
232 	memset(msk, 0, sizeof(struct nfp_flower_ipv4));
233 
234 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
235 		flow_rule_match_ipv4_addrs(rule, &match);
236 		ext->ipv4_src = match.key->src;
237 		ext->ipv4_dst = match.key->dst;
238 		msk->ipv4_src = match.mask->src;
239 		msk->ipv4_dst = match.mask->dst;
240 	}
241 
242 	nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, flow);
243 }
244 
245 static void
246 nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *ext,
247 			struct nfp_flower_ipv6 *msk,
248 			struct tc_cls_flower_offload *flow)
249 {
250 	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
251 
252 	memset(ext, 0, sizeof(struct nfp_flower_ipv6));
253 	memset(msk, 0, sizeof(struct nfp_flower_ipv6));
254 
255 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
256 		struct flow_match_ipv6_addrs match;
257 
258 		flow_rule_match_ipv6_addrs(rule, &match);
259 		ext->ipv6_src = match.key->src;
260 		ext->ipv6_dst = match.key->dst;
261 		msk->ipv6_src = match.mask->src;
262 		msk->ipv6_dst = match.mask->dst;
263 	}
264 
265 	nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, flow);
266 }
267 
268 static int
269 nfp_flower_compile_geneve_opt(void *ext, void *msk,
270 			      struct tc_cls_flower_offload *flow)
271 {
272 	struct flow_match_enc_opts match;
273 
274 	flow_rule_match_enc_opts(flow->rule, &match);
275 	memcpy(ext, match.key->data, match.key->len);
276 	memcpy(msk, match.mask->data, match.mask->len);
277 
278 	return 0;
279 }
280 
281 static void
282 nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *ext,
283 				struct nfp_flower_ipv4_udp_tun *msk,
284 				struct tc_cls_flower_offload *flow)
285 {
286 	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
287 
288 	memset(ext, 0, sizeof(struct nfp_flower_ipv4_udp_tun));
289 	memset(msk, 0, sizeof(struct nfp_flower_ipv4_udp_tun));
290 
291 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
292 		struct flow_match_enc_keyid match;
293 		u32 temp_vni;
294 
295 		flow_rule_match_enc_keyid(rule, &match);
296 		temp_vni = be32_to_cpu(match.key->keyid) << NFP_FL_TUN_VNI_OFFSET;
297 		ext->tun_id = cpu_to_be32(temp_vni);
298 		temp_vni = be32_to_cpu(match.mask->keyid) << NFP_FL_TUN_VNI_OFFSET;
299 		msk->tun_id = cpu_to_be32(temp_vni);
300 	}
301 
302 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
303 		struct flow_match_ipv4_addrs match;
304 
305 		flow_rule_match_enc_ipv4_addrs(rule, &match);
306 		ext->ip_src = match.key->src;
307 		ext->ip_dst = match.key->dst;
308 		msk->ip_src = match.mask->src;
309 		msk->ip_dst = match.mask->dst;
310 	}
311 
312 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
313 		struct flow_match_ip match;
314 
315 		flow_rule_match_enc_ip(rule, &match);
316 		ext->tos = match.key->tos;
317 		ext->ttl = match.key->ttl;
318 		msk->tos = match.mask->tos;
319 		msk->ttl = match.mask->ttl;
320 	}
321 }
322 
/* nfp_flower_compile_flow_match() - serialize a flower rule into the
 * firmware match blobs.
 * @app:	nfp application context (used for tunnel IP offload bookkeeping)
 * @flow:	TC flower offload rule to compile
 * @key_ls:	pre-computed key layout (layer bitmaps and total key size)
 * @netdev:	ingress netdev the rule is attached to
 * @nfp_flow:	payload whose unmasked_data/mask_data buffers are filled
 * @tun_type:	tunnel type for tunnel-decap rules, or none
 *
 * The key and mask blobs are written layer by layer; the ext/msk cursors
 * must advance in exactly the order the firmware expects the layers
 * (meta_tci, [ext_meta], in_port, [mac], [tp], [ipv4], [ipv6], [udp_tun
 * [+ geneve opts]]), as selected by the bits in @key_ls.
 *
 * Return: 0 on success, negative errno on failure.
 */
int nfp_flower_compile_flow_match(struct nfp_app *app,
				  struct tc_cls_flower_offload *flow,
				  struct nfp_fl_key_ls *key_ls,
				  struct net_device *netdev,
				  struct nfp_fl_payload *nfp_flow,
				  enum nfp_flower_tun_type tun_type)
{
	u32 cmsg_port = 0;
	int err;
	u8 *ext;
	u8 *msk;

	/* Only representor netdevs carry a firmware port id; other devices
	 * leave cmsg_port at 0 (rejected later unless this is a tunnel rule).
	 */
	if (nfp_netdev_is_nfp_repr(netdev))
		cmsg_port = nfp_repr_get_port_id(netdev);

	memset(nfp_flow->unmasked_data, 0, key_ls->key_size);
	memset(nfp_flow->mask_data, 0, key_ls->key_size);

	/* ext/msk walk the unmasked and mask buffers in lock-step. */
	ext = nfp_flow->unmasked_data;
	msk = nfp_flow->mask_data;

	nfp_flower_compile_meta_tci((struct nfp_flower_meta_tci *)ext,
				    (struct nfp_flower_meta_tci *)msk,
				    flow, key_ls->key_layer);
	ext += sizeof(struct nfp_flower_meta_tci);
	msk += sizeof(struct nfp_flower_meta_tci);

	/* Populate Extended Metadata if Required. */
	if (NFP_FLOWER_LAYER_EXT_META & key_ls->key_layer) {
		nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)ext,
					    key_ls->key_layer_two);
		nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)msk,
					    key_ls->key_layer_two);
		ext += sizeof(struct nfp_flower_ext_meta);
		msk += sizeof(struct nfp_flower_ext_meta);
	}

	/* Populate Exact Port data. */
	err = nfp_flower_compile_port((struct nfp_flower_in_port *)ext,
				      cmsg_port, false, tun_type);
	if (err)
		return err;

	/* Populate Mask Port Data. */
	err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk,
				      cmsg_port, true, tun_type);
	if (err)
		return err;

	ext += sizeof(struct nfp_flower_in_port);
	msk += sizeof(struct nfp_flower_in_port);

	if (NFP_FLOWER_LAYER_MAC & key_ls->key_layer) {
		nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)ext,
				       (struct nfp_flower_mac_mpls *)msk,
				       flow);
		ext += sizeof(struct nfp_flower_mac_mpls);
		msk += sizeof(struct nfp_flower_mac_mpls);
	}

	if (NFP_FLOWER_LAYER_TP & key_ls->key_layer) {
		nfp_flower_compile_tport((struct nfp_flower_tp_ports *)ext,
					 (struct nfp_flower_tp_ports *)msk,
					 flow);
		ext += sizeof(struct nfp_flower_tp_ports);
		msk += sizeof(struct nfp_flower_tp_ports);
	}

	if (NFP_FLOWER_LAYER_IPV4 & key_ls->key_layer) {
		nfp_flower_compile_ipv4((struct nfp_flower_ipv4 *)ext,
					(struct nfp_flower_ipv4 *)msk,
					flow);
		ext += sizeof(struct nfp_flower_ipv4);
		msk += sizeof(struct nfp_flower_ipv4);
	}

	if (NFP_FLOWER_LAYER_IPV6 & key_ls->key_layer) {
		nfp_flower_compile_ipv6((struct nfp_flower_ipv6 *)ext,
					(struct nfp_flower_ipv6 *)msk,
					flow);
		ext += sizeof(struct nfp_flower_ipv6);
		msk += sizeof(struct nfp_flower_ipv6);
	}

	if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN ||
	    key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE) {
		__be32 tun_dst;

		nfp_flower_compile_ipv4_udp_tun((void *)ext, (void *)msk, flow);
		tun_dst = ((struct nfp_flower_ipv4_udp_tun *)ext)->ip_dst;
		ext += sizeof(struct nfp_flower_ipv4_udp_tun);
		msk += sizeof(struct nfp_flower_ipv4_udp_tun);

		/* Store the tunnel destination in the rule data.
		 * This must be present and be an exact match.
		 */
		nfp_flow->nfp_tun_ipv4_addr = tun_dst;
		nfp_tunnel_add_ipv4_off(app, tun_dst);

		/* Geneve options are the final, variable-length layer. */
		if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP) {
			err = nfp_flower_compile_geneve_opt(ext, msk, flow);
			if (err)
				return err;
		}
	}

	return 0;
}
431