// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/bitfield.h>
#include <net/pkt_cls.h>

#include "cmsg.h"
#include "main.h"

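/* Build the metadata/TCI section of the match key: the key layer
 * bitmap, an all-ones mask id and, when the rule matches on VLAN, a
 * TCI word packing the VLAN priority and ID together with the
 * NFP_FLOWER_MASK_VLAN_CFI bit. With mask_version set the fields are
 * read from flow->mask rather than flow->key, so the same routine
 * fills the mask half of the key.
 */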
static void
nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *frame,
			    struct tc_cls_flower_offload *flow, u8 key_type,
			    bool mask_version)
{
	struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
	struct flow_dissector_key_vlan *flow_vlan;
	u16 tmp_tci;

	memset(frame, 0, sizeof(struct nfp_flower_meta_tci));
	/* Populate the metadata frame. */
	frame->nfp_flow_key_layer = key_type;
	frame->mask_id = ~0;

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		flow_vlan = skb_flow_dissector_target(flow->dissector,
						      FLOW_DISSECTOR_KEY_VLAN,
						      target);
		/* Populate the tci field. */
		if (flow_vlan->vlan_id || flow_vlan->vlan_priority) {
			tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
					     flow_vlan->vlan_priority) |
				  FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
					     flow_vlan->vlan_id) |
				  NFP_FLOWER_MASK_VLAN_CFI;
			frame->tci = cpu_to_be16(tmp_tci);
		}
	}
}

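/* Extended metadata carries the second key layer bitmap, needed when
 * the match uses layers beyond the base bitmap (e.g. Geneve).
 */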
static void
nfp_flower_compile_ext_meta(struct nfp_flower_ext_meta *frame, u32 key_ext)
{
	frame->nfp_flow_key_layer2 = cpu_to_be32(key_ext);
}

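/* Encode the ingress port. The mask half is always an exact match
 * (all ones); the key half is either the tunnel type tagged with
 * NFP_FL_PORT_TYPE_TUN or the control message port id. Always
 * returns 0.
 */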
static int
nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port,
			bool mask_version, enum nfp_flower_tun_type tun_type)
{
	if (mask_version) {
		frame->in_port = cpu_to_be32(~0);
		return 0;
	}

	if (tun_type)
		frame->in_port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type);
	else
		frame->in_port = cpu_to_be32(cmsg_port);

	return 0;
}

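/* Build the MAC/MPLS section. Copies source/destination MACs when the
 * rule matches on them and packs the outermost MPLS label stack entry
 * (label, TC, bottom-of-stack) into mpls_lse. NFP_FLOWER_MASK_MPLS_Q
 * is also set for an MPLS ethertype match with no MPLS fields, so the
 * firmware can still tell "any MPLS packet" apart from "no MPLS match".
 */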
static void
nfp_flower_compile_mac(struct nfp_flower_mac_mpls *frame,
		       struct tc_cls_flower_offload *flow,
		       bool mask_version)
{
	struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
	struct flow_dissector_key_eth_addrs *addr;

	memset(frame, 0, sizeof(struct nfp_flower_mac_mpls));

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		addr = skb_flow_dissector_target(flow->dissector,
						 FLOW_DISSECTOR_KEY_ETH_ADDRS,
						 target);
		/* Populate mac frame. */
		ether_addr_copy(frame->mac_dst, &addr->dst[0]);
		ether_addr_copy(frame->mac_src, &addr->src[0]);
	}

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_MPLS)) {
		struct flow_dissector_key_mpls *mpls;
		u32 t_mpls;

		mpls = skb_flow_dissector_target(flow->dissector,
						 FLOW_DISSECTOR_KEY_MPLS,
						 target);

		t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB, mpls->mpls_label) |
			 FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC, mpls->mpls_tc) |
			 FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS, mpls->mpls_bos) |
			 NFP_FLOWER_MASK_MPLS_Q;

		frame->mpls_lse = cpu_to_be32(t_mpls);
	} else if (dissector_uses_key(flow->dissector,
				      FLOW_DISSECTOR_KEY_BASIC)) {
		/* Check for mpls ether type and set NFP_FLOWER_MASK_MPLS_Q
		 * bit, which indicates an mpls ether type but without any
		 * mpls fields.
		 */
		struct flow_dissector_key_basic *key_basic;

		key_basic = skb_flow_dissector_target(flow->dissector,
						      FLOW_DISSECTOR_KEY_BASIC,
						      flow->key);
		if (key_basic->n_proto == cpu_to_be16(ETH_P_MPLS_UC) ||
		    key_basic->n_proto == cpu_to_be16(ETH_P_MPLS_MC))
			frame->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
	}
}

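/* Copy L4 source and destination ports; the dissector already holds
 * them in network byte order so no conversion is needed.
 */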
static void
nfp_flower_compile_tport(struct nfp_flower_tp_ports *frame,
			 struct tc_cls_flower_offload *flow,
			 bool mask_version)
{
	struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
	struct flow_dissector_key_ports *tp;

	memset(frame, 0, sizeof(struct nfp_flower_tp_ports));

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
		tp = skb_flow_dissector_target(flow->dissector,
					       FLOW_DISSECTOR_KEY_PORTS,
					       target);
		frame->port_src = tp->src;
		frame->port_dst = tp->dst;
	}
}

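/* Fill the extended IP fields shared by the IPv4 and IPv6 sections:
 * protocol, TOS/TTL, TCP flags and fragmentation state. The enclosing
 * frame is zeroed by the caller, so absent keys are left as zero.
 */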
static void
nfp_flower_compile_ip_ext(struct nfp_flower_ip_ext *frame,
			  struct tc_cls_flower_offload *flow,
			  bool mask_version)
{
	struct fl_flow_key *target = mask_version ? flow->mask : flow->key;

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *basic;

		basic = skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  target);
		frame->proto = basic->ip_proto;
	}

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_IP)) {
		struct flow_dissector_key_ip *flow_ip;

		flow_ip = skb_flow_dissector_target(flow->dissector,
						    FLOW_DISSECTOR_KEY_IP,
						    target);
		frame->tos = flow_ip->tos;
		frame->ttl = flow_ip->ttl;
	}

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_TCP)) {
		struct flow_dissector_key_tcp *tcp;
		u32 tcp_flags;

		tcp = skb_flow_dissector_target(flow->dissector,
						FLOW_DISSECTOR_KEY_TCP, target);
		tcp_flags = be16_to_cpu(tcp->flags);

		if (tcp_flags & TCPHDR_FIN)
			frame->flags |= NFP_FL_TCP_FLAG_FIN;
		if (tcp_flags & TCPHDR_SYN)
			frame->flags |= NFP_FL_TCP_FLAG_SYN;
		if (tcp_flags & TCPHDR_RST)
			frame->flags |= NFP_FL_TCP_FLAG_RST;
		if (tcp_flags & TCPHDR_PSH)
			frame->flags |= NFP_FL_TCP_FLAG_PSH;
		if (tcp_flags & TCPHDR_URG)
			frame->flags |= NFP_FL_TCP_FLAG_URG;
	}

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key;

		key = skb_flow_dissector_target(flow->dissector,
						FLOW_DISSECTOR_KEY_CONTROL,
						target);
		if (key->flags & FLOW_DIS_IS_FRAGMENT)
			frame->flags |= NFP_FL_IP_FRAGMENTED;
		if (key->flags & FLOW_DIS_FIRST_FRAG)
			frame->flags |= NFP_FL_IP_FRAG_FIRST;
	}
}

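/* Build the IPv4 section: addresses plus the shared IP extension. */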
static void
nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *frame,
			struct tc_cls_flower_offload *flow,
			bool mask_version)
{
	struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
	struct flow_dissector_key_ipv4_addrs *addr;

	memset(frame, 0, sizeof(struct nfp_flower_ipv4));

	if (dissector_uses_key(flow->dissector,
			       FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		addr = skb_flow_dissector_target(flow->dissector,
						 FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						 target);
		frame->ipv4_src = addr->src;
		frame->ipv4_dst = addr->dst;
	}

	nfp_flower_compile_ip_ext(&frame->ip_ext, flow, mask_version);
}

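/* Build the IPv6 section: addresses plus the shared IP extension. */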
static void
nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *frame,
			struct tc_cls_flower_offload *flow,
			bool mask_version)
{
	struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
	struct flow_dissector_key_ipv6_addrs *addr;

	memset(frame, 0, sizeof(struct nfp_flower_ipv6));

	if (dissector_uses_key(flow->dissector,
			       FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
		addr = skb_flow_dissector_target(flow->dissector,
						 FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						 target);
		frame->ipv6_src = addr->src;
		frame->ipv6_dst = addr->dst;
	}

	nfp_flower_compile_ip_ext(&frame->ip_ext, flow, mask_version);
}

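/* Copy the raw tunnel option TLVs into the key buffer. The option
 * length is assumed to have been bounded when the key layout was
 * sized, which is why no size check is repeated here.
 */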
static int
nfp_flower_compile_geneve_opt(void *key_buf, struct tc_cls_flower_offload *flow,
			      bool mask_version)
{
	struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
	struct flow_dissector_key_enc_opts *opts;

	opts = skb_flow_dissector_target(flow->dissector,
					 FLOW_DISSECTOR_KEY_ENC_OPTS,
					 target);
	memcpy(key_buf, opts->data, opts->len);

	return 0;
}

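/* Build the IPv4 UDP tunnel section shared by VXLAN and Geneve: the
 * VNI shifted into the tun_id word, the outer IP addresses and the
 * outer TOS/TTL.
 */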
static void
nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *frame,
				struct tc_cls_flower_offload *flow,
				bool mask_version)
{
	struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
	struct flow_dissector_key_ipv4_addrs *tun_ips;
	struct flow_dissector_key_keyid *vni;
	struct flow_dissector_key_ip *ip;

	memset(frame, 0, sizeof(struct nfp_flower_ipv4_udp_tun));

	if (dissector_uses_key(flow->dissector,
			       FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		u32 temp_vni;

		vni = skb_flow_dissector_target(flow->dissector,
						FLOW_DISSECTOR_KEY_ENC_KEYID,
						target);
		temp_vni = be32_to_cpu(vni->keyid) << NFP_FL_TUN_VNI_OFFSET;
		frame->tun_id = cpu_to_be32(temp_vni);
	}

	if (dissector_uses_key(flow->dissector,
			       FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
		tun_ips =
		   skb_flow_dissector_target(flow->dissector,
					     FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
					     target);
		frame->ip_src = tun_ips->src;
		frame->ip_dst = tun_ips->dst;
	}

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_ENC_IP)) {
		ip = skb_flow_dissector_target(flow->dissector,
					       FLOW_DISSECTOR_KEY_ENC_IP,
					       target);
		frame->tos = ip->tos;
		frame->ttl = ip->ttl;
	}
}

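/**
 * nfp_flower_compile_flow_match() - Compile a TC flower match into a FW key.
 * @flow:	TC flower classifier offload structure
 * @key_ls:	Key layout calculated for this flow
 * @netdev:	Ingress netdev
 * @nfp_flow:	Payload holding the unmasked-data and mask-data buffers
 * @tun_type:	Tunnel type to encode in the ingress port, if any
 *
 * Walks the key layers recorded in @key_ls and emits each section into
 * the unmasked key and mask buffers in the order the firmware expects.
 * Both buffers must be at least @key_ls->key_size bytes.
 *
 * Return: 0 on success, or a negative errno from a section compiler.
 */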
int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
				  struct nfp_fl_key_ls *key_ls,
				  struct net_device *netdev,
				  struct nfp_fl_payload *nfp_flow,
				  enum nfp_flower_tun_type tun_type)
{
	struct nfp_repr *netdev_repr;
	int err;
	u8 *ext;
	u8 *msk;

	memset(nfp_flow->unmasked_data, 0, key_ls->key_size);
	memset(nfp_flow->mask_data, 0, key_ls->key_size);

	ext = nfp_flow->unmasked_data;
	msk = nfp_flow->mask_data;

	/* Populate Exact Metadata. */
	nfp_flower_compile_meta_tci((struct nfp_flower_meta_tci *)ext,
				    flow, key_ls->key_layer, false);
	/* Populate Mask Metadata. */
	nfp_flower_compile_meta_tci((struct nfp_flower_meta_tci *)msk,
				    flow, key_ls->key_layer, true);
	ext += sizeof(struct nfp_flower_meta_tci);
	msk += sizeof(struct nfp_flower_meta_tci);

	/* Populate Extended Metadata if Required. */
	if (NFP_FLOWER_LAYER_EXT_META & key_ls->key_layer) {
		nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)ext,
					    key_ls->key_layer_two);
		nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)msk,
					    key_ls->key_layer_two);
		ext += sizeof(struct nfp_flower_ext_meta);
		msk += sizeof(struct nfp_flower_ext_meta);
	}

	/* Populate Exact Port data. */
	err = nfp_flower_compile_port((struct nfp_flower_in_port *)ext,
				      nfp_repr_get_port_id(netdev),
				      false, tun_type);
	if (err)
		return err;

	/* Populate Mask Port Data. */
	err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk,
				      nfp_repr_get_port_id(netdev),
				      true, tun_type);
	if (err)
		return err;

	ext += sizeof(struct nfp_flower_in_port);
	msk += sizeof(struct nfp_flower_in_port);

	if (NFP_FLOWER_LAYER_MAC & key_ls->key_layer) {
		/* Populate Exact MAC Data. */
		nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)ext,
				       flow, false);
		/* Populate Mask MAC Data. */
		nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)msk,
				       flow, true);
		ext += sizeof(struct nfp_flower_mac_mpls);
		msk += sizeof(struct nfp_flower_mac_mpls);
	}

	if (NFP_FLOWER_LAYER_TP & key_ls->key_layer) {
		/* Populate Exact TP Data. */
		nfp_flower_compile_tport((struct nfp_flower_tp_ports *)ext,
					 flow, false);
		/* Populate Mask TP Data. */
		nfp_flower_compile_tport((struct nfp_flower_tp_ports *)msk,
					 flow, true);
		ext += sizeof(struct nfp_flower_tp_ports);
		msk += sizeof(struct nfp_flower_tp_ports);
	}

	if (NFP_FLOWER_LAYER_IPV4 & key_ls->key_layer) {
		/* Populate Exact IPv4 Data. */
		nfp_flower_compile_ipv4((struct nfp_flower_ipv4 *)ext,
					flow, false);
		/* Populate Mask IPv4 Data. */
		nfp_flower_compile_ipv4((struct nfp_flower_ipv4 *)msk,
					flow, true);
		ext += sizeof(struct nfp_flower_ipv4);
		msk += sizeof(struct nfp_flower_ipv4);
	}

	if (NFP_FLOWER_LAYER_IPV6 & key_ls->key_layer) {
		/* Populate Exact IPv6 Data. */
		nfp_flower_compile_ipv6((struct nfp_flower_ipv6 *)ext,
					flow, false);
		/* Populate Mask IPv6 Data. */
		nfp_flower_compile_ipv6((struct nfp_flower_ipv6 *)msk,
					flow, true);
		ext += sizeof(struct nfp_flower_ipv6);
		msk += sizeof(struct nfp_flower_ipv6);
	}

	if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN ||
	    key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE) {
		__be32 tun_dst;

		/* Populate Exact Tunnel Data. */
		nfp_flower_compile_ipv4_udp_tun((void *)ext, flow, false);
		/* Populate Mask Tunnel Data. */
		nfp_flower_compile_ipv4_udp_tun((void *)msk, flow, true);
		tun_dst = ((struct nfp_flower_ipv4_udp_tun *)ext)->ip_dst;
		ext += sizeof(struct nfp_flower_ipv4_udp_tun);
		msk += sizeof(struct nfp_flower_ipv4_udp_tun);

		/* Configure tunnel end point MAC. */
		if (nfp_netdev_is_nfp_repr(netdev)) {
			netdev_repr = netdev_priv(netdev);
			nfp_tunnel_write_macs(netdev_repr->app);

			/* Store the tunnel destination in the rule data.
			 * This must be present and be an exact match.
			 */
			nfp_flow->nfp_tun_ipv4_addr = tun_dst;
			nfp_tunnel_add_ipv4_off(netdev_repr->app, tun_dst);
		}

		if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP) {
			err = nfp_flower_compile_geneve_opt(ext, flow, false);
			if (err)
				return err;

			err = nfp_flower_compile_geneve_opt(msk, flow, true);
			if (err)
				return err;
		}
	}

	return 0;
}