xref: /openbmc/linux/drivers/net/ethernet/netronome/nfp/flower/match.c (revision 22fc4c4c9fd60427bcda00878cee94e7622cfa7a)
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/bitfield.h>
#include <net/pkt_cls.h>

#include "cmsg.h"
#include "main.h"
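
/* Each nfp_flower_compile_*() helper below fills one fixed-size section
 * of the firmware match key. When mask_version is true the helper reads
 * flow->mask and emits the mask half of the rule; otherwise it reads
 * flow->key and emits the exact-match half.
 */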
static void
nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *frame,
			    struct tc_cls_flower_offload *flow, u8 key_type,
			    bool mask_version)
{
	struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
	struct flow_dissector_key_vlan *flow_vlan;
	u16 tmp_tci;

	memset(frame, 0, sizeof(struct nfp_flower_meta_tci));
	/* Populate the metadata frame. */
	frame->nfp_flow_key_layer = key_type;
	frame->mask_id = ~0;

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		flow_vlan = skb_flow_dissector_target(flow->dissector,
						      FLOW_DISSECTOR_KEY_VLAN,
						      target);
		/* Populate the tci field. */
		if (flow_vlan->vlan_id || flow_vlan->vlan_priority) {
			tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
					     flow_vlan->vlan_priority) |
				  FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
					     flow_vlan->vlan_id) |
				  NFP_FLOWER_MASK_VLAN_CFI;
			frame->tci = cpu_to_be16(tmp_tci);
		}
	}
}
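
/* Extended metadata carries the second key-layer bitmap, used by rules
 * whose match fields go beyond the base layers (e.g. Geneve tunnels).
 */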
static void
nfp_flower_compile_ext_meta(struct nfp_flower_ext_meta *frame, u32 key_ext)
{
	frame->nfp_flow_key_layer2 = cpu_to_be32(key_ext);
}
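
/* Encode the ingress port. The mask always matches on every bit; for
 * tunnel rules the tunnel type is encoded in place of a port ID, and a
 * zero cmsg_port on a non-tunnel rule cannot be offloaded.
 */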
static int
nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port,
			bool mask_version, enum nfp_flower_tun_type tun_type)
{
	if (mask_version) {
		frame->in_port = cpu_to_be32(~0);
		return 0;
	}

	if (tun_type) {
		frame->in_port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type);
	} else {
		if (!cmsg_port)
			return -EOPNOTSUPP;
		frame->in_port = cpu_to_be32(cmsg_port);
	}

	return 0;
}
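
/* Fill the combined MAC/MPLS section: Ethernet source and destination
 * addresses plus, when matched, the top MPLS label stack entry.
 */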
static void
nfp_flower_compile_mac(struct nfp_flower_mac_mpls *frame,
		       struct tc_cls_flower_offload *flow,
		       bool mask_version)
{
	struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
	struct flow_dissector_key_eth_addrs *addr;

	memset(frame, 0, sizeof(struct nfp_flower_mac_mpls));

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		addr = skb_flow_dissector_target(flow->dissector,
						 FLOW_DISSECTOR_KEY_ETH_ADDRS,
						 target);
		/* Populate mac frame. */
		ether_addr_copy(frame->mac_dst, &addr->dst[0]);
		ether_addr_copy(frame->mac_src, &addr->src[0]);
	}

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_MPLS)) {
		struct flow_dissector_key_mpls *mpls;
		u32 t_mpls;

		mpls = skb_flow_dissector_target(flow->dissector,
						 FLOW_DISSECTOR_KEY_MPLS,
						 target);

		t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB, mpls->mpls_label) |
			 FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC, mpls->mpls_tc) |
			 FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS, mpls->mpls_bos) |
			 NFP_FLOWER_MASK_MPLS_Q;

		frame->mpls_lse = cpu_to_be32(t_mpls);
	} else if (dissector_uses_key(flow->dissector,
				      FLOW_DISSECTOR_KEY_BASIC)) {
		/* Check for an MPLS ether type and, if present, set the
		 * NFP_FLOWER_MASK_MPLS_Q bit, which indicates an MPLS
		 * ether type with no MPLS match fields.
		 */
		struct flow_dissector_key_basic *key_basic;

		key_basic = skb_flow_dissector_target(flow->dissector,
						      FLOW_DISSECTOR_KEY_BASIC,
						      flow->key);
		if (key_basic->n_proto == cpu_to_be16(ETH_P_MPLS_UC) ||
		    key_basic->n_proto == cpu_to_be16(ETH_P_MPLS_MC))
			frame->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
	}
}
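
/* Fill the layer 4 port section; flower already supplies the port
 * numbers in network byte order.
 */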
static void
nfp_flower_compile_tport(struct nfp_flower_tp_ports *frame,
			 struct tc_cls_flower_offload *flow,
			 bool mask_version)
{
	struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
	struct flow_dissector_key_ports *tp;

	memset(frame, 0, sizeof(struct nfp_flower_tp_ports));

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
		tp = skb_flow_dissector_target(flow->dissector,
					       FLOW_DISSECTOR_KEY_PORTS,
					       target);
		frame->port_src = tp->src;
		frame->port_dst = tp->dst;
	}
}
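
/* Fill the IP extension fields shared by the IPv4 and IPv6 sections:
 * protocol, TOS, TTL, TCP flags and fragmentation flags. Callers are
 * expected to have zeroed the containing frame first.
 */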
static void
nfp_flower_compile_ip_ext(struct nfp_flower_ip_ext *frame,
			  struct tc_cls_flower_offload *flow,
			  bool mask_version)
{
	struct fl_flow_key *target = mask_version ? flow->mask : flow->key;

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *basic;

		basic = skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  target);
		frame->proto = basic->ip_proto;
	}

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_IP)) {
		struct flow_dissector_key_ip *flow_ip;

		flow_ip = skb_flow_dissector_target(flow->dissector,
						    FLOW_DISSECTOR_KEY_IP,
						    target);
		frame->tos = flow_ip->tos;
		frame->ttl = flow_ip->ttl;
	}

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_TCP)) {
		struct flow_dissector_key_tcp *tcp;
		u32 tcp_flags;

		tcp = skb_flow_dissector_target(flow->dissector,
						FLOW_DISSECTOR_KEY_TCP, target);
		tcp_flags = be16_to_cpu(tcp->flags);

		if (tcp_flags & TCPHDR_FIN)
			frame->flags |= NFP_FL_TCP_FLAG_FIN;
		if (tcp_flags & TCPHDR_SYN)
			frame->flags |= NFP_FL_TCP_FLAG_SYN;
		if (tcp_flags & TCPHDR_RST)
			frame->flags |= NFP_FL_TCP_FLAG_RST;
		if (tcp_flags & TCPHDR_PSH)
			frame->flags |= NFP_FL_TCP_FLAG_PSH;
		if (tcp_flags & TCPHDR_URG)
			frame->flags |= NFP_FL_TCP_FLAG_URG;
	}

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key;

		key = skb_flow_dissector_target(flow->dissector,
						FLOW_DISSECTOR_KEY_CONTROL,
						target);
		if (key->flags & FLOW_DIS_IS_FRAGMENT)
			frame->flags |= NFP_FL_IP_FRAGMENTED;
		if (key->flags & FLOW_DIS_FIRST_FRAG)
			frame->flags |= NFP_FL_IP_FRAG_FIRST;
	}
}
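
/* Fill the IPv4 section: addresses plus the shared IP extension fields. */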
static void
nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *frame,
			struct tc_cls_flower_offload *flow,
			bool mask_version)
{
	struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
	struct flow_dissector_key_ipv4_addrs *addr;

	memset(frame, 0, sizeof(struct nfp_flower_ipv4));

	if (dissector_uses_key(flow->dissector,
			       FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		addr = skb_flow_dissector_target(flow->dissector,
						 FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						 target);
		frame->ipv4_src = addr->src;
		frame->ipv4_dst = addr->dst;
	}

	nfp_flower_compile_ip_ext(&frame->ip_ext, flow, mask_version);
}
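
/* Fill the IPv6 section: addresses plus the shared IP extension fields. */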
static void
nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *frame,
			struct tc_cls_flower_offload *flow,
			bool mask_version)
{
	struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
	struct flow_dissector_key_ipv6_addrs *addr;

	memset(frame, 0, sizeof(struct nfp_flower_ipv6));

	if (dissector_uses_key(flow->dissector,
			       FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
		addr = skb_flow_dissector_target(flow->dissector,
						 FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						 target);
		frame->ipv6_src = addr->src;
		frame->ipv6_dst = addr->dst;
	}

	nfp_flower_compile_ip_ext(&frame->ip_ext, flow, mask_version);
}
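
/* Copy the raw Geneve TLV options into the key or mask buffer. The
 * option length is assumed to have been validated when the key layout
 * was calculated, so the copy is done unchecked here.
 */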
static int
nfp_flower_compile_geneve_opt(void *key_buf, struct tc_cls_flower_offload *flow,
			      bool mask_version)
{
	struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
	struct flow_dissector_key_enc_opts *opts;

	opts = skb_flow_dissector_target(flow->dissector,
					 FLOW_DISSECTOR_KEY_ENC_OPTS,
					 target);
	memcpy(key_buf, opts->data, opts->len);

	return 0;
}
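
/* Fill the IPv4 UDP tunnel section (VXLAN or Geneve): VNI, outer IPv4
 * addresses and outer IP TOS/TTL.
 */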
static void
nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *frame,
				struct tc_cls_flower_offload *flow,
				bool mask_version)
{
	struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
	struct flow_dissector_key_ipv4_addrs *tun_ips;
	struct flow_dissector_key_keyid *vni;
	struct flow_dissector_key_ip *ip;

	memset(frame, 0, sizeof(struct nfp_flower_ipv4_udp_tun));

	if (dissector_uses_key(flow->dissector,
			       FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		u32 temp_vni;

		vni = skb_flow_dissector_target(flow->dissector,
						FLOW_DISSECTOR_KEY_ENC_KEYID,
						target);
		temp_vni = be32_to_cpu(vni->keyid) << NFP_FL_TUN_VNI_OFFSET;
		frame->tun_id = cpu_to_be32(temp_vni);
	}

	if (dissector_uses_key(flow->dissector,
			       FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
		tun_ips =
		   skb_flow_dissector_target(flow->dissector,
					     FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
					     target);
		frame->ip_src = tun_ips->src;
		frame->ip_dst = tun_ips->dst;
	}

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_ENC_IP)) {
		ip = skb_flow_dissector_target(flow->dissector,
					       FLOW_DISSECTOR_KEY_ENC_IP,
					       target);
		frame->tos = ip->tos;
		frame->ttl = ip->ttl;
	}
}
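
/* Compile a TC flower match into the firmware key/mask layout. The
 * unmasked (exact) and mask buffers are walked in lockstep, with one
 * fixed-size section appended per layer flagged in key_ls.
 */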
int nfp_flower_compile_flow_match(struct nfp_app *app,
				  struct tc_cls_flower_offload *flow,
				  struct nfp_fl_key_ls *key_ls,
				  struct net_device *netdev,
				  struct nfp_fl_payload *nfp_flow,
				  enum nfp_flower_tun_type tun_type)
{
	u32 cmsg_port = 0;
	int err;
	u8 *ext;
	u8 *msk;

	if (nfp_netdev_is_nfp_repr(netdev))
		cmsg_port = nfp_repr_get_port_id(netdev);

	memset(nfp_flow->unmasked_data, 0, key_ls->key_size);
	memset(nfp_flow->mask_data, 0, key_ls->key_size);

	ext = nfp_flow->unmasked_data;
	msk = nfp_flow->mask_data;

	/* Populate Exact Metadata. */
	nfp_flower_compile_meta_tci((struct nfp_flower_meta_tci *)ext,
				    flow, key_ls->key_layer, false);
	/* Populate Mask Metadata. */
	nfp_flower_compile_meta_tci((struct nfp_flower_meta_tci *)msk,
				    flow, key_ls->key_layer, true);
	ext += sizeof(struct nfp_flower_meta_tci);
	msk += sizeof(struct nfp_flower_meta_tci);

	/* Populate Extended Metadata if Required. */
	if (NFP_FLOWER_LAYER_EXT_META & key_ls->key_layer) {
		nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)ext,
					    key_ls->key_layer_two);
		nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)msk,
					    key_ls->key_layer_two);
		ext += sizeof(struct nfp_flower_ext_meta);
		msk += sizeof(struct nfp_flower_ext_meta);
	}

	/* Populate Exact Port Data. */
	err = nfp_flower_compile_port((struct nfp_flower_in_port *)ext,
				      cmsg_port, false, tun_type);
	if (err)
		return err;

	/* Populate Mask Port Data. */
	err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk,
				      cmsg_port, true, tun_type);
	if (err)
		return err;

	ext += sizeof(struct nfp_flower_in_port);
	msk += sizeof(struct nfp_flower_in_port);

	if (NFP_FLOWER_LAYER_MAC & key_ls->key_layer) {
		/* Populate Exact MAC Data. */
		nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)ext,
				       flow, false);
		/* Populate Mask MAC Data. */
		nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)msk,
				       flow, true);
		ext += sizeof(struct nfp_flower_mac_mpls);
		msk += sizeof(struct nfp_flower_mac_mpls);
	}

	if (NFP_FLOWER_LAYER_TP & key_ls->key_layer) {
		/* Populate Exact TP Data. */
		nfp_flower_compile_tport((struct nfp_flower_tp_ports *)ext,
					 flow, false);
		/* Populate Mask TP Data. */
		nfp_flower_compile_tport((struct nfp_flower_tp_ports *)msk,
					 flow, true);
		ext += sizeof(struct nfp_flower_tp_ports);
		msk += sizeof(struct nfp_flower_tp_ports);
	}

	if (NFP_FLOWER_LAYER_IPV4 & key_ls->key_layer) {
		/* Populate Exact IPv4 Data. */
		nfp_flower_compile_ipv4((struct nfp_flower_ipv4 *)ext,
					flow, false);
		/* Populate Mask IPv4 Data. */
		nfp_flower_compile_ipv4((struct nfp_flower_ipv4 *)msk,
					flow, true);
		ext += sizeof(struct nfp_flower_ipv4);
		msk += sizeof(struct nfp_flower_ipv4);
	}

	if (NFP_FLOWER_LAYER_IPV6 & key_ls->key_layer) {
		/* Populate Exact IPv6 Data. */
		nfp_flower_compile_ipv6((struct nfp_flower_ipv6 *)ext,
					flow, false);
		/* Populate Mask IPv6 Data. */
		nfp_flower_compile_ipv6((struct nfp_flower_ipv6 *)msk,
					flow, true);
		ext += sizeof(struct nfp_flower_ipv6);
		msk += sizeof(struct nfp_flower_ipv6);
	}

	if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN ||
	    key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE) {
		__be32 tun_dst;

		/* Populate Exact UDP Tunnel Data. */
		nfp_flower_compile_ipv4_udp_tun((void *)ext, flow, false);
		/* Populate Mask UDP Tunnel Data. */
		nfp_flower_compile_ipv4_udp_tun((void *)msk, flow, true);
		tun_dst = ((struct nfp_flower_ipv4_udp_tun *)ext)->ip_dst;
		ext += sizeof(struct nfp_flower_ipv4_udp_tun);
		msk += sizeof(struct nfp_flower_ipv4_udp_tun);

		/* Store the tunnel destination in the rule data.
		 * This must be present and be an exact match.
		 */
		nfp_flow->nfp_tun_ipv4_addr = tun_dst;
		nfp_tunnel_add_ipv4_off(app, tun_dst);

		if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP) {
			err = nfp_flower_compile_geneve_opt(ext, flow, false);
			if (err)
				return err;

			err = nfp_flower_compile_geneve_opt(msk, flow, true);
			if (err)
				return err;
		}
	}

	return 0;
}