/*
 * Copyright (C) 2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below.  You have the option to license this software under the
 * complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/bitfield.h>
#include <net/pkt_cls.h>

#include "cmsg.h"
#include "main.h"

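/* Build the combined metadata/TCI match field.  Writes the key layer
 * bitmap and an all-ones mask id and, when the rule matches on VLAN,
 * packs the VLAN priority and ID (with the CFI bit set) into the TCI.
 * mask_version selects whether the flower mask or the exact key is
 * used as the source.
 */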
static void
nfp_flower_compile_meta_tci(struct nfp_flower_meta_two *frame,
			    struct tc_cls_flower_offload *flow, u8 key_type,
			    bool mask_version)
{
	struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
	struct flow_dissector_key_vlan *flow_vlan;
	u16 tmp_tci;

	memset(frame, 0, sizeof(struct nfp_flower_meta_two));
	/* Populate the metadata frame. */
	frame->nfp_flow_key_layer = key_type;
	frame->mask_id = ~0;

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		flow_vlan = skb_flow_dissector_target(flow->dissector,
						      FLOW_DISSECTOR_KEY_VLAN,
						      target);
		/* Populate the tci field. */
		if (flow_vlan->vlan_id) {
			tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
					     flow_vlan->vlan_priority) |
				  FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
					     flow_vlan->vlan_id) |
				  NFP_FLOWER_MASK_VLAN_CFI;
			frame->tci = cpu_to_be16(tmp_tci);
		}
	}
}

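/* Build the basic metadata match field: key layer bitmap only, with the
 * mask id and reserved bytes cleared.
 */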
static void
nfp_flower_compile_meta(struct nfp_flower_meta_one *frame, u8 key_type)
{
	frame->nfp_flow_key_layer = key_type;
	frame->mask_id = 0;
	frame->reserved = 0;
}

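/* Encode the ingress port match.  The mask version is always an exact
 * (all-ones) match; otherwise in_port is either the tunnel type tagged
 * with NFP_FL_PORT_TYPE_TUN or the supplied cmsg port id.
 */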
static int
nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port,
			bool mask_version, enum nfp_flower_tun_type tun_type)
{
	if (mask_version) {
		frame->in_port = cpu_to_be32(~0);
		return 0;
	}

	if (tun_type)
		frame->in_port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type);
	else
		frame->in_port = cpu_to_be32(cmsg_port);

	return 0;
}

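/* Build the MAC/MPLS match field.  Copies the Ethernet source and
 * destination addresses when present and, for MPLS rules, packs the
 * label, TC and bottom-of-stack bits into the MPLS LSE with
 * NFP_FLOWER_MASK_MPLS_Q set.
 */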
static void
nfp_flower_compile_mac(struct nfp_flower_mac_mpls *frame,
		       struct tc_cls_flower_offload *flow,
		       bool mask_version)
{
	struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
	struct flow_dissector_key_eth_addrs *addr;

	memset(frame, 0, sizeof(struct nfp_flower_mac_mpls));

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		addr = skb_flow_dissector_target(flow->dissector,
						 FLOW_DISSECTOR_KEY_ETH_ADDRS,
						 target);
		/* Populate mac frame. */
		ether_addr_copy(frame->mac_dst, &addr->dst[0]);
		ether_addr_copy(frame->mac_src, &addr->src[0]);
	}

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_MPLS)) {
		struct flow_dissector_key_mpls *mpls;
		u32 t_mpls;

		mpls = skb_flow_dissector_target(flow->dissector,
						 FLOW_DISSECTOR_KEY_MPLS,
						 target);

		t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB, mpls->mpls_label) |
			 FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC, mpls->mpls_tc) |
			 FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS, mpls->mpls_bos) |
			 NFP_FLOWER_MASK_MPLS_Q;

		frame->mpls_lse = cpu_to_be32(t_mpls);
	}
}

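/* Build the transport layer match field from the L4 source and
 * destination ports, which are already in network byte order.
 */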
static void
nfp_flower_compile_tport(struct nfp_flower_tp_ports *frame,
			 struct tc_cls_flower_offload *flow,
			 bool mask_version)
{
	struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
	struct flow_dissector_key_ports *tp;

	memset(frame, 0, sizeof(struct nfp_flower_tp_ports));

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
		tp = skb_flow_dissector_target(flow->dissector,
					       FLOW_DISSECTOR_KEY_PORTS,
					       target);
		frame->port_src = tp->src;
		frame->port_dst = tp->dst;
	}
}

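/* Build the IPv4 match field: source/destination addresses, IP protocol
 * and, when the rule matches on them, TOS and TTL.
 */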
static void
nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *frame,
			struct tc_cls_flower_offload *flow,
			bool mask_version)
{
	struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
	struct flow_dissector_key_ipv4_addrs *addr;
	struct flow_dissector_key_basic *basic;

	memset(frame, 0, sizeof(struct nfp_flower_ipv4));

	if (dissector_uses_key(flow->dissector,
			       FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		addr = skb_flow_dissector_target(flow->dissector,
						 FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						 target);
		frame->ipv4_src = addr->src;
		frame->ipv4_dst = addr->dst;
	}

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		basic = skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  target);
		frame->proto = basic->ip_proto;
	}

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_IP)) {
		struct flow_dissector_key_ip *flow_ip;

		flow_ip = skb_flow_dissector_target(flow->dissector,
						    FLOW_DISSECTOR_KEY_IP,
						    target);
		frame->tos = flow_ip->tos;
		frame->ttl = flow_ip->ttl;
	}
}

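/* Build the IPv6 match field: source/destination addresses, IP protocol
 * and, when the rule matches on them, the traffic class and hop limit
 * carried in the dissector's tos/ttl fields.
 */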
static void
nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *frame,
			struct tc_cls_flower_offload *flow,
			bool mask_version)
{
	struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
	struct flow_dissector_key_ipv6_addrs *addr;
	struct flow_dissector_key_basic *basic;

	memset(frame, 0, sizeof(struct nfp_flower_ipv6));

	if (dissector_uses_key(flow->dissector,
			       FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
		addr = skb_flow_dissector_target(flow->dissector,
						 FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						 target);
		frame->ipv6_src = addr->src;
		frame->ipv6_dst = addr->dst;
	}

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		basic = skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  target);
		frame->proto = basic->ip_proto;
	}

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_IP)) {
		struct flow_dissector_key_ip *flow_ip;

		flow_ip = skb_flow_dissector_target(flow->dissector,
						    FLOW_DISSECTOR_KEY_IP,
						    target);
		frame->tos = flow_ip->tos;
		frame->ttl = flow_ip->ttl;
	}
}

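/* Build the VXLAN tunnel match field.  Packs the VNI into the tunnel id
 * and copies the outer IPv4 addresses; the tunnel destination is also
 * returned through tun_dst so the caller can offload it.  TOS, TTL,
 * GPE flags and next protocol remain wildcarded.
 */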
static void
nfp_flower_compile_vxlan(struct nfp_flower_vxlan *frame,
			 struct tc_cls_flower_offload *flow,
			 bool mask_version, __be32 *tun_dst)
{
	struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
	struct flow_dissector_key_ipv4_addrs *vxlan_ips;
	struct flow_dissector_key_keyid *vni;

	/* Wildcard TOS/TTL/GPE_FLAGS/NXT_PROTO for now. */
	memset(frame, 0, sizeof(struct nfp_flower_vxlan));

	if (dissector_uses_key(flow->dissector,
			       FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		u32 temp_vni;

		vni = skb_flow_dissector_target(flow->dissector,
						FLOW_DISSECTOR_KEY_ENC_KEYID,
						target);
		temp_vni = be32_to_cpu(vni->keyid) << NFP_FL_TUN_VNI_OFFSET;
		frame->tun_id = cpu_to_be32(temp_vni);
	}

	if (dissector_uses_key(flow->dissector,
			       FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
		vxlan_ips =
		   skb_flow_dissector_target(flow->dissector,
					     FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
					     target);
		frame->ip_src = vxlan_ips->src;
		frame->ip_dst = vxlan_ips->dst;
		*tun_dst = vxlan_ips->dst;
	}
}

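/* Compile the unmasked (exact) and masked match data for a flower rule
 * into nfp_flow->unmasked_data and nfp_flow->mask_data, emitting the
 * layers selected in key_ls in a fixed order: metadata (with port data
 * when NFP_FLOWER_LAYER_PORT is set), MAC/MPLS, transport ports, IPv4,
 * IPv6 and VXLAN.  For VXLAN rules on a representor, the tunnel
 * destination IP and the tunnel endpoint MACs are also offloaded.
 * Returns 0 on success or a negative error code.
 */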
int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
				  struct nfp_fl_key_ls *key_ls,
				  struct net_device *netdev,
				  struct nfp_fl_payload *nfp_flow)
{
	enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
	__be32 tun_dst, tun_dst_mask = 0;
	struct nfp_repr *netdev_repr;
	int err;
	u8 *ext;
	u8 *msk;

	if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN)
		tun_type = NFP_FL_TUNNEL_VXLAN;

	memset(nfp_flow->unmasked_data, 0, key_ls->key_size);
	memset(nfp_flow->mask_data, 0, key_ls->key_size);

	ext = nfp_flow->unmasked_data;
	msk = nfp_flow->mask_data;
	if (NFP_FLOWER_LAYER_PORT & key_ls->key_layer) {
		/* Populate Exact Metadata. */
		nfp_flower_compile_meta_tci((struct nfp_flower_meta_two *)ext,
					    flow, key_ls->key_layer, false);
		/* Populate Mask Metadata. */
		nfp_flower_compile_meta_tci((struct nfp_flower_meta_two *)msk,
					    flow, key_ls->key_layer, true);
		ext += sizeof(struct nfp_flower_meta_two);
		msk += sizeof(struct nfp_flower_meta_two);

		/* Populate Exact Port data. */
		err = nfp_flower_compile_port((struct nfp_flower_in_port *)ext,
					      nfp_repr_get_port_id(netdev),
					      false, tun_type);
		if (err)
			return err;

		/* Populate Mask Port Data. */
		err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk,
					      nfp_repr_get_port_id(netdev),
					      true, tun_type);
		if (err)
			return err;

		ext += sizeof(struct nfp_flower_in_port);
		msk += sizeof(struct nfp_flower_in_port);
	} else {
		/* Populate Exact Metadata. */
		nfp_flower_compile_meta((struct nfp_flower_meta_one *)ext,
					key_ls->key_layer);
		/* Populate Mask Metadata. */
		nfp_flower_compile_meta((struct nfp_flower_meta_one *)msk,
					key_ls->key_layer);
		ext += sizeof(struct nfp_flower_meta_one);
		msk += sizeof(struct nfp_flower_meta_one);
	}

	if (NFP_FLOWER_LAYER_META & key_ls->key_layer) {
		/* Additional Metadata Fields.
		 * Currently unsupported.
		 */
		return -EOPNOTSUPP;
	}

	if (NFP_FLOWER_LAYER_MAC & key_ls->key_layer) {
		/* Populate Exact MAC Data. */
		nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)ext,
				       flow, false);
		/* Populate Mask MAC Data. */
		nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)msk,
				       flow, true);
		ext += sizeof(struct nfp_flower_mac_mpls);
		msk += sizeof(struct nfp_flower_mac_mpls);
	}

	if (NFP_FLOWER_LAYER_TP & key_ls->key_layer) {
		/* Populate Exact TP Data. */
		nfp_flower_compile_tport((struct nfp_flower_tp_ports *)ext,
					 flow, false);
		/* Populate Mask TP Data. */
		nfp_flower_compile_tport((struct nfp_flower_tp_ports *)msk,
					 flow, true);
		ext += sizeof(struct nfp_flower_tp_ports);
		msk += sizeof(struct nfp_flower_tp_ports);
	}

	if (NFP_FLOWER_LAYER_IPV4 & key_ls->key_layer) {
		/* Populate Exact IPv4 Data. */
		nfp_flower_compile_ipv4((struct nfp_flower_ipv4 *)ext,
					flow, false);
		/* Populate Mask IPv4 Data. */
		nfp_flower_compile_ipv4((struct nfp_flower_ipv4 *)msk,
					flow, true);
		ext += sizeof(struct nfp_flower_ipv4);
		msk += sizeof(struct nfp_flower_ipv4);
	}

	if (NFP_FLOWER_LAYER_IPV6 & key_ls->key_layer) {
		/* Populate Exact IPv6 Data. */
		nfp_flower_compile_ipv6((struct nfp_flower_ipv6 *)ext,
					flow, false);
		/* Populate Mask IPv6 Data. */
		nfp_flower_compile_ipv6((struct nfp_flower_ipv6 *)msk,
					flow, true);
		ext += sizeof(struct nfp_flower_ipv6);
		msk += sizeof(struct nfp_flower_ipv6);
	}

	if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN) {
		/* Populate Exact VXLAN Data. */
		nfp_flower_compile_vxlan((struct nfp_flower_vxlan *)ext,
					 flow, false, &tun_dst);
		/* Populate Mask VXLAN Data. */
		nfp_flower_compile_vxlan((struct nfp_flower_vxlan *)msk,
					 flow, true, &tun_dst_mask);
		ext += sizeof(struct nfp_flower_vxlan);
		msk += sizeof(struct nfp_flower_vxlan);

		/* Configure tunnel end point MAC. */
		if (nfp_netdev_is_nfp_repr(netdev)) {
			netdev_repr = netdev_priv(netdev);
			nfp_tunnel_write_macs(netdev_repr->app);

			/* Store the tunnel destination in the rule data.
			 * This must be present and be an exact match.
			 */
			nfp_flow->nfp_tun_ipv4_addr = tun_dst;
			nfp_tunnel_add_ipv4_off(netdev_repr->app, tun_dst);
		}
	}

	return 0;
}