1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
3 
4 #include <linux/netdevice.h>
5 #include <linux/netlink.h>
6 #include <linux/random.h>
7 #include <net/vxlan.h>
8 
9 #include "reg.h"
10 #include "spectrum.h"
11 #include "spectrum_nve.h"
12 
13 /* Eth (18B) | IPv6 (40B) | UDP (8B) | VxLAN (8B) | Eth (14B) | IPv6 (40B)
14  *
15  * In the worst case - where we have a VLAN tag on the outer Ethernet
16  * header and IPv6 in overlay and underlay - we need to parse 128 bytes
17  */
18 #define MLXSW_SP_NVE_VXLAN_PARSING_DEPTH 128
19 #define MLXSW_SP_NVE_DEFAULT_PARSING_DEPTH 96
20 
21 #define MLXSW_SP_NVE_VXLAN_SUPPORTED_FLAGS	(VXLAN_F_UDP_ZERO_CSUM_TX | \
22 						 VXLAN_F_LEARN)
23 
24 static bool mlxsw_sp_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve,
25 					   const struct net_device *dev,
26 					   struct netlink_ext_ack *extack)
27 {
28 	struct vxlan_dev *vxlan = netdev_priv(dev);
29 	struct vxlan_config *cfg = &vxlan->cfg;
30 
31 	if (cfg->saddr.sa.sa_family != AF_INET) {
32 		NL_SET_ERR_MSG_MOD(extack, "VxLAN: Only IPv4 underlay is supported");
33 		return false;
34 	}
35 
36 	if (vxlan_addr_multicast(&cfg->remote_ip)) {
37 		NL_SET_ERR_MSG_MOD(extack, "VxLAN: Multicast destination IP is not supported");
38 		return false;
39 	}
40 
41 	if (vxlan_addr_any(&cfg->saddr)) {
42 		NL_SET_ERR_MSG_MOD(extack, "VxLAN: Source address must be specified");
43 		return false;
44 	}
45 
46 	if (cfg->remote_ifindex) {
47 		NL_SET_ERR_MSG_MOD(extack, "VxLAN: Local interface is not supported");
48 		return false;
49 	}
50 
51 	if (cfg->port_min || cfg->port_max) {
52 		NL_SET_ERR_MSG_MOD(extack, "VxLAN: Only default UDP source port range is supported");
53 		return false;
54 	}
55 
56 	if (cfg->tos != 1) {
57 		NL_SET_ERR_MSG_MOD(extack, "VxLAN: TOS must be configured to inherit");
58 		return false;
59 	}
60 
61 	if (cfg->flags & VXLAN_F_TTL_INHERIT) {
62 		NL_SET_ERR_MSG_MOD(extack, "VxLAN: TTL must not be configured to inherit");
63 		return false;
64 	}
65 
66 	if (!(cfg->flags & VXLAN_F_UDP_ZERO_CSUM_TX)) {
67 		NL_SET_ERR_MSG_MOD(extack, "VxLAN: UDP checksum is not supported");
68 		return false;
69 	}
70 
71 	if (cfg->flags & ~MLXSW_SP_NVE_VXLAN_SUPPORTED_FLAGS) {
72 		NL_SET_ERR_MSG_MOD(extack, "VxLAN: Unsupported flag");
73 		return false;
74 	}
75 
76 	if (cfg->ttl == 0) {
77 		NL_SET_ERR_MSG_MOD(extack, "VxLAN: TTL must not be configured to 0");
78 		return false;
79 	}
80 
81 	if (cfg->label != 0) {
82 		NL_SET_ERR_MSG_MOD(extack, "VxLAN: Flow label must be configured to 0");
83 		return false;
84 	}
85 
86 	return true;
87 }
88 
89 static void mlxsw_sp_nve_vxlan_config(const struct mlxsw_sp_nve *nve,
90 				      const struct net_device *dev,
91 				      struct mlxsw_sp_nve_config *config)
92 {
93 	struct vxlan_dev *vxlan = netdev_priv(dev);
94 	struct vxlan_config *cfg = &vxlan->cfg;
95 
96 	config->type = MLXSW_SP_NVE_TYPE_VXLAN;
97 	config->ttl = cfg->ttl;
98 	config->flowlabel = cfg->label;
99 	config->learning_en = cfg->flags & VXLAN_F_LEARN ? 1 : 0;
100 	config->ul_tb_id = RT_TABLE_MAIN;
101 	config->ul_proto = MLXSW_SP_L3_PROTO_IPV4;
102 	config->ul_sip.addr4 = cfg->saddr.sin.sin_addr.s_addr;
103 	config->udp_dport = cfg->dst_port;
104 }
105 
106 static int mlxsw_sp_nve_parsing_set(struct mlxsw_sp *mlxsw_sp,
107 				    unsigned int parsing_depth,
108 				    __be16 udp_dport)
109 {
110 	char mprs_pl[MLXSW_REG_MPRS_LEN];
111 
112 	mlxsw_reg_mprs_pack(mprs_pl, parsing_depth, be16_to_cpu(udp_dport));
113 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl);
114 }
115 
116 static void
117 mlxsw_sp_nve_vxlan_config_prepare(char *tngcr_pl,
118 				  const struct mlxsw_sp_nve_config *config)
119 {
120 	u8 udp_sport;
121 
122 	mlxsw_reg_tngcr_pack(tngcr_pl, MLXSW_REG_TNGCR_TYPE_VXLAN, true,
123 			     config->ttl);
124 	/* VxLAN driver's default UDP source port range is 32768 (0x8000)
125 	 * to 60999 (0xee47). Set the upper 8 bits of the UDP source port
126 	 * to a random number between 0x80 and 0xee
127 	 */
128 	get_random_bytes(&udp_sport, sizeof(udp_sport));
129 	udp_sport = (udp_sport % (0xee - 0x80 + 1)) + 0x80;
130 	mlxsw_reg_tngcr_nve_udp_sport_prefix_set(tngcr_pl, udp_sport);
131 	mlxsw_reg_tngcr_usipv4_set(tngcr_pl, be32_to_cpu(config->ul_sip.addr4));
132 }
133 
134 static int
135 mlxsw_sp1_nve_vxlan_config_set(struct mlxsw_sp *mlxsw_sp,
136 			       const struct mlxsw_sp_nve_config *config)
137 {
138 	char tngcr_pl[MLXSW_REG_TNGCR_LEN];
139 	u16 ul_vr_id;
140 	int err;
141 
142 	err = mlxsw_sp_router_tb_id_vr_id(mlxsw_sp, config->ul_tb_id,
143 					  &ul_vr_id);
144 	if (err)
145 		return err;
146 
147 	mlxsw_sp_nve_vxlan_config_prepare(tngcr_pl, config);
148 	mlxsw_reg_tngcr_learn_enable_set(tngcr_pl, config->learning_en);
149 	mlxsw_reg_tngcr_underlay_virtual_router_set(tngcr_pl, ul_vr_id);
150 
151 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tngcr), tngcr_pl);
152 }
153 
154 static void mlxsw_sp1_nve_vxlan_config_clear(struct mlxsw_sp *mlxsw_sp)
155 {
156 	char tngcr_pl[MLXSW_REG_TNGCR_LEN];
157 
158 	mlxsw_reg_tngcr_pack(tngcr_pl, MLXSW_REG_TNGCR_TYPE_VXLAN, false, 0);
159 
160 	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tngcr), tngcr_pl);
161 }
162 
163 static int mlxsw_sp1_nve_vxlan_rtdp_set(struct mlxsw_sp *mlxsw_sp,
164 					unsigned int tunnel_index)
165 {
166 	char rtdp_pl[MLXSW_REG_RTDP_LEN];
167 
168 	mlxsw_reg_rtdp_pack(rtdp_pl, MLXSW_REG_RTDP_TYPE_NVE, tunnel_index);
169 
170 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtdp), rtdp_pl);
171 }
172 
/* Enable VxLAN offload on a Spectrum-1 ASIC: deepen the packet parser to
 * cover the worst-case encapsulated packet, program the global NVE
 * configuration (TNGCR), bind tunnel decapsulation to the allocated
 * tunnel index (RTDP) and finally install the decap route. Steps are
 * unwound in reverse order on failure.
 */
static int mlxsw_sp1_nve_vxlan_init(struct mlxsw_sp_nve *nve,
				    const struct mlxsw_sp_nve_config *config)
{
	struct mlxsw_sp *mlxsw_sp = nve->mlxsw_sp;
	int err;

	/* See MLXSW_SP_NVE_VXLAN_PARSING_DEPTH above for the 128B
	 * worst-case breakdown.
	 */
	err = mlxsw_sp_nve_parsing_set(mlxsw_sp,
				       MLXSW_SP_NVE_VXLAN_PARSING_DEPTH,
				       config->udp_dport);
	if (err)
		return err;

	err = mlxsw_sp1_nve_vxlan_config_set(mlxsw_sp, config);
	if (err)
		goto err_config_set;

	err = mlxsw_sp1_nve_vxlan_rtdp_set(mlxsw_sp, nve->tunnel_index);
	if (err)
		goto err_rtdp_set;

	/* Install the decap route for the underlay source IP. */
	err = mlxsw_sp_router_nve_promote_decap(mlxsw_sp, config->ul_tb_id,
						config->ul_proto,
						&config->ul_sip,
						nve->tunnel_index);
	if (err)
		goto err_promote_decap;

	return 0;

err_promote_decap:
err_rtdp_set:
	mlxsw_sp1_nve_vxlan_config_clear(mlxsw_sp);
err_config_set:
	/* Best-effort restore of the default parsing depth. */
	mlxsw_sp_nve_parsing_set(mlxsw_sp, MLXSW_SP_NVE_DEFAULT_PARSING_DEPTH,
				 config->udp_dport);
	return err;
}
210 
/* Tear down VxLAN offload on Spectrum-1 in reverse order of init:
 * remove the decap route, clear the global NVE configuration and
 * restore the default parsing depth.
 */
static void mlxsw_sp1_nve_vxlan_fini(struct mlxsw_sp_nve *nve)
{
	struct mlxsw_sp_nve_config *config = &nve->config;
	struct mlxsw_sp *mlxsw_sp = nve->mlxsw_sp;

	mlxsw_sp_router_nve_demote_decap(mlxsw_sp, config->ul_tb_id,
					 config->ul_proto, &config->ul_sip);
	mlxsw_sp1_nve_vxlan_config_clear(mlxsw_sp);
	mlxsw_sp_nve_parsing_set(mlxsw_sp, MLXSW_SP_NVE_DEFAULT_PARSING_DEPTH,
				 config->udp_dport);
}
222 
223 static int
224 mlxsw_sp_nve_vxlan_fdb_replay(const struct net_device *nve_dev, __be32 vni,
225 			      struct netlink_ext_ack *extack)
226 {
227 	if (WARN_ON(!netif_is_vxlan(nve_dev)))
228 		return -EINVAL;
229 	return vxlan_fdb_replay(nve_dev, vni, &mlxsw_sp_switchdev_notifier,
230 				extack);
231 }
232 
233 static void
234 mlxsw_sp_nve_vxlan_clear_offload(const struct net_device *nve_dev, __be32 vni)
235 {
236 	if (WARN_ON(!netif_is_vxlan(nve_dev)))
237 		return;
238 	vxlan_fdb_clear_offload(nve_dev, vni);
239 }
240 
/* VxLAN NVE operations for Spectrum-1 ASICs. */
const struct mlxsw_sp_nve_ops mlxsw_sp1_nve_vxlan_ops = {
	.type		= MLXSW_SP_NVE_TYPE_VXLAN,
	.can_offload	= mlxsw_sp_nve_vxlan_can_offload,
	.nve_config	= mlxsw_sp_nve_vxlan_config,
	.init		= mlxsw_sp1_nve_vxlan_init,
	.fini		= mlxsw_sp1_nve_vxlan_fini,
	.fdb_replay	= mlxsw_sp_nve_vxlan_fdb_replay,
	.fdb_clear_offload = mlxsw_sp_nve_vxlan_clear_offload,
};
250 
251 static bool mlxsw_sp2_nve_vxlan_learning_set(struct mlxsw_sp *mlxsw_sp,
252 					     bool learning_en)
253 {
254 	char tnpc_pl[MLXSW_REG_TNPC_LEN];
255 
256 	mlxsw_reg_tnpc_pack(tnpc_pl, MLXSW_REG_TNPC_TUNNEL_PORT_NVE,
257 			    learning_en);
258 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tnpc), tnpc_pl);
259 }
260 
/* Program the global Spectrum-2 NVE configuration: take a reference on
 * the underlay RIF for the given routing table, set the learning mode
 * via TNPC and write TNGCR. Acquired resources are released in reverse
 * order on failure.
 */
static int
mlxsw_sp2_nve_vxlan_config_set(struct mlxsw_sp *mlxsw_sp,
			       const struct mlxsw_sp_nve_config *config)
{
	char tngcr_pl[MLXSW_REG_TNGCR_LEN];
	u16 ul_rif_index;
	int err;

	err = mlxsw_sp_router_ul_rif_get(mlxsw_sp, config->ul_tb_id,
					 &ul_rif_index);
	if (err)
		return err;
	/* Cache the RIF index; config_clear() and rtdp_set() use it. */
	mlxsw_sp->nve->ul_rif_index = ul_rif_index;

	err = mlxsw_sp2_nve_vxlan_learning_set(mlxsw_sp, config->learning_en);
	if (err)
		goto err_vxlan_learning_set;

	mlxsw_sp_nve_vxlan_config_prepare(tngcr_pl, config);
	mlxsw_reg_tngcr_underlay_rif_set(tngcr_pl, ul_rif_index);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tngcr), tngcr_pl);
	if (err)
		goto err_tngcr_write;

	return 0;

err_tngcr_write:
	mlxsw_sp2_nve_vxlan_learning_set(mlxsw_sp, false);
err_vxlan_learning_set:
	mlxsw_sp_router_ul_rif_put(mlxsw_sp, ul_rif_index);
	return err;
}
294 
/* Undo mlxsw_sp2_nve_vxlan_config_set() in reverse order: disable
 * tunneling via TNGCR, disable learning and release the underlay RIF
 * reference.
 */
static void mlxsw_sp2_nve_vxlan_config_clear(struct mlxsw_sp *mlxsw_sp)
{
	char tngcr_pl[MLXSW_REG_TNGCR_LEN];

	mlxsw_reg_tngcr_pack(tngcr_pl, MLXSW_REG_TNGCR_TYPE_VXLAN, false, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tngcr), tngcr_pl);
	mlxsw_sp2_nve_vxlan_learning_set(mlxsw_sp, false);
	mlxsw_sp_router_ul_rif_put(mlxsw_sp, mlxsw_sp->nve->ul_rif_index);
}
304 
305 static int mlxsw_sp2_nve_vxlan_rtdp_set(struct mlxsw_sp *mlxsw_sp,
306 					unsigned int tunnel_index,
307 					u16 ul_rif_index)
308 {
309 	char rtdp_pl[MLXSW_REG_RTDP_LEN];
310 
311 	mlxsw_reg_rtdp_pack(rtdp_pl, MLXSW_REG_RTDP_TYPE_NVE, tunnel_index);
312 	mlxsw_reg_rtdp_egress_router_interface_set(rtdp_pl, ul_rif_index);
313 
314 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtdp), rtdp_pl);
315 }
316 
/* Enable VxLAN offload on a Spectrum-2 ASIC. Same overall sequence as
 * Spectrum-1 (parsing depth, global NVE configuration, RTDP, decap
 * route), except RTDP also needs the underlay RIF cached by
 * mlxsw_sp2_nve_vxlan_config_set(). Steps are unwound in reverse order
 * on failure.
 */
static int mlxsw_sp2_nve_vxlan_init(struct mlxsw_sp_nve *nve,
				    const struct mlxsw_sp_nve_config *config)
{
	struct mlxsw_sp *mlxsw_sp = nve->mlxsw_sp;
	int err;

	/* See MLXSW_SP_NVE_VXLAN_PARSING_DEPTH above for the 128B
	 * worst-case breakdown.
	 */
	err = mlxsw_sp_nve_parsing_set(mlxsw_sp,
				       MLXSW_SP_NVE_VXLAN_PARSING_DEPTH,
				       config->udp_dport);
	if (err)
		return err;

	/* Also sets nve->ul_rif_index, consumed by rtdp_set() below. */
	err = mlxsw_sp2_nve_vxlan_config_set(mlxsw_sp, config);
	if (err)
		goto err_config_set;

	err = mlxsw_sp2_nve_vxlan_rtdp_set(mlxsw_sp, nve->tunnel_index,
					   nve->ul_rif_index);
	if (err)
		goto err_rtdp_set;

	/* Install the decap route for the underlay source IP. */
	err = mlxsw_sp_router_nve_promote_decap(mlxsw_sp, config->ul_tb_id,
						config->ul_proto,
						&config->ul_sip,
						nve->tunnel_index);
	if (err)
		goto err_promote_decap;

	return 0;

err_promote_decap:
err_rtdp_set:
	mlxsw_sp2_nve_vxlan_config_clear(mlxsw_sp);
err_config_set:
	/* Best-effort restore of the default parsing depth. */
	mlxsw_sp_nve_parsing_set(mlxsw_sp, MLXSW_SP_NVE_DEFAULT_PARSING_DEPTH,
				 config->udp_dport);
	return err;
}
355 
/* Tear down VxLAN offload on Spectrum-2 in reverse order of init:
 * remove the decap route, clear the global NVE configuration and
 * restore the default parsing depth.
 */
static void mlxsw_sp2_nve_vxlan_fini(struct mlxsw_sp_nve *nve)
{
	struct mlxsw_sp_nve_config *config = &nve->config;
	struct mlxsw_sp *mlxsw_sp = nve->mlxsw_sp;

	mlxsw_sp_router_nve_demote_decap(mlxsw_sp, config->ul_tb_id,
					 config->ul_proto, &config->ul_sip);
	mlxsw_sp2_nve_vxlan_config_clear(mlxsw_sp);
	mlxsw_sp_nve_parsing_set(mlxsw_sp, MLXSW_SP_NVE_DEFAULT_PARSING_DEPTH,
				 config->udp_dport);
}
367 
/* VxLAN NVE operations for Spectrum-2 ASICs. */
const struct mlxsw_sp_nve_ops mlxsw_sp2_nve_vxlan_ops = {
	.type		= MLXSW_SP_NVE_TYPE_VXLAN,
	.can_offload	= mlxsw_sp_nve_vxlan_can_offload,
	.nve_config	= mlxsw_sp_nve_vxlan_config,
	.init		= mlxsw_sp2_nve_vxlan_init,
	.fini		= mlxsw_sp2_nve_vxlan_fini,
	.fdb_replay	= mlxsw_sp_nve_vxlan_fdb_replay,
	.fdb_clear_offload = mlxsw_sp_nve_vxlan_clear_offload,
};
377