1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
3 
4 #include <linux/kernel.h>
5 #include <linux/module.h>
6 #include <linux/types.h>
7 #include <linux/pci.h>
8 #include <linux/netdevice.h>
9 #include <linux/etherdevice.h>
10 #include <linux/ethtool.h>
11 #include <linux/slab.h>
12 #include <linux/device.h>
13 #include <linux/skbuff.h>
14 #include <linux/if_vlan.h>
15 #include <linux/if_bridge.h>
16 #include <linux/workqueue.h>
17 #include <linux/jiffies.h>
18 #include <linux/bitops.h>
19 #include <linux/list.h>
20 #include <linux/notifier.h>
21 #include <linux/dcbnl.h>
22 #include <linux/inetdevice.h>
23 #include <linux/netlink.h>
24 #include <linux/jhash.h>
25 #include <linux/log2.h>
26 #include <linux/refcount.h>
27 #include <linux/rhashtable.h>
28 #include <net/switchdev.h>
29 #include <net/pkt_cls.h>
30 #include <net/netevent.h>
31 #include <net/addrconf.h>
32 
33 #include "spectrum.h"
34 #include "pci.h"
35 #include "core.h"
36 #include "core_env.h"
37 #include "reg.h"
38 #include "port.h"
39 #include "trap.h"
40 #include "txheader.h"
41 #include "spectrum_cnt.h"
42 #include "spectrum_dpipe.h"
43 #include "spectrum_acl_flex_actions.h"
44 #include "spectrum_span.h"
45 #include "spectrum_ptp.h"
46 #include "spectrum_trap.h"
47 
48 #define MLXSW_SP1_FWREV_MAJOR 13
49 #define MLXSW_SP1_FWREV_MINOR 2008
50 #define MLXSW_SP1_FWREV_SUBMINOR 2406
51 #define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702
52 
53 static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
54 	.major = MLXSW_SP1_FWREV_MAJOR,
55 	.minor = MLXSW_SP1_FWREV_MINOR,
56 	.subminor = MLXSW_SP1_FWREV_SUBMINOR,
57 	.can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR,
58 };
59 
60 #define MLXSW_SP1_FW_FILENAME \
61 	"mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \
62 	"." __stringify(MLXSW_SP1_FWREV_MINOR) \
63 	"." __stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2"
64 
65 #define MLXSW_SP2_FWREV_MAJOR 29
66 #define MLXSW_SP2_FWREV_MINOR 2008
67 #define MLXSW_SP2_FWREV_SUBMINOR 2406
68 
69 static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
70 	.major = MLXSW_SP2_FWREV_MAJOR,
71 	.minor = MLXSW_SP2_FWREV_MINOR,
72 	.subminor = MLXSW_SP2_FWREV_SUBMINOR,
73 };
74 
75 #define MLXSW_SP2_FW_FILENAME \
76 	"mellanox/mlxsw_spectrum2-" __stringify(MLXSW_SP2_FWREV_MAJOR) \
77 	"." __stringify(MLXSW_SP2_FWREV_MINOR) \
78 	"." __stringify(MLXSW_SP2_FWREV_SUBMINOR) ".mfa2"
79 
80 #define MLXSW_SP3_FWREV_MAJOR 30
81 #define MLXSW_SP3_FWREV_MINOR 2008
82 #define MLXSW_SP3_FWREV_SUBMINOR 2406
83 
84 static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = {
85 	.major = MLXSW_SP3_FWREV_MAJOR,
86 	.minor = MLXSW_SP3_FWREV_MINOR,
87 	.subminor = MLXSW_SP3_FWREV_SUBMINOR,
88 };
89 
90 #define MLXSW_SP3_FW_FILENAME \
91 	"mellanox/mlxsw_spectrum3-" __stringify(MLXSW_SP3_FWREV_MAJOR) \
92 	"." __stringify(MLXSW_SP3_FWREV_MINOR) \
93 	"." __stringify(MLXSW_SP3_FWREV_SUBMINOR) ".mfa2"
94 
95 static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
96 static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
97 static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3";
98 
99 static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = {
100 	0xff, 0xff, 0xff, 0xff, 0xfc, 0x00
101 };
102 static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = {
103 	0xff, 0xff, 0xff, 0xff, 0xf0, 0x00
104 };
105 
106 /* tx_hdr_version
107  * Tx header version.
108  * Must be set to 1.
109  */
110 MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);
111 
112 /* tx_hdr_ctl
113  * Packet control type.
114  * 0 - Ethernet control (e.g. EMADs, LACP)
115  * 1 - Ethernet data
116  */
117 MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);
118 
119 /* tx_hdr_proto
120  * Packet protocol type. Must be set to 1 (Ethernet).
121  */
122 MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);
123 
124 /* tx_hdr_rx_is_router
125  * Packet is sent from the router. Valid for data packets only.
126  */
127 MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);
128 
129 /* tx_hdr_fid_valid
130  * Indicates if the 'fid' field is valid and should be used for
131  * forwarding lookup. Valid for data packets only.
132  */
133 MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);
134 
135 /* tx_hdr_swid
136  * Switch partition ID. Must be set to 0.
137  */
138 MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);
139 
140 /* tx_hdr_control_tclass
141  * Indicates if the packet should use the control TClass and not one
142  * of the data TClasses.
143  */
144 MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);
145 
146 /* tx_hdr_etclass
147  * Egress TClass to be used on the egress device on the egress port.
148  */
149 MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);
150 
151 /* tx_hdr_port_mid
152  * Destination local port for unicast packets.
153  * Destination multicast ID for multicast packets.
154  *
155  * Control packets are directed to a specific egress port, while data
156  * packets are transmitted through the CPU port (0) into the switch partition,
157  * where forwarding rules are applied.
158  */
159 MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);
160 
161 /* tx_hdr_fid
162  * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
163  * set, otherwise calculated based on the packet's VID using VID to FID mapping.
164  * Valid for data packets only.
165  */
166 MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);
167 
168 /* tx_hdr_type
169  * 0 - Data packets
170  * 6 - Control packets
171  */
172 MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
173 
174 int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
175 			      unsigned int counter_index, u64 *packets,
176 			      u64 *bytes)
177 {
178 	char mgpc_pl[MLXSW_REG_MGPC_LEN];
179 	int err;
180 
181 	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
182 			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
183 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
184 	if (err)
185 		return err;
186 	if (packets)
187 		*packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
188 	if (bytes)
189 		*bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
190 	return 0;
191 }
192 
193 static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
194 				       unsigned int counter_index)
195 {
196 	char mgpc_pl[MLXSW_REG_MGPC_LEN];
197 
198 	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
199 			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
200 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
201 }
202 
203 int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
204 				unsigned int *p_counter_index)
205 {
206 	int err;
207 
208 	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
209 				     p_counter_index);
210 	if (err)
211 		return err;
212 	err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
213 	if (err)
214 		goto err_counter_clear;
215 	return 0;
216 
217 err_counter_clear:
218 	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
219 			      *p_counter_index);
220 	return err;
221 }
222 
223 void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
224 				unsigned int counter_index)
225 {
226 	 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
227 			       counter_index);
228 }
229 
230 static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
231 				     const struct mlxsw_tx_info *tx_info)
232 {
233 	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
234 
235 	memset(txhdr, 0, MLXSW_TXHDR_LEN);
236 
237 	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
238 	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
239 	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
240 	mlxsw_tx_hdr_swid_set(txhdr, 0);
241 	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
242 	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
243 	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
244 }
245 
246 enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state)
247 {
248 	switch (state) {
249 	case BR_STATE_FORWARDING:
250 		return MLXSW_REG_SPMS_STATE_FORWARDING;
251 	case BR_STATE_LEARNING:
252 		return MLXSW_REG_SPMS_STATE_LEARNING;
253 	case BR_STATE_LISTENING:
254 	case BR_STATE_DISABLED:
255 	case BR_STATE_BLOCKING:
256 		return MLXSW_REG_SPMS_STATE_DISCARDING;
257 	default:
258 		BUG();
259 	}
260 }
261 
262 int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
263 			      u8 state)
264 {
265 	enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state);
266 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
267 	char *spms_pl;
268 	int err;
269 
270 	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
271 	if (!spms_pl)
272 		return -ENOMEM;
273 	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
274 	mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
275 
276 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
277 	kfree(spms_pl);
278 	return err;
279 }
280 
281 static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
282 {
283 	char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
284 	int err;
285 
286 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
287 	if (err)
288 		return err;
289 	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
290 	return 0;
291 }
292 
293 int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
294 				   bool is_up)
295 {
296 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
297 	char paos_pl[MLXSW_REG_PAOS_LEN];
298 
299 	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
300 			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
301 			    MLXSW_PORT_ADMIN_STATUS_DOWN);
302 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
303 }
304 
305 static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
306 				      unsigned char *addr)
307 {
308 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
309 	char ppad_pl[MLXSW_REG_PPAD_LEN];
310 
311 	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
312 	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
313 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
314 }
315 
316 static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
317 {
318 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
319 	unsigned char *addr = mlxsw_sp_port->dev->dev_addr;
320 
321 	ether_addr_copy(addr, mlxsw_sp->base_mac);
322 	addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
323 	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
324 }
325 
326 static int mlxsw_sp_port_max_mtu_get(struct mlxsw_sp_port *mlxsw_sp_port, int *p_max_mtu)
327 {
328 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
329 	char pmtu_pl[MLXSW_REG_PMTU_LEN];
330 	int err;
331 
332 	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
333 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
334 	if (err)
335 		return err;
336 
337 	*p_max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
338 	return 0;
339 }
340 
341 static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
342 {
343 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
344 	char pmtu_pl[MLXSW_REG_PMTU_LEN];
345 
346 	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
347 	if (mtu > mlxsw_sp_port->max_mtu)
348 		return -EINVAL;
349 
350 	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
351 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
352 }
353 
354 static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
355 {
356 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
357 	char pspa_pl[MLXSW_REG_PSPA_LEN];
358 
359 	mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
360 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
361 }
362 
363 int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
364 {
365 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
366 	char svpe_pl[MLXSW_REG_SVPE_LEN];
367 
368 	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
369 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
370 }
371 
372 int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
373 				   bool learn_enable)
374 {
375 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
376 	char *spvmlr_pl;
377 	int err;
378 
379 	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
380 	if (!spvmlr_pl)
381 		return -ENOMEM;
382 	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
383 			      learn_enable);
384 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
385 	kfree(spvmlr_pl);
386 	return err;
387 }
388 
389 int mlxsw_sp_ethtype_to_sver_type(u16 ethtype, u8 *p_sver_type)
390 {
391 	switch (ethtype) {
392 	case ETH_P_8021Q:
393 		*p_sver_type = 0;
394 		break;
395 	case ETH_P_8021AD:
396 		*p_sver_type = 1;
397 		break;
398 	default:
399 		return -EINVAL;
400 	}
401 
402 	return 0;
403 }
404 
405 static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
406 				    u16 vid, u16 ethtype)
407 {
408 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
409 	char spvid_pl[MLXSW_REG_SPVID_LEN];
410 	u8 sver_type;
411 	int err;
412 
413 	err = mlxsw_sp_ethtype_to_sver_type(ethtype, &sver_type);
414 	if (err)
415 		return err;
416 
417 	mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid,
418 			     sver_type);
419 
420 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
421 }
422 
423 static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
424 					    bool allow)
425 {
426 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
427 	char spaft_pl[MLXSW_REG_SPAFT_LEN];
428 
429 	mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
430 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
431 }
432 
433 int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
434 			   u16 ethtype)
435 {
436 	int err;
437 
438 	if (!vid) {
439 		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
440 		if (err)
441 			return err;
442 	} else {
443 		err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid, ethtype);
444 		if (err)
445 			return err;
446 		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
447 		if (err)
448 			goto err_port_allow_untagged_set;
449 	}
450 
451 	mlxsw_sp_port->pvid = vid;
452 	return 0;
453 
454 err_port_allow_untagged_set:
455 	__mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid, ethtype);
456 	return err;
457 }
458 
459 static int
460 mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
461 {
462 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
463 	char sspr_pl[MLXSW_REG_SSPR_LEN];
464 
465 	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
466 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
467 }
468 
469 static int
470 mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u8 local_port,
471 			      struct mlxsw_sp_port_mapping *port_mapping)
472 {
473 	char pmlp_pl[MLXSW_REG_PMLP_LEN];
474 	bool separate_rxtx;
475 	u8 module;
476 	u8 width;
477 	int err;
478 	int i;
479 
480 	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
481 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
482 	if (err)
483 		return err;
484 	module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
485 	width = mlxsw_reg_pmlp_width_get(pmlp_pl);
486 	separate_rxtx = mlxsw_reg_pmlp_rxtx_get(pmlp_pl);
487 
488 	if (width && !is_power_of_2(width)) {
489 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: width value is not power of 2\n",
490 			local_port);
491 		return -EINVAL;
492 	}
493 
494 	for (i = 0; i < width; i++) {
495 		if (mlxsw_reg_pmlp_module_get(pmlp_pl, i) != module) {
496 			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple modules\n",
497 				local_port);
498 			return -EINVAL;
499 		}
500 		if (separate_rxtx &&
501 		    mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) !=
502 		    mlxsw_reg_pmlp_rx_lane_get(pmlp_pl, i)) {
503 			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are different\n",
504 				local_port);
505 			return -EINVAL;
506 		}
507 		if (mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != i) {
508 			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are not sequential\n",
509 				local_port);
510 			return -EINVAL;
511 		}
512 	}
513 
514 	port_mapping->module = module;
515 	port_mapping->width = width;
516 	port_mapping->lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
517 	return 0;
518 }
519 
520 static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port)
521 {
522 	struct mlxsw_sp_port_mapping *port_mapping = &mlxsw_sp_port->mapping;
523 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
524 	char pmlp_pl[MLXSW_REG_PMLP_LEN];
525 	int i;
526 
527 	mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
528 	mlxsw_reg_pmlp_width_set(pmlp_pl, port_mapping->width);
529 	for (i = 0; i < port_mapping->width; i++) {
530 		mlxsw_reg_pmlp_module_set(pmlp_pl, i, port_mapping->module);
531 		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, port_mapping->lane + i); /* Rx & Tx */
532 	}
533 
534 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
535 }
536 
537 static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port *mlxsw_sp_port)
538 {
539 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
540 	char pmlp_pl[MLXSW_REG_PMLP_LEN];
541 
542 	mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
543 	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
544 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
545 }
546 
547 static int mlxsw_sp_port_open(struct net_device *dev)
548 {
549 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
550 	int err;
551 
552 	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
553 	if (err)
554 		return err;
555 	netif_start_queue(dev);
556 	return 0;
557 }
558 
559 static int mlxsw_sp_port_stop(struct net_device *dev)
560 {
561 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
562 
563 	netif_stop_queue(dev);
564 	return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
565 }
566 
567 static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
568 				      struct net_device *dev)
569 {
570 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
571 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
572 	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
573 	const struct mlxsw_tx_info tx_info = {
574 		.local_port = mlxsw_sp_port->local_port,
575 		.is_emad = false,
576 	};
577 	u64 len;
578 	int err;
579 
580 	if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
581 		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
582 		dev_kfree_skb_any(skb);
583 		return NETDEV_TX_OK;
584 	}
585 
586 	memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));
587 
588 	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
589 		return NETDEV_TX_BUSY;
590 
591 	if (eth_skb_pad(skb)) {
592 		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
593 		return NETDEV_TX_OK;
594 	}
595 
596 	mlxsw_sp_txhdr_construct(skb, &tx_info);
597 	/* TX header is consumed by HW on the way so we shouldn't count its
598 	 * bytes as being sent.
599 	 */
600 	len = skb->len - MLXSW_TXHDR_LEN;
601 
602 	/* Due to a race we might fail here because of a full queue. In that
603 	 * unlikely case we simply drop the packet.
604 	 */
605 	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);
606 
607 	if (!err) {
608 		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
609 		u64_stats_update_begin(&pcpu_stats->syncp);
610 		pcpu_stats->tx_packets++;
611 		pcpu_stats->tx_bytes += len;
612 		u64_stats_update_end(&pcpu_stats->syncp);
613 	} else {
614 		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
615 		dev_kfree_skb_any(skb);
616 	}
617 	return NETDEV_TX_OK;
618 }
619 
620 static void mlxsw_sp_set_rx_mode(struct net_device *dev)
621 {
622 }
623 
624 static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
625 {
626 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
627 	struct sockaddr *addr = p;
628 	int err;
629 
630 	if (!is_valid_ether_addr(addr->sa_data))
631 		return -EADDRNOTAVAIL;
632 
633 	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
634 	if (err)
635 		return err;
636 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
637 	return 0;
638 }
639 
640 static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
641 {
642 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
643 	struct mlxsw_sp_hdroom orig_hdroom;
644 	struct mlxsw_sp_hdroom hdroom;
645 	int err;
646 
647 	orig_hdroom = *mlxsw_sp_port->hdroom;
648 
649 	hdroom = orig_hdroom;
650 	hdroom.mtu = mtu;
651 	mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
652 
653 	err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
654 	if (err) {
655 		netdev_err(dev, "Failed to configure port's headroom\n");
656 		return err;
657 	}
658 
659 	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
660 	if (err)
661 		goto err_port_mtu_set;
662 	dev->mtu = mtu;
663 	return 0;
664 
665 err_port_mtu_set:
666 	mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig_hdroom);
667 	return err;
668 }
669 
670 static int
671 mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
672 			     struct rtnl_link_stats64 *stats)
673 {
674 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
675 	struct mlxsw_sp_port_pcpu_stats *p;
676 	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
677 	u32 tx_dropped = 0;
678 	unsigned int start;
679 	int i;
680 
681 	for_each_possible_cpu(i) {
682 		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
683 		do {
684 			start = u64_stats_fetch_begin_irq(&p->syncp);
685 			rx_packets	= p->rx_packets;
686 			rx_bytes	= p->rx_bytes;
687 			tx_packets	= p->tx_packets;
688 			tx_bytes	= p->tx_bytes;
689 		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
690 
691 		stats->rx_packets	+= rx_packets;
692 		stats->rx_bytes		+= rx_bytes;
693 		stats->tx_packets	+= tx_packets;
694 		stats->tx_bytes		+= tx_bytes;
695 		/* tx_dropped is u32, updated without syncp protection. */
696 		tx_dropped	+= p->tx_dropped;
697 	}
698 	stats->tx_dropped	= tx_dropped;
699 	return 0;
700 }
701 
702 static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
703 {
704 	switch (attr_id) {
705 	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
706 		return true;
707 	}
708 
709 	return false;
710 }
711 
712 static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
713 					   void *sp)
714 {
715 	switch (attr_id) {
716 	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
717 		return mlxsw_sp_port_get_sw_stats64(dev, sp);
718 	}
719 
720 	return -EINVAL;
721 }
722 
723 int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
724 				int prio, char *ppcnt_pl)
725 {
726 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
727 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
728 
729 	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
730 	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
731 }
732 
733 static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
734 				      struct rtnl_link_stats64 *stats)
735 {
736 	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
737 	int err;
738 
739 	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
740 					  0, ppcnt_pl);
741 	if (err)
742 		goto out;
743 
744 	stats->tx_packets =
745 		mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
746 	stats->rx_packets =
747 		mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
748 	stats->tx_bytes =
749 		mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
750 	stats->rx_bytes =
751 		mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
752 	stats->multicast =
753 		mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);
754 
755 	stats->rx_crc_errors =
756 		mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
757 	stats->rx_frame_errors =
758 		mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);
759 
760 	stats->rx_length_errors = (
761 		mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
762 		mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
763 		mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));
764 
765 	stats->rx_errors = (stats->rx_crc_errors +
766 		stats->rx_frame_errors + stats->rx_length_errors);
767 
768 out:
769 	return err;
770 }
771 
772 static void
773 mlxsw_sp_port_get_hw_xstats(struct net_device *dev,
774 			    struct mlxsw_sp_port_xstats *xstats)
775 {
776 	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
777 	int err, i;
778 
779 	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0,
780 					  ppcnt_pl);
781 	if (!err)
782 		xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl);
783 
784 	for (i = 0; i < TC_MAX_QUEUE; i++) {
785 		err = mlxsw_sp_port_get_stats_raw(dev,
786 						  MLXSW_REG_PPCNT_TC_CONG_TC,
787 						  i, ppcnt_pl);
788 		if (!err)
789 			xstats->wred_drop[i] =
790 				mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl);
791 
792 		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT,
793 						  i, ppcnt_pl);
794 		if (err)
795 			continue;
796 
797 		xstats->backlog[i] =
798 			mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
799 		xstats->tail_drop[i] =
800 			mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl);
801 	}
802 
803 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
804 		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT,
805 						  i, ppcnt_pl);
806 		if (err)
807 			continue;
808 
809 		xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl);
810 		xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl);
811 	}
812 }
813 
814 static void update_stats_cache(struct work_struct *work)
815 {
816 	struct mlxsw_sp_port *mlxsw_sp_port =
817 		container_of(work, struct mlxsw_sp_port,
818 			     periodic_hw_stats.update_dw.work);
819 
820 	if (!netif_carrier_ok(mlxsw_sp_port->dev))
821 		/* Note: mlxsw_sp_port_down_wipe_counters() clears the cache as
822 		 * necessary when port goes down.
823 		 */
824 		goto out;
825 
826 	mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
827 				   &mlxsw_sp_port->periodic_hw_stats.stats);
828 	mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev,
829 				    &mlxsw_sp_port->periodic_hw_stats.xstats);
830 
831 out:
832 	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw,
833 			       MLXSW_HW_STATS_UPDATE_TIME);
834 }
835 
836 /* Return the stats from a cache that is updated periodically,
837  * as this function might get called in an atomic context.
838  */
839 static void
840 mlxsw_sp_port_get_stats64(struct net_device *dev,
841 			  struct rtnl_link_stats64 *stats)
842 {
843 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
844 
845 	memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats));
846 }
847 
848 static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
849 				    u16 vid_begin, u16 vid_end,
850 				    bool is_member, bool untagged)
851 {
852 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
853 	char *spvm_pl;
854 	int err;
855 
856 	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
857 	if (!spvm_pl)
858 		return -ENOMEM;
859 
860 	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port,	vid_begin,
861 			    vid_end, is_member, untagged);
862 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
863 	kfree(spvm_pl);
864 	return err;
865 }
866 
867 int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
868 			   u16 vid_end, bool is_member, bool untagged)
869 {
870 	u16 vid, vid_e;
871 	int err;
872 
873 	for (vid = vid_begin; vid <= vid_end;
874 	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
875 		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
876 			    vid_end);
877 
878 		err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
879 					       is_member, untagged);
880 		if (err)
881 			return err;
882 	}
883 
884 	return 0;
885 }
886 
887 static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port,
888 				     bool flush_default)
889 {
890 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;
891 
892 	list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
893 				 &mlxsw_sp_port->vlans_list, list) {
894 		if (!flush_default &&
895 		    mlxsw_sp_port_vlan->vid == MLXSW_SP_DEFAULT_VID)
896 			continue;
897 		mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
898 	}
899 }
900 
901 static void
902 mlxsw_sp_port_vlan_cleanup(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
903 {
904 	if (mlxsw_sp_port_vlan->bridge_port)
905 		mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
906 	else if (mlxsw_sp_port_vlan->fid)
907 		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
908 }
909 
910 struct mlxsw_sp_port_vlan *
911 mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
912 {
913 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
914 	bool untagged = vid == MLXSW_SP_DEFAULT_VID;
915 	int err;
916 
917 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
918 	if (mlxsw_sp_port_vlan)
919 		return ERR_PTR(-EEXIST);
920 
921 	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
922 	if (err)
923 		return ERR_PTR(err);
924 
925 	mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
926 	if (!mlxsw_sp_port_vlan) {
927 		err = -ENOMEM;
928 		goto err_port_vlan_alloc;
929 	}
930 
931 	mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
932 	mlxsw_sp_port_vlan->vid = vid;
933 	list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);
934 
935 	return mlxsw_sp_port_vlan;
936 
937 err_port_vlan_alloc:
938 	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
939 	return ERR_PTR(err);
940 }
941 
942 void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
943 {
944 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
945 	u16 vid = mlxsw_sp_port_vlan->vid;
946 
947 	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port_vlan);
948 	list_del(&mlxsw_sp_port_vlan->list);
949 	kfree(mlxsw_sp_port_vlan);
950 	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
951 }
952 
953 static int mlxsw_sp_port_add_vid(struct net_device *dev,
954 				 __be16 __always_unused proto, u16 vid)
955 {
956 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
957 
958 	/* VLAN 0 is added to HW filter when device goes up, but it is
959 	 * reserved in our case, so simply return.
960 	 */
961 	if (!vid)
962 		return 0;
963 
964 	return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid));
965 }
966 
967 static int mlxsw_sp_port_kill_vid(struct net_device *dev,
968 				  __be16 __always_unused proto, u16 vid)
969 {
970 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
971 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
972 
973 	/* VLAN 0 is removed from HW filter when device goes down, but
974 	 * it is reserved in our case, so simply return.
975 	 */
976 	if (!vid)
977 		return 0;
978 
979 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
980 	if (!mlxsw_sp_port_vlan)
981 		return 0;
982 	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
983 
984 	return 0;
985 }
986 
987 static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
988 				   struct flow_block_offload *f)
989 {
990 	switch (f->binder_type) {
991 	case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS:
992 		return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, true);
993 	case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS:
994 		return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, false);
995 	case FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP:
996 		return mlxsw_sp_setup_tc_block_qevent_early_drop(mlxsw_sp_port, f);
997 	default:
998 		return -EOPNOTSUPP;
999 	}
1000 }
1001 
1002 static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
1003 			     void *type_data)
1004 {
1005 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1006 
1007 	switch (type) {
1008 	case TC_SETUP_BLOCK:
1009 		return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data);
1010 	case TC_SETUP_QDISC_RED:
1011 		return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data);
1012 	case TC_SETUP_QDISC_PRIO:
1013 		return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data);
1014 	case TC_SETUP_QDISC_ETS:
1015 		return mlxsw_sp_setup_tc_ets(mlxsw_sp_port, type_data);
1016 	case TC_SETUP_QDISC_TBF:
1017 		return mlxsw_sp_setup_tc_tbf(mlxsw_sp_port, type_data);
1018 	case TC_SETUP_QDISC_FIFO:
1019 		return mlxsw_sp_setup_tc_fifo(mlxsw_sp_port, type_data);
1020 	default:
1021 		return -EOPNOTSUPP;
1022 	}
1023 }
1024 
1025 static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
1026 {
1027 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1028 
1029 	if (!enable) {
1030 		if (mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->ing_flow_block) ||
1031 		    mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->eg_flow_block)) {
1032 			netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
1033 			return -EINVAL;
1034 		}
1035 		mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->ing_flow_block);
1036 		mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->eg_flow_block);
1037 	} else {
1038 		mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->ing_flow_block);
1039 		mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->eg_flow_block);
1040 	}
1041 	return 0;
1042 }
1043 
1044 static int mlxsw_sp_feature_loopback(struct net_device *dev, bool enable)
1045 {
1046 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1047 	char pplr_pl[MLXSW_REG_PPLR_LEN];
1048 	int err;
1049 
1050 	if (netif_running(dev))
1051 		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
1052 
1053 	mlxsw_reg_pplr_pack(pplr_pl, mlxsw_sp_port->local_port, enable);
1054 	err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pplr),
1055 			      pplr_pl);
1056 
1057 	if (netif_running(dev))
1058 		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
1059 
1060 	return err;
1061 }
1062 
1063 typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable);
1064 
1065 static int mlxsw_sp_handle_feature(struct net_device *dev,
1066 				   netdev_features_t wanted_features,
1067 				   netdev_features_t feature,
1068 				   mlxsw_sp_feature_handler feature_handler)
1069 {
1070 	netdev_features_t changes = wanted_features ^ dev->features;
1071 	bool enable = !!(wanted_features & feature);
1072 	int err;
1073 
1074 	if (!(changes & feature))
1075 		return 0;
1076 
1077 	err = feature_handler(dev, enable);
1078 	if (err) {
1079 		netdev_err(dev, "%s feature %pNF failed, err %d\n",
1080 			   enable ? "Enable" : "Disable", &feature, err);
1081 		return err;
1082 	}
1083 
1084 	if (enable)
1085 		dev->features |= feature;
1086 	else
1087 		dev->features &= ~feature;
1088 
1089 	return 0;
1090 }
1091 static int mlxsw_sp_set_features(struct net_device *dev,
1092 				 netdev_features_t features)
1093 {
1094 	netdev_features_t oper_features = dev->features;
1095 	int err = 0;
1096 
1097 	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
1098 				       mlxsw_sp_feature_hw_tc);
1099 	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_LOOPBACK,
1100 				       mlxsw_sp_feature_loopback);
1101 
1102 	if (err) {
1103 		dev->features = oper_features;
1104 		return -EINVAL;
1105 	}
1106 
1107 	return 0;
1108 }
1109 
1110 static struct devlink_port *
1111 mlxsw_sp_port_get_devlink_port(struct net_device *dev)
1112 {
1113 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1114 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1115 
1116 	return mlxsw_core_port_devlink_port_get(mlxsw_sp->core,
1117 						mlxsw_sp_port->local_port);
1118 }
1119 
1120 static int mlxsw_sp_port_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
1121 				      struct ifreq *ifr)
1122 {
1123 	struct hwtstamp_config config;
1124 	int err;
1125 
1126 	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
1127 		return -EFAULT;
1128 
1129 	err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port,
1130 							     &config);
1131 	if (err)
1132 		return err;
1133 
1134 	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
1135 		return -EFAULT;
1136 
1137 	return 0;
1138 }
1139 
1140 static int mlxsw_sp_port_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
1141 				      struct ifreq *ifr)
1142 {
1143 	struct hwtstamp_config config;
1144 	int err;
1145 
1146 	err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port,
1147 							     &config);
1148 	if (err)
1149 		return err;
1150 
1151 	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
1152 		return -EFAULT;
1153 
1154 	return 0;
1155 }
1156 
1157 static inline void mlxsw_sp_port_ptp_clear(struct mlxsw_sp_port *mlxsw_sp_port)
1158 {
1159 	struct hwtstamp_config config = {0};
1160 
1161 	mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config);
1162 }
1163 
1164 static int
1165 mlxsw_sp_port_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1166 {
1167 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1168 
1169 	switch (cmd) {
1170 	case SIOCSHWTSTAMP:
1171 		return mlxsw_sp_port_hwtstamp_set(mlxsw_sp_port, ifr);
1172 	case SIOCGHWTSTAMP:
1173 		return mlxsw_sp_port_hwtstamp_get(mlxsw_sp_port, ifr);
1174 	default:
1175 		return -EOPNOTSUPP;
1176 	}
1177 }
1178 
1179 static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
1180 	.ndo_open		= mlxsw_sp_port_open,
1181 	.ndo_stop		= mlxsw_sp_port_stop,
1182 	.ndo_start_xmit		= mlxsw_sp_port_xmit,
1183 	.ndo_setup_tc           = mlxsw_sp_setup_tc,
1184 	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
1185 	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
1186 	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
1187 	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
1188 	.ndo_has_offload_stats	= mlxsw_sp_port_has_offload_stats,
1189 	.ndo_get_offload_stats	= mlxsw_sp_port_get_offload_stats,
1190 	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
1191 	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
1192 	.ndo_set_features	= mlxsw_sp_set_features,
1193 	.ndo_get_devlink_port	= mlxsw_sp_port_get_devlink_port,
1194 	.ndo_do_ioctl		= mlxsw_sp_port_ioctl,
1195 };
1196 
1197 static int
1198 mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port)
1199 {
1200 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1201 	u32 eth_proto_cap, eth_proto_admin, eth_proto_oper;
1202 	const struct mlxsw_sp_port_type_speed_ops *ops;
1203 	char ptys_pl[MLXSW_REG_PTYS_LEN];
1204 	u32 eth_proto_cap_masked;
1205 	int err;
1206 
1207 	ops = mlxsw_sp->port_type_speed_ops;
1208 
1209 	/* Set advertised speeds to speeds supported by both the driver
1210 	 * and the device.
1211 	 */
1212 	ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
1213 			       0, false);
1214 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1215 	if (err)
1216 		return err;
1217 
1218 	ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap,
1219 				 &eth_proto_admin, &eth_proto_oper);
1220 	eth_proto_cap_masked = ops->ptys_proto_cap_masked_get(eth_proto_cap);
1221 	ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
1222 			       eth_proto_cap_masked,
1223 			       mlxsw_sp_port->link.autoneg);
1224 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1225 }
1226 
1227 int mlxsw_sp_port_speed_get(struct mlxsw_sp_port *mlxsw_sp_port, u32 *speed)
1228 {
1229 	const struct mlxsw_sp_port_type_speed_ops *port_type_speed_ops;
1230 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1231 	char ptys_pl[MLXSW_REG_PTYS_LEN];
1232 	u32 eth_proto_oper;
1233 	int err;
1234 
1235 	port_type_speed_ops = mlxsw_sp->port_type_speed_ops;
1236 	port_type_speed_ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl,
1237 					       mlxsw_sp_port->local_port, 0,
1238 					       false);
1239 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1240 	if (err)
1241 		return err;
1242 	port_type_speed_ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, NULL, NULL,
1243 						 &eth_proto_oper);
1244 	*speed = port_type_speed_ops->from_ptys_speed(mlxsw_sp, eth_proto_oper);
1245 	return 0;
1246 }
1247 
1248 int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
1249 			  enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
1250 			  bool dwrr, u8 dwrr_weight)
1251 {
1252 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1253 	char qeec_pl[MLXSW_REG_QEEC_LEN];
1254 
1255 	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
1256 			    next_index);
1257 	mlxsw_reg_qeec_de_set(qeec_pl, true);
1258 	mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
1259 	mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
1260 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
1261 }
1262 
1263 int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
1264 				  enum mlxsw_reg_qeec_hr hr, u8 index,
1265 				  u8 next_index, u32 maxrate, u8 burst_size)
1266 {
1267 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1268 	char qeec_pl[MLXSW_REG_QEEC_LEN];
1269 
1270 	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
1271 			    next_index);
1272 	mlxsw_reg_qeec_mase_set(qeec_pl, true);
1273 	mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
1274 	mlxsw_reg_qeec_max_shaper_bs_set(qeec_pl, burst_size);
1275 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
1276 }
1277 
1278 static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port,
1279 				    enum mlxsw_reg_qeec_hr hr, u8 index,
1280 				    u8 next_index, u32 minrate)
1281 {
1282 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1283 	char qeec_pl[MLXSW_REG_QEEC_LEN];
1284 
1285 	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
1286 			    next_index);
1287 	mlxsw_reg_qeec_mise_set(qeec_pl, true);
1288 	mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate);
1289 
1290 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
1291 }
1292 
1293 int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
1294 			      u8 switch_prio, u8 tclass)
1295 {
1296 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1297 	char qtct_pl[MLXSW_REG_QTCT_LEN];
1298 
1299 	mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
1300 			    tclass);
1301 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
1302 }
1303 
1304 static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
1305 {
1306 	int err, i;
1307 
1308 	/* Setup the elements hierarcy, so that each TC is linked to
1309 	 * one subgroup, which are all member in the same group.
1310 	 */
1311 	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
1312 				    MLXSW_REG_QEEC_HR_GROUP, 0, 0, false, 0);
1313 	if (err)
1314 		return err;
1315 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1316 		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
1317 					    MLXSW_REG_QEEC_HR_SUBGROUP, i,
1318 					    0, false, 0);
1319 		if (err)
1320 			return err;
1321 	}
1322 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1323 		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
1324 					    MLXSW_REG_QEEC_HR_TC, i, i,
1325 					    false, 0);
1326 		if (err)
1327 			return err;
1328 
1329 		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
1330 					    MLXSW_REG_QEEC_HR_TC,
1331 					    i + 8, i,
1332 					    true, 100);
1333 		if (err)
1334 			return err;
1335 	}
1336 
1337 	/* Make sure the max shaper is disabled in all hierarchies that support
1338 	 * it. Note that this disables ptps (PTP shaper), but that is intended
1339 	 * for the initial configuration.
1340 	 */
1341 	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
1342 					    MLXSW_REG_QEEC_HR_PORT, 0, 0,
1343 					    MLXSW_REG_QEEC_MAS_DIS, 0);
1344 	if (err)
1345 		return err;
1346 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1347 		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
1348 						    MLXSW_REG_QEEC_HR_SUBGROUP,
1349 						    i, 0,
1350 						    MLXSW_REG_QEEC_MAS_DIS, 0);
1351 		if (err)
1352 			return err;
1353 	}
1354 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1355 		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
1356 						    MLXSW_REG_QEEC_HR_TC,
1357 						    i, i,
1358 						    MLXSW_REG_QEEC_MAS_DIS, 0);
1359 		if (err)
1360 			return err;
1361 
1362 		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
1363 						    MLXSW_REG_QEEC_HR_TC,
1364 						    i + 8, i,
1365 						    MLXSW_REG_QEEC_MAS_DIS, 0);
1366 		if (err)
1367 			return err;
1368 	}
1369 
1370 	/* Configure the min shaper for multicast TCs. */
1371 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1372 		err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port,
1373 					       MLXSW_REG_QEEC_HR_TC,
1374 					       i + 8, i,
1375 					       MLXSW_REG_QEEC_MIS_MIN);
1376 		if (err)
1377 			return err;
1378 	}
1379 
1380 	/* Map all priorities to traffic class 0. */
1381 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1382 		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
1383 		if (err)
1384 			return err;
1385 	}
1386 
1387 	return 0;
1388 }
1389 
1390 static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
1391 					bool enable)
1392 {
1393 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1394 	char qtctm_pl[MLXSW_REG_QTCTM_LEN];
1395 
1396 	mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable);
1397 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl);
1398 }
1399 
1400 static int mlxsw_sp_port_overheat_init_val_set(struct mlxsw_sp_port *mlxsw_sp_port)
1401 {
1402 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1403 	u8 module = mlxsw_sp_port->mapping.module;
1404 	u64 overheat_counter;
1405 	int err;
1406 
1407 	err = mlxsw_env_module_overheat_counter_get(mlxsw_sp->core, module,
1408 						    &overheat_counter);
1409 	if (err)
1410 		return err;
1411 
1412 	mlxsw_sp_port->module_overheat_initial_val = overheat_counter;
1413 	return 0;
1414 }
1415 
1416 int
1417 mlxsw_sp_port_vlan_classification_set(struct mlxsw_sp_port *mlxsw_sp_port,
1418 				      bool is_8021ad_tagged,
1419 				      bool is_8021q_tagged)
1420 {
1421 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1422 	char spvc_pl[MLXSW_REG_SPVC_LEN];
1423 
1424 	mlxsw_reg_spvc_pack(spvc_pl, mlxsw_sp_port->local_port,
1425 			    is_8021ad_tagged, is_8021q_tagged);
1426 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvc), spvc_pl);
1427 }
1428 
1429 static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
1430 				u8 split_base_local_port,
1431 				struct mlxsw_sp_port_mapping *port_mapping)
1432 {
1433 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1434 	bool split = !!split_base_local_port;
1435 	struct mlxsw_sp_port *mlxsw_sp_port;
1436 	u32 lanes = port_mapping->width;
1437 	struct net_device *dev;
1438 	bool splittable;
1439 	int err;
1440 
1441 	splittable = lanes > 1 && !split;
1442 	err = mlxsw_core_port_init(mlxsw_sp->core, local_port,
1443 				   port_mapping->module + 1, split,
1444 				   port_mapping->lane / lanes,
1445 				   splittable, lanes,
1446 				   mlxsw_sp->base_mac,
1447 				   sizeof(mlxsw_sp->base_mac));
1448 	if (err) {
1449 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
1450 			local_port);
1451 		return err;
1452 	}
1453 
1454 	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
1455 	if (!dev) {
1456 		err = -ENOMEM;
1457 		goto err_alloc_etherdev;
1458 	}
1459 	SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
1460 	dev_net_set(dev, mlxsw_sp_net(mlxsw_sp));
1461 	mlxsw_sp_port = netdev_priv(dev);
1462 	mlxsw_sp_port->dev = dev;
1463 	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
1464 	mlxsw_sp_port->local_port = local_port;
1465 	mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID;
1466 	mlxsw_sp_port->split = split;
1467 	mlxsw_sp_port->split_base_local_port = split_base_local_port;
1468 	mlxsw_sp_port->mapping = *port_mapping;
1469 	mlxsw_sp_port->link.autoneg = 1;
1470 	INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);
1471 
1472 	mlxsw_sp_port->pcpu_stats =
1473 		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
1474 	if (!mlxsw_sp_port->pcpu_stats) {
1475 		err = -ENOMEM;
1476 		goto err_alloc_stats;
1477 	}
1478 
1479 	INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw,
1480 			  &update_stats_cache);
1481 
1482 	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
1483 	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
1484 
1485 	err = mlxsw_sp_port_module_map(mlxsw_sp_port);
1486 	if (err) {
1487 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
1488 			mlxsw_sp_port->local_port);
1489 		goto err_port_module_map;
1490 	}
1491 
1492 	err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
1493 	if (err) {
1494 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
1495 			mlxsw_sp_port->local_port);
1496 		goto err_port_swid_set;
1497 	}
1498 
1499 	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
1500 	if (err) {
1501 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
1502 			mlxsw_sp_port->local_port);
1503 		goto err_dev_addr_init;
1504 	}
1505 
1506 	netif_carrier_off(dev);
1507 
1508 	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
1509 			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
1510 	dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK;
1511 
1512 	dev->min_mtu = 0;
1513 	dev->max_mtu = ETH_MAX_MTU;
1514 
1515 	/* Each packet needs to have a Tx header (metadata) on top all other
1516 	 * headers.
1517 	 */
1518 	dev->needed_headroom = MLXSW_TXHDR_LEN;
1519 
1520 	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
1521 	if (err) {
1522 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
1523 			mlxsw_sp_port->local_port);
1524 		goto err_port_system_port_mapping_set;
1525 	}
1526 
1527 	err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port);
1528 	if (err) {
1529 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
1530 			mlxsw_sp_port->local_port);
1531 		goto err_port_speed_by_width_set;
1532 	}
1533 
1534 	err = mlxsw_sp->port_type_speed_ops->ptys_max_speed(mlxsw_sp_port,
1535 							    &mlxsw_sp_port->max_speed);
1536 	if (err) {
1537 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum speed\n",
1538 			mlxsw_sp_port->local_port);
1539 		goto err_max_speed_get;
1540 	}
1541 
1542 	err = mlxsw_sp_port_max_mtu_get(mlxsw_sp_port, &mlxsw_sp_port->max_mtu);
1543 	if (err) {
1544 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum MTU\n",
1545 			mlxsw_sp_port->local_port);
1546 		goto err_port_max_mtu_get;
1547 	}
1548 
1549 	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
1550 	if (err) {
1551 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
1552 			mlxsw_sp_port->local_port);
1553 		goto err_port_mtu_set;
1554 	}
1555 
1556 	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
1557 	if (err)
1558 		goto err_port_admin_status_set;
1559 
1560 	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
1561 	if (err) {
1562 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
1563 			mlxsw_sp_port->local_port);
1564 		goto err_port_buffers_init;
1565 	}
1566 
1567 	err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
1568 	if (err) {
1569 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
1570 			mlxsw_sp_port->local_port);
1571 		goto err_port_ets_init;
1572 	}
1573 
1574 	err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true);
1575 	if (err) {
1576 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n",
1577 			mlxsw_sp_port->local_port);
1578 		goto err_port_tc_mc_mode;
1579 	}
1580 
1581 	/* ETS and buffers must be initialized before DCB. */
1582 	err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
1583 	if (err) {
1584 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
1585 			mlxsw_sp_port->local_port);
1586 		goto err_port_dcb_init;
1587 	}
1588 
1589 	err = mlxsw_sp_port_fids_init(mlxsw_sp_port);
1590 	if (err) {
1591 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n",
1592 			mlxsw_sp_port->local_port);
1593 		goto err_port_fids_init;
1594 	}
1595 
1596 	err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port);
1597 	if (err) {
1598 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n",
1599 			mlxsw_sp_port->local_port);
1600 		goto err_port_qdiscs_init;
1601 	}
1602 
1603 	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 0, VLAN_N_VID - 1, false,
1604 				     false);
1605 	if (err) {
1606 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to clear VLAN filter\n",
1607 			mlxsw_sp_port->local_port);
1608 		goto err_port_vlan_clear;
1609 	}
1610 
1611 	err = mlxsw_sp_port_nve_init(mlxsw_sp_port);
1612 	if (err) {
1613 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize NVE\n",
1614 			mlxsw_sp_port->local_port);
1615 		goto err_port_nve_init;
1616 	}
1617 
1618 	err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
1619 				     ETH_P_8021Q);
1620 	if (err) {
1621 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set PVID\n",
1622 			mlxsw_sp_port->local_port);
1623 		goto err_port_pvid_set;
1624 	}
1625 
1626 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
1627 						       MLXSW_SP_DEFAULT_VID);
1628 	if (IS_ERR(mlxsw_sp_port_vlan)) {
1629 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
1630 			mlxsw_sp_port->local_port);
1631 		err = PTR_ERR(mlxsw_sp_port_vlan);
1632 		goto err_port_vlan_create;
1633 	}
1634 	mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan;
1635 
1636 	/* Set SPVC.et0=true and SPVC.et1=false to make the local port to treat
1637 	 * only packets with 802.1q header as tagged packets.
1638 	 */
1639 	err = mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, false, true);
1640 	if (err) {
1641 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set default VLAN classification\n",
1642 			local_port);
1643 		goto err_port_vlan_classification_set;
1644 	}
1645 
1646 	INIT_DELAYED_WORK(&mlxsw_sp_port->ptp.shaper_dw,
1647 			  mlxsw_sp->ptp_ops->shaper_work);
1648 
1649 	mlxsw_sp->ports[local_port] = mlxsw_sp_port;
1650 
1651 	err = mlxsw_sp_port_overheat_init_val_set(mlxsw_sp_port);
1652 	if (err) {
1653 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set overheat initial value\n",
1654 			mlxsw_sp_port->local_port);
1655 		goto err_port_overheat_init_val_set;
1656 	}
1657 
1658 	err = register_netdev(dev);
1659 	if (err) {
1660 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
1661 			mlxsw_sp_port->local_port);
1662 		goto err_register_netdev;
1663 	}
1664 
1665 	mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
1666 				mlxsw_sp_port, dev);
1667 	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0);
1668 	return 0;
1669 
1670 err_register_netdev:
1671 err_port_overheat_init_val_set:
1672 	mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true);
1673 err_port_vlan_classification_set:
1674 	mlxsw_sp->ports[local_port] = NULL;
1675 	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
1676 err_port_vlan_create:
1677 err_port_pvid_set:
1678 	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
1679 err_port_nve_init:
1680 err_port_vlan_clear:
1681 	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
1682 err_port_qdiscs_init:
1683 	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
1684 err_port_fids_init:
1685 	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
1686 err_port_dcb_init:
1687 	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
1688 err_port_tc_mc_mode:
1689 err_port_ets_init:
1690 	mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
1691 err_port_buffers_init:
1692 err_port_admin_status_set:
1693 err_port_mtu_set:
1694 err_port_max_mtu_get:
1695 err_max_speed_get:
1696 err_port_speed_by_width_set:
1697 err_port_system_port_mapping_set:
1698 err_dev_addr_init:
1699 	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
1700 err_port_swid_set:
1701 	mlxsw_sp_port_module_unmap(mlxsw_sp_port);
1702 err_port_module_map:
1703 	free_percpu(mlxsw_sp_port->pcpu_stats);
1704 err_alloc_stats:
1705 	free_netdev(dev);
1706 err_alloc_etherdev:
1707 	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
1708 	return err;
1709 }
1710 
1711 static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
1712 {
1713 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
1714 
1715 	cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
1716 	cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw);
1717 	mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
1718 	mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
1719 	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
1720 	mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true);
1721 	mlxsw_sp->ports[local_port] = NULL;
1722 	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true);
1723 	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
1724 	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
1725 	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
1726 	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
1727 	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
1728 	mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
1729 	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
1730 	mlxsw_sp_port_module_unmap(mlxsw_sp_port);
1731 	free_percpu(mlxsw_sp_port->pcpu_stats);
1732 	WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
1733 	free_netdev(mlxsw_sp_port->dev);
1734 	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
1735 }
1736 
1737 static int mlxsw_sp_cpu_port_create(struct mlxsw_sp *mlxsw_sp)
1738 {
1739 	struct mlxsw_sp_port *mlxsw_sp_port;
1740 	int err;
1741 
1742 	mlxsw_sp_port = kzalloc(sizeof(*mlxsw_sp_port), GFP_KERNEL);
1743 	if (!mlxsw_sp_port)
1744 		return -ENOMEM;
1745 
1746 	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
1747 	mlxsw_sp_port->local_port = MLXSW_PORT_CPU_PORT;
1748 
1749 	err = mlxsw_core_cpu_port_init(mlxsw_sp->core,
1750 				       mlxsw_sp_port,
1751 				       mlxsw_sp->base_mac,
1752 				       sizeof(mlxsw_sp->base_mac));
1753 	if (err) {
1754 		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize core CPU port\n");
1755 		goto err_core_cpu_port_init;
1756 	}
1757 
1758 	mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = mlxsw_sp_port;
1759 	return 0;
1760 
1761 err_core_cpu_port_init:
1762 	kfree(mlxsw_sp_port);
1763 	return err;
1764 }
1765 
1766 static void mlxsw_sp_cpu_port_remove(struct mlxsw_sp *mlxsw_sp)
1767 {
1768 	struct mlxsw_sp_port *mlxsw_sp_port =
1769 				mlxsw_sp->ports[MLXSW_PORT_CPU_PORT];
1770 
1771 	mlxsw_core_cpu_port_fini(mlxsw_sp->core);
1772 	mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = NULL;
1773 	kfree(mlxsw_sp_port);
1774 }
1775 
1776 static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port)
1777 {
1778 	return mlxsw_sp->ports[local_port] != NULL;
1779 }
1780 
1781 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
1782 {
1783 	int i;
1784 
1785 	for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
1786 		if (mlxsw_sp_port_created(mlxsw_sp, i))
1787 			mlxsw_sp_port_remove(mlxsw_sp, i);
1788 	mlxsw_sp_cpu_port_remove(mlxsw_sp);
1789 	kfree(mlxsw_sp->ports);
1790 	mlxsw_sp->ports = NULL;
1791 }
1792 
1793 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
1794 {
1795 	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
1796 	struct mlxsw_sp_port_mapping *port_mapping;
1797 	size_t alloc_size;
1798 	int i;
1799 	int err;
1800 
1801 	alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports;
1802 	mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
1803 	if (!mlxsw_sp->ports)
1804 		return -ENOMEM;
1805 
1806 	err = mlxsw_sp_cpu_port_create(mlxsw_sp);
1807 	if (err)
1808 		goto err_cpu_port_create;
1809 
1810 	for (i = 1; i < max_ports; i++) {
1811 		port_mapping = mlxsw_sp->port_mapping[i];
1812 		if (!port_mapping)
1813 			continue;
1814 		err = mlxsw_sp_port_create(mlxsw_sp, i, 0, port_mapping);
1815 		if (err)
1816 			goto err_port_create;
1817 	}
1818 	return 0;
1819 
1820 err_port_create:
1821 	for (i--; i >= 1; i--)
1822 		if (mlxsw_sp_port_created(mlxsw_sp, i))
1823 			mlxsw_sp_port_remove(mlxsw_sp, i);
1824 	mlxsw_sp_cpu_port_remove(mlxsw_sp);
1825 err_cpu_port_create:
1826 	kfree(mlxsw_sp->ports);
1827 	mlxsw_sp->ports = NULL;
1828 	return err;
1829 }
1830 
1831 static int mlxsw_sp_port_module_info_init(struct mlxsw_sp *mlxsw_sp)
1832 {
1833 	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
1834 	struct mlxsw_sp_port_mapping port_mapping;
1835 	int i;
1836 	int err;
1837 
1838 	mlxsw_sp->port_mapping = kcalloc(max_ports,
1839 					 sizeof(struct mlxsw_sp_port_mapping *),
1840 					 GFP_KERNEL);
1841 	if (!mlxsw_sp->port_mapping)
1842 		return -ENOMEM;
1843 
1844 	for (i = 1; i < max_ports; i++) {
1845 		if (mlxsw_core_port_is_xm(mlxsw_sp->core, i))
1846 			continue;
1847 
1848 		err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &port_mapping);
1849 		if (err)
1850 			goto err_port_module_info_get;
1851 		if (!port_mapping.width)
1852 			continue;
1853 
1854 		mlxsw_sp->port_mapping[i] = kmemdup(&port_mapping,
1855 						    sizeof(port_mapping),
1856 						    GFP_KERNEL);
1857 		if (!mlxsw_sp->port_mapping[i]) {
1858 			err = -ENOMEM;
1859 			goto err_port_module_info_dup;
1860 		}
1861 	}
1862 	return 0;
1863 
1864 err_port_module_info_get:
1865 err_port_module_info_dup:
1866 	for (i--; i >= 1; i--)
1867 		kfree(mlxsw_sp->port_mapping[i]);
1868 	kfree(mlxsw_sp->port_mapping);
1869 	return err;
1870 }
1871 
1872 static void mlxsw_sp_port_module_info_fini(struct mlxsw_sp *mlxsw_sp)
1873 {
1874 	int i;
1875 
1876 	for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
1877 		kfree(mlxsw_sp->port_mapping[i]);
1878 	kfree(mlxsw_sp->port_mapping);
1879 }
1880 
1881 static u8 mlxsw_sp_cluster_base_port_get(u8 local_port, unsigned int max_width)
1882 {
1883 	u8 offset = (local_port - 1) % max_width;
1884 
1885 	return local_port - offset;
1886 }
1887 
1888 static int
1889 mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
1890 			   struct mlxsw_sp_port_mapping *port_mapping,
1891 			   unsigned int count, u8 offset)
1892 {
1893 	struct mlxsw_sp_port_mapping split_port_mapping;
1894 	int err, i;
1895 
1896 	split_port_mapping = *port_mapping;
1897 	split_port_mapping.width /= count;
1898 	for (i = 0; i < count; i++) {
1899 		err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * offset,
1900 					   base_port, &split_port_mapping);
1901 		if (err)
1902 			goto err_port_create;
1903 		split_port_mapping.lane += split_port_mapping.width;
1904 	}
1905 
1906 	return 0;
1907 
1908 err_port_create:
1909 	for (i--; i >= 0; i--)
1910 		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
1911 			mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);
1912 	return err;
1913 }
1914 
1915 static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
1916 					 u8 base_port,
1917 					 unsigned int count, u8 offset)
1918 {
1919 	struct mlxsw_sp_port_mapping *port_mapping;
1920 	int i;
1921 
1922 	/* Go over original unsplit ports in the gap and recreate them. */
1923 	for (i = 0; i < count * offset; i++) {
1924 		port_mapping = mlxsw_sp->port_mapping[base_port + i];
1925 		if (!port_mapping)
1926 			continue;
1927 		mlxsw_sp_port_create(mlxsw_sp, base_port + i, 0, port_mapping);
1928 	}
1929 }
1930 
1931 static int mlxsw_sp_local_ports_offset(struct mlxsw_core *mlxsw_core,
1932 				       unsigned int count,
1933 				       unsigned int max_width)
1934 {
1935 	enum mlxsw_res_id local_ports_in_x_res_id;
1936 	int split_width = max_width / count;
1937 
1938 	if (split_width == 1)
1939 		local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_1X;
1940 	else if (split_width == 2)
1941 		local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_2X;
1942 	else if (split_width == 4)
1943 		local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_4X;
1944 	else
1945 		return -EINVAL;
1946 
1947 	if (!mlxsw_core_res_valid(mlxsw_core, local_ports_in_x_res_id))
1948 		return -EINVAL;
1949 	return mlxsw_core_res_get(mlxsw_core, local_ports_in_x_res_id);
1950 }
1951 
1952 static struct mlxsw_sp_port *
1953 mlxsw_sp_port_get_by_local_port(struct mlxsw_sp *mlxsw_sp, u8 local_port)
1954 {
1955 	if (mlxsw_sp->ports && mlxsw_sp->ports[local_port])
1956 		return mlxsw_sp->ports[local_port];
1957 	return NULL;
1958 }
1959 
1960 static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
1961 			       unsigned int count,
1962 			       struct netlink_ext_ack *extack)
1963 {
1964 	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
1965 	struct mlxsw_sp_port_mapping port_mapping;
1966 	struct mlxsw_sp_port *mlxsw_sp_port;
1967 	int max_width;
1968 	u8 base_port;
1969 	int offset;
1970 	int i;
1971 	int err;
1972 
1973 	mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
1974 	if (!mlxsw_sp_port) {
1975 		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
1976 			local_port);
1977 		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
1978 		return -EINVAL;
1979 	}
1980 
1981 	max_width = mlxsw_core_module_max_width(mlxsw_core,
1982 						mlxsw_sp_port->mapping.module);
1983 	if (max_width < 0) {
1984 		netdev_err(mlxsw_sp_port->dev, "Cannot get max width of port module\n");
1985 		NL_SET_ERR_MSG_MOD(extack, "Cannot get max width of port module");
1986 		return max_width;
1987 	}
1988 
1989 	/* Split port with non-max cannot be split. */
1990 	if (mlxsw_sp_port->mapping.width != max_width) {
1991 		netdev_err(mlxsw_sp_port->dev, "Port cannot be split\n");
1992 		NL_SET_ERR_MSG_MOD(extack, "Port cannot be split");
1993 		return -EINVAL;
1994 	}
1995 
1996 	offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width);
1997 	if (offset < 0) {
1998 		netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n");
1999 		NL_SET_ERR_MSG_MOD(extack, "Cannot obtain local port offset");
2000 		return -EINVAL;
2001 	}
2002 
2003 	/* Only in case max split is being done, the local port and
2004 	 * base port may differ.
2005 	 */
2006 	base_port = count == max_width ?
2007 		    mlxsw_sp_cluster_base_port_get(local_port, max_width) :
2008 		    local_port;
2009 
2010 	for (i = 0; i < count * offset; i++) {
2011 		/* Expect base port to exist and also the one in the middle in
2012 		 * case of maximal split count.
2013 		 */
2014 		if (i == 0 || (count == max_width && i == count / 2))
2015 			continue;
2016 
2017 		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i)) {
2018 			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
2019 			NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration");
2020 			return -EINVAL;
2021 		}
2022 	}
2023 
2024 	port_mapping = mlxsw_sp_port->mapping;
2025 
2026 	for (i = 0; i < count; i++)
2027 		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
2028 			mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);
2029 
2030 	err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, &port_mapping,
2031 					 count, offset);
2032 	if (err) {
2033 		dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
2034 		goto err_port_split_create;
2035 	}
2036 
2037 	return 0;
2038 
2039 err_port_split_create:
2040 	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count, offset);
2041 	return err;
2042 }
2043 
2044 static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port,
2045 				 struct netlink_ext_ack *extack)
2046 {
2047 	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2048 	struct mlxsw_sp_port *mlxsw_sp_port;
2049 	unsigned int count;
2050 	int max_width;
2051 	u8 base_port;
2052 	int offset;
2053 	int i;
2054 
2055 	mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
2056 	if (!mlxsw_sp_port) {
2057 		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
2058 			local_port);
2059 		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
2060 		return -EINVAL;
2061 	}
2062 
2063 	if (!mlxsw_sp_port->split) {
2064 		netdev_err(mlxsw_sp_port->dev, "Port was not split\n");
2065 		NL_SET_ERR_MSG_MOD(extack, "Port was not split");
2066 		return -EINVAL;
2067 	}
2068 
2069 	max_width = mlxsw_core_module_max_width(mlxsw_core,
2070 						mlxsw_sp_port->mapping.module);
2071 	if (max_width < 0) {
2072 		netdev_err(mlxsw_sp_port->dev, "Cannot get max width of port module\n");
2073 		NL_SET_ERR_MSG_MOD(extack, "Cannot get max width of port module");
2074 		return max_width;
2075 	}
2076 
2077 	count = max_width / mlxsw_sp_port->mapping.width;
2078 
2079 	offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width);
2080 	if (WARN_ON(offset < 0)) {
2081 		netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n");
2082 		NL_SET_ERR_MSG_MOD(extack, "Cannot obtain local port offset");
2083 		return -EINVAL;
2084 	}
2085 
2086 	base_port = mlxsw_sp_port->split_base_local_port;
2087 
2088 	for (i = 0; i < count; i++)
2089 		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
2090 			mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);
2091 
2092 	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count, offset);
2093 
2094 	return 0;
2095 }
2096 
2097 static void
2098 mlxsw_sp_port_down_wipe_counters(struct mlxsw_sp_port *mlxsw_sp_port)
2099 {
2100 	int i;
2101 
2102 	for (i = 0; i < TC_MAX_QUEUE; i++)
2103 		mlxsw_sp_port->periodic_hw_stats.xstats.backlog[i] = 0;
2104 }
2105 
2106 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
2107 				     char *pude_pl, void *priv)
2108 {
2109 	struct mlxsw_sp *mlxsw_sp = priv;
2110 	struct mlxsw_sp_port *mlxsw_sp_port;
2111 	enum mlxsw_reg_pude_oper_status status;
2112 	u8 local_port;
2113 
2114 	local_port = mlxsw_reg_pude_local_port_get(pude_pl);
2115 	mlxsw_sp_port = mlxsw_sp->ports[local_port];
2116 	if (!mlxsw_sp_port)
2117 		return;
2118 
2119 	status = mlxsw_reg_pude_oper_status_get(pude_pl);
2120 	if (status == MLXSW_PORT_OPER_STATUS_UP) {
2121 		netdev_info(mlxsw_sp_port->dev, "link up\n");
2122 		netif_carrier_on(mlxsw_sp_port->dev);
2123 		mlxsw_core_schedule_dw(&mlxsw_sp_port->ptp.shaper_dw, 0);
2124 	} else {
2125 		netdev_info(mlxsw_sp_port->dev, "link down\n");
2126 		netif_carrier_off(mlxsw_sp_port->dev);
2127 		mlxsw_sp_port_down_wipe_counters(mlxsw_sp_port);
2128 	}
2129 }
2130 
2131 static void mlxsw_sp1_ptp_fifo_event_func(struct mlxsw_sp *mlxsw_sp,
2132 					  char *mtpptr_pl, bool ingress)
2133 {
2134 	u8 local_port;
2135 	u8 num_rec;
2136 	int i;
2137 
2138 	local_port = mlxsw_reg_mtpptr_local_port_get(mtpptr_pl);
2139 	num_rec = mlxsw_reg_mtpptr_num_rec_get(mtpptr_pl);
2140 	for (i = 0; i < num_rec; i++) {
2141 		u8 domain_number;
2142 		u8 message_type;
2143 		u16 sequence_id;
2144 		u64 timestamp;
2145 
2146 		mlxsw_reg_mtpptr_unpack(mtpptr_pl, i, &message_type,
2147 					&domain_number, &sequence_id,
2148 					&timestamp);
2149 		mlxsw_sp1_ptp_got_timestamp(mlxsw_sp, ingress, local_port,
2150 					    message_type, domain_number,
2151 					    sequence_id, timestamp);
2152 	}
2153 }
2154 
2155 static void mlxsw_sp1_ptp_ing_fifo_event_func(const struct mlxsw_reg_info *reg,
2156 					      char *mtpptr_pl, void *priv)
2157 {
2158 	struct mlxsw_sp *mlxsw_sp = priv;
2159 
2160 	mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, true);
2161 }
2162 
2163 static void mlxsw_sp1_ptp_egr_fifo_event_func(const struct mlxsw_reg_info *reg,
2164 					      char *mtpptr_pl, void *priv)
2165 {
2166 	struct mlxsw_sp *mlxsw_sp = priv;
2167 
2168 	mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, false);
2169 }
2170 
2171 void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
2172 				       u8 local_port, void *priv)
2173 {
2174 	struct mlxsw_sp *mlxsw_sp = priv;
2175 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
2176 	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
2177 
2178 	if (unlikely(!mlxsw_sp_port)) {
2179 		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
2180 				     local_port);
2181 		return;
2182 	}
2183 
2184 	skb->dev = mlxsw_sp_port->dev;
2185 
2186 	pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
2187 	u64_stats_update_begin(&pcpu_stats->syncp);
2188 	pcpu_stats->rx_packets++;
2189 	pcpu_stats->rx_bytes += skb->len;
2190 	u64_stats_update_end(&pcpu_stats->syncp);
2191 
2192 	skb->protocol = eth_type_trans(skb, skb->dev);
2193 	netif_receive_skb(skb);
2194 }
2195 
2196 static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port,
2197 					   void *priv)
2198 {
2199 	skb->offload_fwd_mark = 1;
2200 	return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
2201 }
2202 
2203 static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb,
2204 					      u8 local_port, void *priv)
2205 {
2206 	skb->offload_l3_fwd_mark = 1;
2207 	skb->offload_fwd_mark = 1;
2208 	return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
2209 }
2210 
2211 void mlxsw_sp_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
2212 			  u8 local_port)
2213 {
2214 	mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port);
2215 }
2216 
2217 #define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
2218 	MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action,	\
2219 		  _is_ctrl, SP_##_trap_group, DISCARD)
2220 
2221 #define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
2222 	MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action,	\
2223 		_is_ctrl, SP_##_trap_group, DISCARD)
2224 
2225 #define MLXSW_SP_RXL_L3_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
2226 	MLXSW_RXL(mlxsw_sp_rx_listener_l3_mark_func, _trap_id, _action,	\
2227 		_is_ctrl, SP_##_trap_group, DISCARD)
2228 
2229 #define MLXSW_SP_EVENTL(_func, _trap_id)		\
2230 	MLXSW_EVENTL(_func, _trap_id, SP_EVENT)
2231 
2232 static const struct mlxsw_listener mlxsw_sp_listener[] = {
2233 	/* Events */
2234 	MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
2235 	/* L2 traps */
2236 	MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, FID_MISS, false),
2237 	/* L3 traps */
2238 	MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP,
2239 			  false),
2240 	MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false),
2241 	MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP,
2242 			  false),
2243 	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_CLASS_E, FORWARD,
2244 			     ROUTER_EXP, false),
2245 	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_MC_DMAC, FORWARD,
2246 			     ROUTER_EXP, false),
2247 	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_DIP, FORWARD,
2248 			     ROUTER_EXP, false),
2249 	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_DIP_LINK_LOCAL, FORWARD,
2250 			     ROUTER_EXP, false),
2251 	/* Multicast Router Traps */
2252 	MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
2253 	MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
2254 	/* NVE traps */
2255 	MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, NEIGH_DISCOVERY, false),
2256 };
2257 
2258 static const struct mlxsw_listener mlxsw_sp1_listener[] = {
2259 	/* Events */
2260 	MLXSW_EVENTL(mlxsw_sp1_ptp_egr_fifo_event_func, PTP_EGR_FIFO, SP_PTP0),
2261 	MLXSW_EVENTL(mlxsw_sp1_ptp_ing_fifo_event_func, PTP_ING_FIFO, SP_PTP0),
2262 };
2263 
2264 static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
2265 {
2266 	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2267 	char qpcr_pl[MLXSW_REG_QPCR_LEN];
2268 	enum mlxsw_reg_qpcr_ir_units ir_units;
2269 	int max_cpu_policers;
2270 	bool is_bytes;
2271 	u8 burst_size;
2272 	u32 rate;
2273 	int i, err;
2274 
2275 	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
2276 		return -EIO;
2277 
2278 	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);
2279 
2280 	ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
2281 	for (i = 0; i < max_cpu_policers; i++) {
2282 		is_bytes = false;
2283 		switch (i) {
2284 		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
2285 		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
2286 		case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS:
2287 			rate = 1024;
2288 			burst_size = 7;
2289 			break;
2290 		default:
2291 			continue;
2292 		}
2293 
2294 		__set_bit(i, mlxsw_sp->trap->policers_usage);
2295 		mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
2296 				    burst_size);
2297 		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
2298 		if (err)
2299 			return err;
2300 	}
2301 
2302 	return 0;
2303 }
2304 
2305 static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
2306 {
2307 	char htgt_pl[MLXSW_REG_HTGT_LEN];
2308 	enum mlxsw_reg_htgt_trap_group i;
2309 	int max_cpu_policers;
2310 	int max_trap_groups;
2311 	u8 priority, tc;
2312 	u16 policer_id;
2313 	int err;
2314 
2315 	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
2316 		return -EIO;
2317 
2318 	max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
2319 	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);
2320 
2321 	for (i = 0; i < max_trap_groups; i++) {
2322 		policer_id = i;
2323 		switch (i) {
2324 		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
2325 		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
2326 		case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS:
2327 			priority = 1;
2328 			tc = 1;
2329 			break;
2330 		case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
2331 			priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
2332 			tc = MLXSW_REG_HTGT_DEFAULT_TC;
2333 			policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
2334 			break;
2335 		default:
2336 			continue;
2337 		}
2338 
2339 		if (max_cpu_policers <= policer_id &&
2340 		    policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
2341 			return -EIO;
2342 
2343 		mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
2344 		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
2345 		if (err)
2346 			return err;
2347 	}
2348 
2349 	return 0;
2350 }
2351 
2352 static int mlxsw_sp_traps_register(struct mlxsw_sp *mlxsw_sp,
2353 				   const struct mlxsw_listener listeners[],
2354 				   size_t listeners_count)
2355 {
2356 	int i;
2357 	int err;
2358 
2359 	for (i = 0; i < listeners_count; i++) {
2360 		err = mlxsw_core_trap_register(mlxsw_sp->core,
2361 					       &listeners[i],
2362 					       mlxsw_sp);
2363 		if (err)
2364 			goto err_listener_register;
2365 
2366 	}
2367 	return 0;
2368 
2369 err_listener_register:
2370 	for (i--; i >= 0; i--) {
2371 		mlxsw_core_trap_unregister(mlxsw_sp->core,
2372 					   &listeners[i],
2373 					   mlxsw_sp);
2374 	}
2375 	return err;
2376 }
2377 
2378 static void mlxsw_sp_traps_unregister(struct mlxsw_sp *mlxsw_sp,
2379 				      const struct mlxsw_listener listeners[],
2380 				      size_t listeners_count)
2381 {
2382 	int i;
2383 
2384 	for (i = 0; i < listeners_count; i++) {
2385 		mlxsw_core_trap_unregister(mlxsw_sp->core,
2386 					   &listeners[i],
2387 					   mlxsw_sp);
2388 	}
2389 }
2390 
2391 static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
2392 {
2393 	struct mlxsw_sp_trap *trap;
2394 	u64 max_policers;
2395 	int err;
2396 
2397 	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_CPU_POLICERS))
2398 		return -EIO;
2399 	max_policers = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_CPU_POLICERS);
2400 	trap = kzalloc(struct_size(trap, policers_usage,
2401 				   BITS_TO_LONGS(max_policers)), GFP_KERNEL);
2402 	if (!trap)
2403 		return -ENOMEM;
2404 	trap->max_policers = max_policers;
2405 	mlxsw_sp->trap = trap;
2406 
2407 	err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
2408 	if (err)
2409 		goto err_cpu_policers_set;
2410 
2411 	err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
2412 	if (err)
2413 		goto err_trap_groups_set;
2414 
2415 	err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp_listener,
2416 				      ARRAY_SIZE(mlxsw_sp_listener));
2417 	if (err)
2418 		goto err_traps_register;
2419 
2420 	err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp->listeners,
2421 				      mlxsw_sp->listeners_count);
2422 	if (err)
2423 		goto err_extra_traps_init;
2424 
2425 	return 0;
2426 
2427 err_extra_traps_init:
2428 	mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener,
2429 				  ARRAY_SIZE(mlxsw_sp_listener));
2430 err_traps_register:
2431 err_trap_groups_set:
2432 err_cpu_policers_set:
2433 	kfree(trap);
2434 	return err;
2435 }
2436 
2437 static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
2438 {
2439 	mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp->listeners,
2440 				  mlxsw_sp->listeners_count);
2441 	mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener,
2442 				  ARRAY_SIZE(mlxsw_sp_listener));
2443 	kfree(mlxsw_sp->trap);
2444 }
2445 
2446 #define MLXSW_SP_LAG_SEED_INIT 0xcafecafe
2447 
2448 static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
2449 {
2450 	char slcr_pl[MLXSW_REG_SLCR_LEN];
2451 	u32 seed;
2452 	int err;
2453 
2454 	seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac),
2455 		     MLXSW_SP_LAG_SEED_INIT);
2456 	mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
2457 				     MLXSW_REG_SLCR_LAG_HASH_DMAC |
2458 				     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
2459 				     MLXSW_REG_SLCR_LAG_HASH_VLANID |
2460 				     MLXSW_REG_SLCR_LAG_HASH_SIP |
2461 				     MLXSW_REG_SLCR_LAG_HASH_DIP |
2462 				     MLXSW_REG_SLCR_LAG_HASH_SPORT |
2463 				     MLXSW_REG_SLCR_LAG_HASH_DPORT |
2464 				     MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed);
2465 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
2466 	if (err)
2467 		return err;
2468 
2469 	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) ||
2470 	    !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
2471 		return -EIO;
2472 
2473 	mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG),
2474 				 sizeof(struct mlxsw_sp_upper),
2475 				 GFP_KERNEL);
2476 	if (!mlxsw_sp->lags)
2477 		return -ENOMEM;
2478 
2479 	return 0;
2480 }
2481 
2482 static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
2483 {
2484 	kfree(mlxsw_sp->lags);
2485 }
2486 
2487 static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
2488 {
2489 	char htgt_pl[MLXSW_REG_HTGT_LEN];
2490 	int err;
2491 
2492 	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
2493 			    MLXSW_REG_HTGT_INVALID_POLICER,
2494 			    MLXSW_REG_HTGT_DEFAULT_PRIORITY,
2495 			    MLXSW_REG_HTGT_DEFAULT_TC);
2496 	err =  mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
2497 	if (err)
2498 		return err;
2499 
2500 	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_MFDE,
2501 			    MLXSW_REG_HTGT_INVALID_POLICER,
2502 			    MLXSW_REG_HTGT_DEFAULT_PRIORITY,
2503 			    MLXSW_REG_HTGT_DEFAULT_TC);
2504 	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
2505 	if (err)
2506 		return err;
2507 
2508 	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_MTWE,
2509 			    MLXSW_REG_HTGT_INVALID_POLICER,
2510 			    MLXSW_REG_HTGT_DEFAULT_PRIORITY,
2511 			    MLXSW_REG_HTGT_DEFAULT_TC);
2512 	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
2513 	if (err)
2514 		return err;
2515 
2516 	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_PMPE,
2517 			    MLXSW_REG_HTGT_INVALID_POLICER,
2518 			    MLXSW_REG_HTGT_DEFAULT_PRIORITY,
2519 			    MLXSW_REG_HTGT_DEFAULT_TC);
2520 	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
2521 }
2522 
2523 static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = {
2524 	.clock_init	= mlxsw_sp1_ptp_clock_init,
2525 	.clock_fini	= mlxsw_sp1_ptp_clock_fini,
2526 	.init		= mlxsw_sp1_ptp_init,
2527 	.fini		= mlxsw_sp1_ptp_fini,
2528 	.receive	= mlxsw_sp1_ptp_receive,
2529 	.transmitted	= mlxsw_sp1_ptp_transmitted,
2530 	.hwtstamp_get	= mlxsw_sp1_ptp_hwtstamp_get,
2531 	.hwtstamp_set	= mlxsw_sp1_ptp_hwtstamp_set,
2532 	.shaper_work	= mlxsw_sp1_ptp_shaper_work,
2533 	.get_ts_info	= mlxsw_sp1_ptp_get_ts_info,
2534 	.get_stats_count = mlxsw_sp1_get_stats_count,
2535 	.get_stats_strings = mlxsw_sp1_get_stats_strings,
2536 	.get_stats	= mlxsw_sp1_get_stats,
2537 };
2538 
2539 static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
2540 	.clock_init	= mlxsw_sp2_ptp_clock_init,
2541 	.clock_fini	= mlxsw_sp2_ptp_clock_fini,
2542 	.init		= mlxsw_sp2_ptp_init,
2543 	.fini		= mlxsw_sp2_ptp_fini,
2544 	.receive	= mlxsw_sp2_ptp_receive,
2545 	.transmitted	= mlxsw_sp2_ptp_transmitted,
2546 	.hwtstamp_get	= mlxsw_sp2_ptp_hwtstamp_get,
2547 	.hwtstamp_set	= mlxsw_sp2_ptp_hwtstamp_set,
2548 	.shaper_work	= mlxsw_sp2_ptp_shaper_work,
2549 	.get_ts_info	= mlxsw_sp2_ptp_get_ts_info,
2550 	.get_stats_count = mlxsw_sp2_get_stats_count,
2551 	.get_stats_strings = mlxsw_sp2_get_stats_strings,
2552 	.get_stats	= mlxsw_sp2_get_stats,
2553 };
2554 
2555 struct mlxsw_sp_sample_trigger_node {
2556 	struct mlxsw_sp_sample_trigger trigger;
2557 	struct mlxsw_sp_sample_params params;
2558 	struct rhash_head ht_node;
2559 	struct rcu_head rcu;
2560 	refcount_t refcount;
2561 };
2562 
2563 static const struct rhashtable_params mlxsw_sp_sample_trigger_ht_params = {
2564 	.key_offset = offsetof(struct mlxsw_sp_sample_trigger_node, trigger),
2565 	.head_offset = offsetof(struct mlxsw_sp_sample_trigger_node, ht_node),
2566 	.key_len = sizeof(struct mlxsw_sp_sample_trigger),
2567 	.automatic_shrinking = true,
2568 };
2569 
2570 static void
2571 mlxsw_sp_sample_trigger_key_init(struct mlxsw_sp_sample_trigger *key,
2572 				 const struct mlxsw_sp_sample_trigger *trigger)
2573 {
2574 	memset(key, 0, sizeof(*key));
2575 	key->type = trigger->type;
2576 	key->local_port = trigger->local_port;
2577 }
2578 
2579 /* RCU read lock must be held */
2580 struct mlxsw_sp_sample_params *
2581 mlxsw_sp_sample_trigger_params_lookup(struct mlxsw_sp *mlxsw_sp,
2582 				      const struct mlxsw_sp_sample_trigger *trigger)
2583 {
2584 	struct mlxsw_sp_sample_trigger_node *trigger_node;
2585 	struct mlxsw_sp_sample_trigger key;
2586 
2587 	mlxsw_sp_sample_trigger_key_init(&key, trigger);
2588 	trigger_node = rhashtable_lookup(&mlxsw_sp->sample_trigger_ht, &key,
2589 					 mlxsw_sp_sample_trigger_ht_params);
2590 	if (!trigger_node)
2591 		return NULL;
2592 
2593 	return &trigger_node->params;
2594 }
2595 
2596 static int
2597 mlxsw_sp_sample_trigger_node_init(struct mlxsw_sp *mlxsw_sp,
2598 				  const struct mlxsw_sp_sample_trigger *trigger,
2599 				  const struct mlxsw_sp_sample_params *params)
2600 {
2601 	struct mlxsw_sp_sample_trigger_node *trigger_node;
2602 	int err;
2603 
2604 	trigger_node = kzalloc(sizeof(*trigger_node), GFP_KERNEL);
2605 	if (!trigger_node)
2606 		return -ENOMEM;
2607 
2608 	trigger_node->trigger = *trigger;
2609 	trigger_node->params = *params;
2610 	refcount_set(&trigger_node->refcount, 1);
2611 
2612 	err = rhashtable_insert_fast(&mlxsw_sp->sample_trigger_ht,
2613 				     &trigger_node->ht_node,
2614 				     mlxsw_sp_sample_trigger_ht_params);
2615 	if (err)
2616 		goto err_rhashtable_insert;
2617 
2618 	return 0;
2619 
2620 err_rhashtable_insert:
2621 	kfree(trigger_node);
2622 	return err;
2623 }
2624 
2625 static void
2626 mlxsw_sp_sample_trigger_node_fini(struct mlxsw_sp *mlxsw_sp,
2627 				  struct mlxsw_sp_sample_trigger_node *trigger_node)
2628 {
2629 	rhashtable_remove_fast(&mlxsw_sp->sample_trigger_ht,
2630 			       &trigger_node->ht_node,
2631 			       mlxsw_sp_sample_trigger_ht_params);
2632 	kfree_rcu(trigger_node, rcu);
2633 }
2634 
2635 int
2636 mlxsw_sp_sample_trigger_params_set(struct mlxsw_sp *mlxsw_sp,
2637 				   const struct mlxsw_sp_sample_trigger *trigger,
2638 				   const struct mlxsw_sp_sample_params *params,
2639 				   struct netlink_ext_ack *extack)
2640 {
2641 	struct mlxsw_sp_sample_trigger_node *trigger_node;
2642 	struct mlxsw_sp_sample_trigger key;
2643 
2644 	ASSERT_RTNL();
2645 
2646 	mlxsw_sp_sample_trigger_key_init(&key, trigger);
2647 
2648 	trigger_node = rhashtable_lookup_fast(&mlxsw_sp->sample_trigger_ht,
2649 					      &key,
2650 					      mlxsw_sp_sample_trigger_ht_params);
2651 	if (!trigger_node)
2652 		return mlxsw_sp_sample_trigger_node_init(mlxsw_sp, &key,
2653 							 params);
2654 
2655 	if (trigger_node->params.psample_group != params->psample_group ||
2656 	    trigger_node->params.truncate != params->truncate ||
2657 	    trigger_node->params.rate != params->rate ||
2658 	    trigger_node->params.trunc_size != params->trunc_size) {
2659 		NL_SET_ERR_MSG_MOD(extack, "Sampling parameters do not match for an existing sampling trigger");
2660 		return -EINVAL;
2661 	}
2662 
2663 	refcount_inc(&trigger_node->refcount);
2664 
2665 	return 0;
2666 }
2667 
2668 void
2669 mlxsw_sp_sample_trigger_params_unset(struct mlxsw_sp *mlxsw_sp,
2670 				     const struct mlxsw_sp_sample_trigger *trigger)
2671 {
2672 	struct mlxsw_sp_sample_trigger_node *trigger_node;
2673 	struct mlxsw_sp_sample_trigger key;
2674 
2675 	ASSERT_RTNL();
2676 
2677 	mlxsw_sp_sample_trigger_key_init(&key, trigger);
2678 
2679 	trigger_node = rhashtable_lookup_fast(&mlxsw_sp->sample_trigger_ht,
2680 					      &key,
2681 					      mlxsw_sp_sample_trigger_ht_params);
2682 	if (!trigger_node)
2683 		return;
2684 
2685 	if (!refcount_dec_and_test(&trigger_node->refcount))
2686 		return;
2687 
2688 	mlxsw_sp_sample_trigger_node_fini(mlxsw_sp, trigger_node);
2689 }
2690 
2691 static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
2692 				    unsigned long event, void *ptr);
2693 
2694 static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
2695 			 const struct mlxsw_bus_info *mlxsw_bus_info,
2696 			 struct netlink_ext_ack *extack)
2697 {
2698 	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2699 	int err;
2700 
2701 	mlxsw_sp->core = mlxsw_core;
2702 	mlxsw_sp->bus_info = mlxsw_bus_info;
2703 
2704 	mlxsw_core_emad_string_tlv_enable(mlxsw_core);
2705 
2706 	err = mlxsw_sp_base_mac_get(mlxsw_sp);
2707 	if (err) {
2708 		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
2709 		return err;
2710 	}
2711 
2712 	err = mlxsw_sp_kvdl_init(mlxsw_sp);
2713 	if (err) {
2714 		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n");
2715 		return err;
2716 	}
2717 
2718 	err = mlxsw_sp_fids_init(mlxsw_sp);
2719 	if (err) {
2720 		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
2721 		goto err_fids_init;
2722 	}
2723 
2724 	err = mlxsw_sp_policers_init(mlxsw_sp);
2725 	if (err) {
2726 		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize policers\n");
2727 		goto err_policers_init;
2728 	}
2729 
2730 	err = mlxsw_sp_traps_init(mlxsw_sp);
2731 	if (err) {
2732 		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
2733 		goto err_traps_init;
2734 	}
2735 
2736 	err = mlxsw_sp_devlink_traps_init(mlxsw_sp);
2737 	if (err) {
2738 		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize devlink traps\n");
2739 		goto err_devlink_traps_init;
2740 	}
2741 
2742 	err = mlxsw_sp_buffers_init(mlxsw_sp);
2743 	if (err) {
2744 		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
2745 		goto err_buffers_init;
2746 	}
2747 
2748 	err = mlxsw_sp_lag_init(mlxsw_sp);
2749 	if (err) {
2750 		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
2751 		goto err_lag_init;
2752 	}
2753 
2754 	/* Initialize SPAN before router and switchdev, so that those components
2755 	 * can call mlxsw_sp_span_respin().
2756 	 */
2757 	err = mlxsw_sp_span_init(mlxsw_sp);
2758 	if (err) {
2759 		dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
2760 		goto err_span_init;
2761 	}
2762 
2763 	err = mlxsw_sp_switchdev_init(mlxsw_sp);
2764 	if (err) {
2765 		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
2766 		goto err_switchdev_init;
2767 	}
2768 
2769 	err = mlxsw_sp_counter_pool_init(mlxsw_sp);
2770 	if (err) {
2771 		dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
2772 		goto err_counter_pool_init;
2773 	}
2774 
2775 	err = mlxsw_sp_afa_init(mlxsw_sp);
2776 	if (err) {
2777 		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n");
2778 		goto err_afa_init;
2779 	}
2780 
2781 	err = mlxsw_sp_nve_init(mlxsw_sp);
2782 	if (err) {
2783 		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n");
2784 		goto err_nve_init;
2785 	}
2786 
2787 	err = mlxsw_sp_acl_init(mlxsw_sp);
2788 	if (err) {
2789 		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
2790 		goto err_acl_init;
2791 	}
2792 
2793 	err = mlxsw_sp_router_init(mlxsw_sp, extack);
2794 	if (err) {
2795 		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
2796 		goto err_router_init;
2797 	}
2798 
2799 	if (mlxsw_sp->bus_info->read_frc_capable) {
2800 		/* NULL is a valid return value from clock_init */
2801 		mlxsw_sp->clock =
2802 			mlxsw_sp->ptp_ops->clock_init(mlxsw_sp,
2803 						      mlxsw_sp->bus_info->dev);
2804 		if (IS_ERR(mlxsw_sp->clock)) {
2805 			err = PTR_ERR(mlxsw_sp->clock);
2806 			dev_err(mlxsw_sp->bus_info->dev, "Failed to init ptp clock\n");
2807 			goto err_ptp_clock_init;
2808 		}
2809 	}
2810 
2811 	if (mlxsw_sp->clock) {
2812 		/* NULL is a valid return value from ptp_ops->init */
2813 		mlxsw_sp->ptp_state = mlxsw_sp->ptp_ops->init(mlxsw_sp);
2814 		if (IS_ERR(mlxsw_sp->ptp_state)) {
2815 			err = PTR_ERR(mlxsw_sp->ptp_state);
2816 			dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PTP\n");
2817 			goto err_ptp_init;
2818 		}
2819 	}
2820 
2821 	/* Initialize netdevice notifier after router and SPAN is initialized,
2822 	 * so that the event handler can use router structures and call SPAN
2823 	 * respin.
2824 	 */
2825 	mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event;
2826 	err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
2827 					      &mlxsw_sp->netdevice_nb);
2828 	if (err) {
2829 		dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n");
2830 		goto err_netdev_notifier;
2831 	}
2832 
2833 	err = mlxsw_sp_dpipe_init(mlxsw_sp);
2834 	if (err) {
2835 		dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
2836 		goto err_dpipe_init;
2837 	}
2838 
2839 	err = mlxsw_sp_port_module_info_init(mlxsw_sp);
2840 	if (err) {
2841 		dev_err(mlxsw_sp->bus_info->dev, "Failed to init port module info\n");
2842 		goto err_port_module_info_init;
2843 	}
2844 
2845 	err = rhashtable_init(&mlxsw_sp->sample_trigger_ht,
2846 			      &mlxsw_sp_sample_trigger_ht_params);
2847 	if (err) {
2848 		dev_err(mlxsw_sp->bus_info->dev, "Failed to init sampling trigger hashtable\n");
2849 		goto err_sample_trigger_init;
2850 	}
2851 
2852 	err = mlxsw_sp_ports_create(mlxsw_sp);
2853 	if (err) {
2854 		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
2855 		goto err_ports_create;
2856 	}
2857 
2858 	return 0;
2859 
2860 err_ports_create:
2861 	rhashtable_destroy(&mlxsw_sp->sample_trigger_ht);
2862 err_sample_trigger_init:
2863 	mlxsw_sp_port_module_info_fini(mlxsw_sp);
2864 err_port_module_info_init:
2865 	mlxsw_sp_dpipe_fini(mlxsw_sp);
2866 err_dpipe_init:
2867 	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
2868 					  &mlxsw_sp->netdevice_nb);
2869 err_netdev_notifier:
2870 	if (mlxsw_sp->clock)
2871 		mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
2872 err_ptp_init:
2873 	if (mlxsw_sp->clock)
2874 		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
2875 err_ptp_clock_init:
2876 	mlxsw_sp_router_fini(mlxsw_sp);
2877 err_router_init:
2878 	mlxsw_sp_acl_fini(mlxsw_sp);
2879 err_acl_init:
2880 	mlxsw_sp_nve_fini(mlxsw_sp);
2881 err_nve_init:
2882 	mlxsw_sp_afa_fini(mlxsw_sp);
2883 err_afa_init:
2884 	mlxsw_sp_counter_pool_fini(mlxsw_sp);
2885 err_counter_pool_init:
2886 	mlxsw_sp_switchdev_fini(mlxsw_sp);
2887 err_switchdev_init:
2888 	mlxsw_sp_span_fini(mlxsw_sp);
2889 err_span_init:
2890 	mlxsw_sp_lag_fini(mlxsw_sp);
2891 err_lag_init:
2892 	mlxsw_sp_buffers_fini(mlxsw_sp);
2893 err_buffers_init:
2894 	mlxsw_sp_devlink_traps_fini(mlxsw_sp);
2895 err_devlink_traps_init:
2896 	mlxsw_sp_traps_fini(mlxsw_sp);
2897 err_traps_init:
2898 	mlxsw_sp_policers_fini(mlxsw_sp);
2899 err_policers_init:
2900 	mlxsw_sp_fids_fini(mlxsw_sp);
2901 err_fids_init:
2902 	mlxsw_sp_kvdl_fini(mlxsw_sp);
2903 	return err;
2904 }
2905 
2906 static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
2907 			  const struct mlxsw_bus_info *mlxsw_bus_info,
2908 			  struct netlink_ext_ack *extack)
2909 {
2910 	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2911 
2912 	mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops;
2913 	mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops;
2914 	mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops;
2915 	mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops;
2916 	mlxsw_sp->acl_rulei_ops = &mlxsw_sp1_acl_rulei_ops;
2917 	mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops;
2918 	mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr;
2919 	mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask;
2920 	mlxsw_sp->rif_ops_arr = mlxsw_sp1_rif_ops_arr;
2921 	mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals;
2922 	mlxsw_sp->sb_ops = &mlxsw_sp1_sb_ops;
2923 	mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops;
2924 	mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops;
2925 	mlxsw_sp->span_ops = &mlxsw_sp1_span_ops;
2926 	mlxsw_sp->policer_core_ops = &mlxsw_sp1_policer_core_ops;
2927 	mlxsw_sp->trap_ops = &mlxsw_sp1_trap_ops;
2928 	mlxsw_sp->mall_ops = &mlxsw_sp1_mall_ops;
2929 	mlxsw_sp->listeners = mlxsw_sp1_listener;
2930 	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener);
2931 	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1;
2932 
2933 	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
2934 }
2935 
2936 static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
2937 			  const struct mlxsw_bus_info *mlxsw_bus_info,
2938 			  struct netlink_ext_ack *extack)
2939 {
2940 	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2941 
2942 	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
2943 	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
2944 	mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
2945 	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
2946 	mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
2947 	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
2948 	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
2949 	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
2950 	mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
2951 	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
2952 	mlxsw_sp->sb_ops = &mlxsw_sp2_sb_ops;
2953 	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
2954 	mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
2955 	mlxsw_sp->span_ops = &mlxsw_sp2_span_ops;
2956 	mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
2957 	mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
2958 	mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
2959 	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2;
2960 
2961 	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
2962 }
2963 
2964 static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
2965 			  const struct mlxsw_bus_info *mlxsw_bus_info,
2966 			  struct netlink_ext_ack *extack)
2967 {
2968 	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2969 
2970 	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
2971 	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
2972 	mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
2973 	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
2974 	mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
2975 	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
2976 	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
2977 	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
2978 	mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
2979 	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
2980 	mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops;
2981 	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
2982 	mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
2983 	mlxsw_sp->span_ops = &mlxsw_sp3_span_ops;
2984 	mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
2985 	mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
2986 	mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
2987 	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3;
2988 
2989 	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
2990 }
2991 
2992 static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
2993 {
2994 	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2995 
2996 	mlxsw_sp_ports_remove(mlxsw_sp);
2997 	rhashtable_destroy(&mlxsw_sp->sample_trigger_ht);
2998 	mlxsw_sp_port_module_info_fini(mlxsw_sp);
2999 	mlxsw_sp_dpipe_fini(mlxsw_sp);
3000 	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
3001 					  &mlxsw_sp->netdevice_nb);
3002 	if (mlxsw_sp->clock) {
3003 		mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
3004 		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
3005 	}
3006 	mlxsw_sp_router_fini(mlxsw_sp);
3007 	mlxsw_sp_acl_fini(mlxsw_sp);
3008 	mlxsw_sp_nve_fini(mlxsw_sp);
3009 	mlxsw_sp_afa_fini(mlxsw_sp);
3010 	mlxsw_sp_counter_pool_fini(mlxsw_sp);
3011 	mlxsw_sp_switchdev_fini(mlxsw_sp);
3012 	mlxsw_sp_span_fini(mlxsw_sp);
3013 	mlxsw_sp_lag_fini(mlxsw_sp);
3014 	mlxsw_sp_buffers_fini(mlxsw_sp);
3015 	mlxsw_sp_devlink_traps_fini(mlxsw_sp);
3016 	mlxsw_sp_traps_fini(mlxsw_sp);
3017 	mlxsw_sp_policers_fini(mlxsw_sp);
3018 	mlxsw_sp_fids_fini(mlxsw_sp);
3019 	mlxsw_sp_kvdl_fini(mlxsw_sp);
3020 }
3021 
3022 /* Per-FID flood tables are used for both "true" 802.1D FIDs and emulated
3023  * 802.1Q FIDs
3024  */
3025 #define MLXSW_SP_FID_FLOOD_TABLE_SIZE	(MLXSW_SP_FID_8021D_MAX + \
3026 					 VLAN_VID_MASK - 1)
3027 
3028 static const struct mlxsw_config_profile mlxsw_sp1_config_profile = {
3029 	.used_max_mid			= 1,
3030 	.max_mid			= MLXSW_SP_MID_MAX,
3031 	.used_flood_tables		= 1,
3032 	.used_flood_mode		= 1,
3033 	.flood_mode			= 3,
3034 	.max_fid_flood_tables		= 3,
3035 	.fid_flood_table_size		= MLXSW_SP_FID_FLOOD_TABLE_SIZE,
3036 	.used_max_ib_mc			= 1,
3037 	.max_ib_mc			= 0,
3038 	.used_max_pkey			= 1,
3039 	.max_pkey			= 0,
3040 	.used_kvd_sizes			= 1,
3041 	.kvd_hash_single_parts		= 59,
3042 	.kvd_hash_double_parts		= 41,
3043 	.kvd_linear_size		= MLXSW_SP_KVD_LINEAR_SIZE,
3044 	.swid_config			= {
3045 		{
3046 			.used_type	= 1,
3047 			.type		= MLXSW_PORT_SWID_TYPE_ETH,
3048 		}
3049 	},
3050 };
3051 
3052 static const struct mlxsw_config_profile mlxsw_sp2_config_profile = {
3053 	.used_max_mid			= 1,
3054 	.max_mid			= MLXSW_SP_MID_MAX,
3055 	.used_flood_tables		= 1,
3056 	.used_flood_mode		= 1,
3057 	.flood_mode			= 3,
3058 	.max_fid_flood_tables		= 3,
3059 	.fid_flood_table_size		= MLXSW_SP_FID_FLOOD_TABLE_SIZE,
3060 	.used_max_ib_mc			= 1,
3061 	.max_ib_mc			= 0,
3062 	.used_max_pkey			= 1,
3063 	.max_pkey			= 0,
3064 	.used_kvh_xlt_cache_mode	= 1,
3065 	.kvh_xlt_cache_mode		= 1,
3066 	.swid_config			= {
3067 		{
3068 			.used_type	= 1,
3069 			.type		= MLXSW_PORT_SWID_TYPE_ETH,
3070 		}
3071 	},
3072 };
3073 
3074 static void
3075 mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
3076 				      struct devlink_resource_size_params *kvd_size_params,
3077 				      struct devlink_resource_size_params *linear_size_params,
3078 				      struct devlink_resource_size_params *hash_double_size_params,
3079 				      struct devlink_resource_size_params *hash_single_size_params)
3080 {
3081 	u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
3082 						 KVD_SINGLE_MIN_SIZE);
3083 	u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
3084 						 KVD_DOUBLE_MIN_SIZE);
3085 	u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
3086 	u32 linear_size_min = 0;
3087 
3088 	devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size,
3089 					  MLXSW_SP_KVD_GRANULARITY,
3090 					  DEVLINK_RESOURCE_UNIT_ENTRY);
3091 	devlink_resource_size_params_init(linear_size_params, linear_size_min,
3092 					  kvd_size - single_size_min -
3093 					  double_size_min,
3094 					  MLXSW_SP_KVD_GRANULARITY,
3095 					  DEVLINK_RESOURCE_UNIT_ENTRY);
3096 	devlink_resource_size_params_init(hash_double_size_params,
3097 					  double_size_min,
3098 					  kvd_size - single_size_min -
3099 					  linear_size_min,
3100 					  MLXSW_SP_KVD_GRANULARITY,
3101 					  DEVLINK_RESOURCE_UNIT_ENTRY);
3102 	devlink_resource_size_params_init(hash_single_size_params,
3103 					  single_size_min,
3104 					  kvd_size - double_size_min -
3105 					  linear_size_min,
3106 					  MLXSW_SP_KVD_GRANULARITY,
3107 					  DEVLINK_RESOURCE_UNIT_ENTRY);
3108 }
3109 
3110 static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core)
3111 {
3112 	struct devlink *devlink = priv_to_devlink(mlxsw_core);
3113 	struct devlink_resource_size_params hash_single_size_params;
3114 	struct devlink_resource_size_params hash_double_size_params;
3115 	struct devlink_resource_size_params linear_size_params;
3116 	struct devlink_resource_size_params kvd_size_params;
3117 	u32 kvd_size, single_size, double_size, linear_size;
3118 	const struct mlxsw_config_profile *profile;
3119 	int err;
3120 
3121 	profile = &mlxsw_sp1_config_profile;
3122 	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
3123 		return -EIO;
3124 
3125 	mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params,
3126 					      &linear_size_params,
3127 					      &hash_double_size_params,
3128 					      &hash_single_size_params);
3129 
3130 	kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
3131 	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
3132 					kvd_size, MLXSW_SP_RESOURCE_KVD,
3133 					DEVLINK_RESOURCE_ID_PARENT_TOP,
3134 					&kvd_size_params);
3135 	if (err)
3136 		return err;
3137 
3138 	linear_size = profile->kvd_linear_size;
3139 	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR,
3140 					linear_size,
3141 					MLXSW_SP_RESOURCE_KVD_LINEAR,
3142 					MLXSW_SP_RESOURCE_KVD,
3143 					&linear_size_params);
3144 	if (err)
3145 		return err;
3146 
3147 	err = mlxsw_sp1_kvdl_resources_register(mlxsw_core);
3148 	if  (err)
3149 		return err;
3150 
3151 	double_size = kvd_size - linear_size;
3152 	double_size *= profile->kvd_hash_double_parts;
3153 	double_size /= profile->kvd_hash_double_parts +
3154 		       profile->kvd_hash_single_parts;
3155 	double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY);
3156 	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
3157 					double_size,
3158 					MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
3159 					MLXSW_SP_RESOURCE_KVD,
3160 					&hash_double_size_params);
3161 	if (err)
3162 		return err;
3163 
3164 	single_size = kvd_size - double_size - linear_size;
3165 	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE,
3166 					single_size,
3167 					MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
3168 					MLXSW_SP_RESOURCE_KVD,
3169 					&hash_single_size_params);
3170 	if (err)
3171 		return err;
3172 
3173 	return 0;
3174 }
3175 
3176 static int mlxsw_sp2_resources_kvd_register(struct mlxsw_core *mlxsw_core)
3177 {
3178 	struct devlink *devlink = priv_to_devlink(mlxsw_core);
3179 	struct devlink_resource_size_params kvd_size_params;
3180 	u32 kvd_size;
3181 
3182 	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
3183 		return -EIO;
3184 
3185 	kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
3186 	devlink_resource_size_params_init(&kvd_size_params, kvd_size, kvd_size,
3187 					  MLXSW_SP_KVD_GRANULARITY,
3188 					  DEVLINK_RESOURCE_UNIT_ENTRY);
3189 
3190 	return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
3191 					 kvd_size, MLXSW_SP_RESOURCE_KVD,
3192 					 DEVLINK_RESOURCE_ID_PARENT_TOP,
3193 					 &kvd_size_params);
3194 }
3195 
3196 static int mlxsw_sp_resources_span_register(struct mlxsw_core *mlxsw_core)
3197 {
3198 	struct devlink *devlink = priv_to_devlink(mlxsw_core);
3199 	struct devlink_resource_size_params span_size_params;
3200 	u32 max_span;
3201 
3202 	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SPAN))
3203 		return -EIO;
3204 
3205 	max_span = MLXSW_CORE_RES_GET(mlxsw_core, MAX_SPAN);
3206 	devlink_resource_size_params_init(&span_size_params, max_span, max_span,
3207 					  1, DEVLINK_RESOURCE_UNIT_ENTRY);
3208 
3209 	return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_SPAN,
3210 					 max_span, MLXSW_SP_RESOURCE_SPAN,
3211 					 DEVLINK_RESOURCE_ID_PARENT_TOP,
3212 					 &span_size_params);
3213 }
3214 
3215 static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
3216 {
3217 	int err;
3218 
3219 	err = mlxsw_sp1_resources_kvd_register(mlxsw_core);
3220 	if (err)
3221 		return err;
3222 
3223 	err = mlxsw_sp_resources_span_register(mlxsw_core);
3224 	if (err)
3225 		goto err_resources_span_register;
3226 
3227 	err = mlxsw_sp_counter_resources_register(mlxsw_core);
3228 	if (err)
3229 		goto err_resources_counter_register;
3230 
3231 	err = mlxsw_sp_policer_resources_register(mlxsw_core);
3232 	if (err)
3233 		goto err_resources_counter_register;
3234 
3235 	return 0;
3236 
3237 err_resources_counter_register:
3238 err_resources_span_register:
3239 	devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL);
3240 	return err;
3241 }
3242 
3243 static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
3244 {
3245 	int err;
3246 
3247 	err = mlxsw_sp2_resources_kvd_register(mlxsw_core);
3248 	if (err)
3249 		return err;
3250 
3251 	err = mlxsw_sp_resources_span_register(mlxsw_core);
3252 	if (err)
3253 		goto err_resources_span_register;
3254 
3255 	err = mlxsw_sp_counter_resources_register(mlxsw_core);
3256 	if (err)
3257 		goto err_resources_counter_register;
3258 
3259 	err = mlxsw_sp_policer_resources_register(mlxsw_core);
3260 	if (err)
3261 		goto err_resources_counter_register;
3262 
3263 	return 0;
3264 
3265 err_resources_counter_register:
3266 err_resources_span_register:
3267 	devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL);
3268 	return err;
3269 }
3270 
3271 static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
3272 				  const struct mlxsw_config_profile *profile,
3273 				  u64 *p_single_size, u64 *p_double_size,
3274 				  u64 *p_linear_size)
3275 {
3276 	struct devlink *devlink = priv_to_devlink(mlxsw_core);
3277 	u32 double_size;
3278 	int err;
3279 
3280 	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
3281 	    !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE))
3282 		return -EIO;
3283 
3284 	/* The hash part is what left of the kvd without the
3285 	 * linear part. It is split to the single size and
3286 	 * double size by the parts ratio from the profile.
3287 	 * Both sizes must be a multiplications of the
3288 	 * granularity from the profile. In case the user
3289 	 * provided the sizes they are obtained via devlink.
3290 	 */
3291 	err = devlink_resource_size_get(devlink,
3292 					MLXSW_SP_RESOURCE_KVD_LINEAR,
3293 					p_linear_size);
3294 	if (err)
3295 		*p_linear_size = profile->kvd_linear_size;
3296 
3297 	err = devlink_resource_size_get(devlink,
3298 					MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
3299 					p_double_size);
3300 	if (err) {
3301 		double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
3302 			      *p_linear_size;
3303 		double_size *= profile->kvd_hash_double_parts;
3304 		double_size /= profile->kvd_hash_double_parts +
3305 			       profile->kvd_hash_single_parts;
3306 		*p_double_size = rounddown(double_size,
3307 					   MLXSW_SP_KVD_GRANULARITY);
3308 	}
3309 
3310 	err = devlink_resource_size_get(devlink,
3311 					MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
3312 					p_single_size);
3313 	if (err)
3314 		*p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
3315 				 *p_double_size - *p_linear_size;
3316 
3317 	/* Check results are legal. */
3318 	if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
3319 	    *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) ||
3320 	    MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size)
3321 		return -EIO;
3322 
3323 	return 0;
3324 }
3325 
3326 static int
3327 mlxsw_sp_params_acl_region_rehash_intrvl_get(struct devlink *devlink, u32 id,
3328 					     struct devlink_param_gset_ctx *ctx)
3329 {
3330 	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
3331 	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3332 
3333 	ctx->val.vu32 = mlxsw_sp_acl_region_rehash_intrvl_get(mlxsw_sp);
3334 	return 0;
3335 }
3336 
3337 static int
3338 mlxsw_sp_params_acl_region_rehash_intrvl_set(struct devlink *devlink, u32 id,
3339 					     struct devlink_param_gset_ctx *ctx)
3340 {
3341 	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
3342 	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3343 
3344 	return mlxsw_sp_acl_region_rehash_intrvl_set(mlxsw_sp, ctx->val.vu32);
3345 }
3346 
3347 static const struct devlink_param mlxsw_sp2_devlink_params[] = {
3348 	DEVLINK_PARAM_DRIVER(MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
3349 			     "acl_region_rehash_interval",
3350 			     DEVLINK_PARAM_TYPE_U32,
3351 			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
3352 			     mlxsw_sp_params_acl_region_rehash_intrvl_get,
3353 			     mlxsw_sp_params_acl_region_rehash_intrvl_set,
3354 			     NULL),
3355 };
3356 
3357 static int mlxsw_sp2_params_register(struct mlxsw_core *mlxsw_core)
3358 {
3359 	struct devlink *devlink = priv_to_devlink(mlxsw_core);
3360 	union devlink_param_value value;
3361 	int err;
3362 
3363 	err = devlink_params_register(devlink, mlxsw_sp2_devlink_params,
3364 				      ARRAY_SIZE(mlxsw_sp2_devlink_params));
3365 	if (err)
3366 		return err;
3367 
3368 	value.vu32 = 0;
3369 	devlink_param_driverinit_value_set(devlink,
3370 					   MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
3371 					   value);
3372 	return 0;
3373 }
3374 
3375 static void mlxsw_sp2_params_unregister(struct mlxsw_core *mlxsw_core)
3376 {
3377 	devlink_params_unregister(priv_to_devlink(mlxsw_core),
3378 				  mlxsw_sp2_devlink_params,
3379 				  ARRAY_SIZE(mlxsw_sp2_devlink_params));
3380 }
3381 
3382 static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core,
3383 				     struct sk_buff *skb, u8 local_port)
3384 {
3385 	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3386 
3387 	skb_pull(skb, MLXSW_TXHDR_LEN);
3388 	mlxsw_sp->ptp_ops->transmitted(mlxsw_sp, skb, local_port);
3389 }
3390 
3391 static struct mlxsw_driver mlxsw_sp1_driver = {
3392 	.kind				= mlxsw_sp1_driver_name,
3393 	.priv_size			= sizeof(struct mlxsw_sp),
3394 	.fw_req_rev			= &mlxsw_sp1_fw_rev,
3395 	.fw_filename			= MLXSW_SP1_FW_FILENAME,
3396 	.init				= mlxsw_sp1_init,
3397 	.fini				= mlxsw_sp_fini,
3398 	.basic_trap_groups_set		= mlxsw_sp_basic_trap_groups_set,
3399 	.port_split			= mlxsw_sp_port_split,
3400 	.port_unsplit			= mlxsw_sp_port_unsplit,
3401 	.sb_pool_get			= mlxsw_sp_sb_pool_get,
3402 	.sb_pool_set			= mlxsw_sp_sb_pool_set,
3403 	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
3404 	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
3405 	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
3406 	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
3407 	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
3408 	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
3409 	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
3410 	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
3411 	.trap_init			= mlxsw_sp_trap_init,
3412 	.trap_fini			= mlxsw_sp_trap_fini,
3413 	.trap_action_set		= mlxsw_sp_trap_action_set,
3414 	.trap_group_init		= mlxsw_sp_trap_group_init,
3415 	.trap_group_set			= mlxsw_sp_trap_group_set,
3416 	.trap_policer_init		= mlxsw_sp_trap_policer_init,
3417 	.trap_policer_fini		= mlxsw_sp_trap_policer_fini,
3418 	.trap_policer_set		= mlxsw_sp_trap_policer_set,
3419 	.trap_policer_counter_get	= mlxsw_sp_trap_policer_counter_get,
3420 	.txhdr_construct		= mlxsw_sp_txhdr_construct,
3421 	.resources_register		= mlxsw_sp1_resources_register,
3422 	.kvd_sizes_get			= mlxsw_sp_kvd_sizes_get,
3423 	.ptp_transmitted		= mlxsw_sp_ptp_transmitted,
3424 	.txhdr_len			= MLXSW_TXHDR_LEN,
3425 	.profile			= &mlxsw_sp1_config_profile,
3426 	.res_query_enabled		= true,
3427 	.fw_fatal_enabled		= true,
3428 	.temp_warn_enabled		= true,
3429 };
3430 
3431 static struct mlxsw_driver mlxsw_sp2_driver = {
3432 	.kind				= mlxsw_sp2_driver_name,
3433 	.priv_size			= sizeof(struct mlxsw_sp),
3434 	.fw_req_rev			= &mlxsw_sp2_fw_rev,
3435 	.fw_filename			= MLXSW_SP2_FW_FILENAME,
3436 	.init				= mlxsw_sp2_init,
3437 	.fini				= mlxsw_sp_fini,
3438 	.basic_trap_groups_set		= mlxsw_sp_basic_trap_groups_set,
3439 	.port_split			= mlxsw_sp_port_split,
3440 	.port_unsplit			= mlxsw_sp_port_unsplit,
3441 	.sb_pool_get			= mlxsw_sp_sb_pool_get,
3442 	.sb_pool_set			= mlxsw_sp_sb_pool_set,
3443 	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
3444 	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
3445 	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
3446 	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
3447 	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
3448 	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
3449 	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
3450 	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
3451 	.trap_init			= mlxsw_sp_trap_init,
3452 	.trap_fini			= mlxsw_sp_trap_fini,
3453 	.trap_action_set		= mlxsw_sp_trap_action_set,
3454 	.trap_group_init		= mlxsw_sp_trap_group_init,
3455 	.trap_group_set			= mlxsw_sp_trap_group_set,
3456 	.trap_policer_init		= mlxsw_sp_trap_policer_init,
3457 	.trap_policer_fini		= mlxsw_sp_trap_policer_fini,
3458 	.trap_policer_set		= mlxsw_sp_trap_policer_set,
3459 	.trap_policer_counter_get	= mlxsw_sp_trap_policer_counter_get,
3460 	.txhdr_construct		= mlxsw_sp_txhdr_construct,
3461 	.resources_register		= mlxsw_sp2_resources_register,
3462 	.params_register		= mlxsw_sp2_params_register,
3463 	.params_unregister		= mlxsw_sp2_params_unregister,
3464 	.ptp_transmitted		= mlxsw_sp_ptp_transmitted,
3465 	.txhdr_len			= MLXSW_TXHDR_LEN,
3466 	.profile			= &mlxsw_sp2_config_profile,
3467 	.res_query_enabled		= true,
3468 	.fw_fatal_enabled		= true,
3469 	.temp_warn_enabled		= true,
3470 };
3471 
3472 static struct mlxsw_driver mlxsw_sp3_driver = {
3473 	.kind				= mlxsw_sp3_driver_name,
3474 	.priv_size			= sizeof(struct mlxsw_sp),
3475 	.fw_req_rev			= &mlxsw_sp3_fw_rev,
3476 	.fw_filename			= MLXSW_SP3_FW_FILENAME,
3477 	.init				= mlxsw_sp3_init,
3478 	.fini				= mlxsw_sp_fini,
3479 	.basic_trap_groups_set		= mlxsw_sp_basic_trap_groups_set,
3480 	.port_split			= mlxsw_sp_port_split,
3481 	.port_unsplit			= mlxsw_sp_port_unsplit,
3482 	.sb_pool_get			= mlxsw_sp_sb_pool_get,
3483 	.sb_pool_set			= mlxsw_sp_sb_pool_set,
3484 	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
3485 	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
3486 	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
3487 	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
3488 	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
3489 	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
3490 	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
3491 	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
3492 	.trap_init			= mlxsw_sp_trap_init,
3493 	.trap_fini			= mlxsw_sp_trap_fini,
3494 	.trap_action_set		= mlxsw_sp_trap_action_set,
3495 	.trap_group_init		= mlxsw_sp_trap_group_init,
3496 	.trap_group_set			= mlxsw_sp_trap_group_set,
3497 	.trap_policer_init		= mlxsw_sp_trap_policer_init,
3498 	.trap_policer_fini		= mlxsw_sp_trap_policer_fini,
3499 	.trap_policer_set		= mlxsw_sp_trap_policer_set,
3500 	.trap_policer_counter_get	= mlxsw_sp_trap_policer_counter_get,
3501 	.txhdr_construct		= mlxsw_sp_txhdr_construct,
3502 	.resources_register		= mlxsw_sp2_resources_register,
3503 	.params_register		= mlxsw_sp2_params_register,
3504 	.params_unregister		= mlxsw_sp2_params_unregister,
3505 	.ptp_transmitted		= mlxsw_sp_ptp_transmitted,
3506 	.txhdr_len			= MLXSW_TXHDR_LEN,
3507 	.profile			= &mlxsw_sp2_config_profile,
3508 	.res_query_enabled		= true,
3509 	.fw_fatal_enabled		= true,
3510 	.temp_warn_enabled		= true,
3511 };
3512 
3513 bool mlxsw_sp_port_dev_check(const struct net_device *dev)
3514 {
3515 	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
3516 }
3517 
3518 static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev,
3519 				   struct netdev_nested_priv *priv)
3520 {
3521 	int ret = 0;
3522 
3523 	if (mlxsw_sp_port_dev_check(lower_dev)) {
3524 		priv->data = (void *)netdev_priv(lower_dev);
3525 		ret = 1;
3526 	}
3527 
3528 	return ret;
3529 }
3530 
3531 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
3532 {
3533 	struct netdev_nested_priv priv = {
3534 		.data = NULL,
3535 	};
3536 
3537 	if (mlxsw_sp_port_dev_check(dev))
3538 		return netdev_priv(dev);
3539 
3540 	netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &priv);
3541 
3542 	return (struct mlxsw_sp_port *)priv.data;
3543 }
3544 
3545 struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
3546 {
3547 	struct mlxsw_sp_port *mlxsw_sp_port;
3548 
3549 	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
3550 	return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
3551 }
3552 
3553 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
3554 {
3555 	struct netdev_nested_priv priv = {
3556 		.data = NULL,
3557 	};
3558 
3559 	if (mlxsw_sp_port_dev_check(dev))
3560 		return netdev_priv(dev);
3561 
3562 	netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
3563 				      &priv);
3564 
3565 	return (struct mlxsw_sp_port *)priv.data;
3566 }
3567 
3568 struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
3569 {
3570 	struct mlxsw_sp_port *mlxsw_sp_port;
3571 
3572 	rcu_read_lock();
3573 	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
3574 	if (mlxsw_sp_port)
3575 		dev_hold(mlxsw_sp_port->dev);
3576 	rcu_read_unlock();
3577 	return mlxsw_sp_port;
3578 }
3579 
3580 void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
3581 {
3582 	dev_put(mlxsw_sp_port->dev);
3583 }
3584 
3585 static void
3586 mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port,
3587 				 struct net_device *lag_dev)
3588 {
3589 	struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev);
3590 	struct net_device *upper_dev;
3591 	struct list_head *iter;
3592 
3593 	if (netif_is_bridge_port(lag_dev))
3594 		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev);
3595 
3596 	netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
3597 		if (!netif_is_bridge_port(upper_dev))
3598 			continue;
3599 		br_dev = netdev_master_upper_dev_get(upper_dev);
3600 		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev);
3601 	}
3602 }
3603 
3604 static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
3605 {
3606 	char sldr_pl[MLXSW_REG_SLDR_LEN];
3607 
3608 	mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
3609 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3610 }
3611 
3612 static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
3613 {
3614 	char sldr_pl[MLXSW_REG_SLDR_LEN];
3615 
3616 	mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
3617 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3618 }
3619 
3620 static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
3621 				     u16 lag_id, u8 port_index)
3622 {
3623 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3624 	char slcor_pl[MLXSW_REG_SLCOR_LEN];
3625 
3626 	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
3627 				      lag_id, port_index);
3628 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3629 }
3630 
3631 static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
3632 					u16 lag_id)
3633 {
3634 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3635 	char slcor_pl[MLXSW_REG_SLCOR_LEN];
3636 
3637 	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
3638 					 lag_id);
3639 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3640 }
3641 
3642 static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
3643 					u16 lag_id)
3644 {
3645 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3646 	char slcor_pl[MLXSW_REG_SLCOR_LEN];
3647 
3648 	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
3649 					lag_id);
3650 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3651 }
3652 
3653 static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
3654 					 u16 lag_id)
3655 {
3656 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3657 	char slcor_pl[MLXSW_REG_SLCOR_LEN];
3658 
3659 	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
3660 					 lag_id);
3661 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3662 }
3663 
3664 static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
3665 				  struct net_device *lag_dev,
3666 				  u16 *p_lag_id)
3667 {
3668 	struct mlxsw_sp_upper *lag;
3669 	int free_lag_id = -1;
3670 	u64 max_lag;
3671 	int i;
3672 
3673 	max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
3674 	for (i = 0; i < max_lag; i++) {
3675 		lag = mlxsw_sp_lag_get(mlxsw_sp, i);
3676 		if (lag->ref_count) {
3677 			if (lag->dev == lag_dev) {
3678 				*p_lag_id = i;
3679 				return 0;
3680 			}
3681 		} else if (free_lag_id < 0) {
3682 			free_lag_id = i;
3683 		}
3684 	}
3685 	if (free_lag_id < 0)
3686 		return -EBUSY;
3687 	*p_lag_id = free_lag_id;
3688 	return 0;
3689 }
3690 
3691 static bool
3692 mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
3693 			  struct net_device *lag_dev,
3694 			  struct netdev_lag_upper_info *lag_upper_info,
3695 			  struct netlink_ext_ack *extack)
3696 {
3697 	u16 lag_id;
3698 
3699 	if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) {
3700 		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices");
3701 		return false;
3702 	}
3703 	if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
3704 		NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
3705 		return false;
3706 	}
3707 	return true;
3708 }
3709 
3710 static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
3711 				       u16 lag_id, u8 *p_port_index)
3712 {
3713 	u64 max_lag_members;
3714 	int i;
3715 
3716 	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
3717 					     MAX_LAG_MEMBERS);
3718 	for (i = 0; i < max_lag_members; i++) {
3719 		if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
3720 			*p_port_index = i;
3721 			return 0;
3722 		}
3723 	}
3724 	return -EBUSY;
3725 }
3726 
3727 static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
3728 				  struct net_device *lag_dev,
3729 				  struct netlink_ext_ack *extack)
3730 {
3731 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3732 	struct mlxsw_sp_upper *lag;
3733 	u16 lag_id;
3734 	u8 port_index;
3735 	int err;
3736 
3737 	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
3738 	if (err)
3739 		return err;
3740 	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
3741 	if (!lag->ref_count) {
3742 		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
3743 		if (err)
3744 			return err;
3745 		lag->dev = lag_dev;
3746 	}
3747 
3748 	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
3749 	if (err)
3750 		return err;
3751 	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
3752 	if (err)
3753 		goto err_col_port_add;
3754 
3755 	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
3756 				   mlxsw_sp_port->local_port);
3757 	mlxsw_sp_port->lag_id = lag_id;
3758 	mlxsw_sp_port->lagged = 1;
3759 	lag->ref_count++;
3760 
3761 	/* Port is no longer usable as a router interface */
3762 	if (mlxsw_sp_port->default_vlan->fid)
3763 		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);
3764 
3765 	/* Join a router interface configured on the LAG, if exists */
3766 	err = mlxsw_sp_port_vlan_router_join(mlxsw_sp_port->default_vlan,
3767 					     lag_dev, extack);
3768 	if (err)
3769 		goto err_router_join;
3770 
3771 	return 0;
3772 
3773 err_router_join:
3774 	lag->ref_count--;
3775 	mlxsw_sp_port->lagged = 0;
3776 	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
3777 				     mlxsw_sp_port->local_port);
3778 	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
3779 err_col_port_add:
3780 	if (!lag->ref_count)
3781 		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
3782 	return err;
3783 }
3784 
3785 static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
3786 				    struct net_device *lag_dev)
3787 {
3788 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3789 	u16 lag_id = mlxsw_sp_port->lag_id;
3790 	struct mlxsw_sp_upper *lag;
3791 
3792 	if (!mlxsw_sp_port->lagged)
3793 		return;
3794 	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
3795 	WARN_ON(lag->ref_count == 0);
3796 
3797 	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
3798 
3799 	/* Any VLANs configured on the port are no longer valid */
3800 	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, false);
3801 	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port->default_vlan);
3802 	/* Make the LAG and its directly linked uppers leave bridges they
3803 	 * are memeber in
3804 	 */
3805 	mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev);
3806 
3807 	if (lag->ref_count == 1)
3808 		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
3809 
3810 	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
3811 				     mlxsw_sp_port->local_port);
3812 	mlxsw_sp_port->lagged = 0;
3813 	lag->ref_count--;
3814 
3815 	/* Make sure untagged frames are allowed to ingress */
3816 	mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
3817 			       ETH_P_8021Q);
3818 }
3819 
3820 static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
3821 				      u16 lag_id)
3822 {
3823 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3824 	char sldr_pl[MLXSW_REG_SLDR_LEN];
3825 
3826 	mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
3827 					 mlxsw_sp_port->local_port);
3828 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3829 }
3830 
3831 static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
3832 					 u16 lag_id)
3833 {
3834 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3835 	char sldr_pl[MLXSW_REG_SLDR_LEN];
3836 
3837 	mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
3838 					    mlxsw_sp_port->local_port);
3839 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3840 }
3841 
3842 static int
3843 mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port)
3844 {
3845 	int err;
3846 
3847 	err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port,
3848 					   mlxsw_sp_port->lag_id);
3849 	if (err)
3850 		return err;
3851 
3852 	err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
3853 	if (err)
3854 		goto err_dist_port_add;
3855 
3856 	return 0;
3857 
3858 err_dist_port_add:
3859 	mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id);
3860 	return err;
3861 }
3862 
3863 static int
3864 mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port)
3865 {
3866 	int err;
3867 
3868 	err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
3869 					    mlxsw_sp_port->lag_id);
3870 	if (err)
3871 		return err;
3872 
3873 	err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port,
3874 					    mlxsw_sp_port->lag_id);
3875 	if (err)
3876 		goto err_col_port_disable;
3877 
3878 	return 0;
3879 
3880 err_col_port_disable:
3881 	mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
3882 	return err;
3883 }
3884 
3885 static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
3886 				     struct netdev_lag_lower_state_info *info)
3887 {
3888 	if (info->tx_enabled)
3889 		return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port);
3890 	else
3891 		return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
3892 }
3893 
3894 static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
3895 				 bool enable)
3896 {
3897 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3898 	enum mlxsw_reg_spms_state spms_state;
3899 	char *spms_pl;
3900 	u16 vid;
3901 	int err;
3902 
3903 	spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING :
3904 			      MLXSW_REG_SPMS_STATE_DISCARDING;
3905 
3906 	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
3907 	if (!spms_pl)
3908 		return -ENOMEM;
3909 	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
3910 
3911 	for (vid = 0; vid < VLAN_N_VID; vid++)
3912 		mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
3913 
3914 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
3915 	kfree(spms_pl);
3916 	return err;
3917 }
3918 
3919 static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
3920 {
3921 	u16 vid = 1;
3922 	int err;
3923 
3924 	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
3925 	if (err)
3926 		return err;
3927 	err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
3928 	if (err)
3929 		goto err_port_stp_set;
3930 	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
3931 				     true, false);
3932 	if (err)
3933 		goto err_port_vlan_set;
3934 
3935 	for (; vid <= VLAN_N_VID - 1; vid++) {
3936 		err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
3937 						     vid, false);
3938 		if (err)
3939 			goto err_vid_learning_set;
3940 	}
3941 
3942 	return 0;
3943 
3944 err_vid_learning_set:
3945 	for (vid--; vid >= 1; vid--)
3946 		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
3947 err_port_vlan_set:
3948 	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
3949 err_port_stp_set:
3950 	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
3951 	return err;
3952 }
3953 
3954 static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
3955 {
3956 	u16 vid;
3957 
3958 	for (vid = VLAN_N_VID - 1; vid >= 1; vid--)
3959 		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
3960 					       vid, true);
3961 
3962 	mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
3963 			       false, false);
3964 	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
3965 	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
3966 }
3967 
3968 static bool mlxsw_sp_bridge_has_multiple_vxlans(struct net_device *br_dev)
3969 {
3970 	unsigned int num_vxlans = 0;
3971 	struct net_device *dev;
3972 	struct list_head *iter;
3973 
3974 	netdev_for_each_lower_dev(br_dev, dev, iter) {
3975 		if (netif_is_vxlan(dev))
3976 			num_vxlans++;
3977 	}
3978 
3979 	return num_vxlans > 1;
3980 }
3981 
3982 static bool mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device *br_dev)
3983 {
3984 	DECLARE_BITMAP(vlans, VLAN_N_VID) = {0};
3985 	struct net_device *dev;
3986 	struct list_head *iter;
3987 
3988 	netdev_for_each_lower_dev(br_dev, dev, iter) {
3989 		u16 pvid;
3990 		int err;
3991 
3992 		if (!netif_is_vxlan(dev))
3993 			continue;
3994 
3995 		err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid);
3996 		if (err || !pvid)
3997 			continue;
3998 
3999 		if (test_and_set_bit(pvid, vlans))
4000 			return false;
4001 	}
4002 
4003 	return true;
4004 }
4005 
4006 static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev,
4007 					   struct netlink_ext_ack *extack)
4008 {
4009 	if (br_multicast_enabled(br_dev)) {
4010 		NL_SET_ERR_MSG_MOD(extack, "Multicast can not be enabled on a bridge with a VxLAN device");
4011 		return false;
4012 	}
4013 
4014 	if (!br_vlan_enabled(br_dev) &&
4015 	    mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) {
4016 		NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge");
4017 		return false;
4018 	}
4019 
4020 	if (br_vlan_enabled(br_dev) &&
4021 	    !mlxsw_sp_bridge_vxlan_vlan_is_valid(br_dev)) {
4022 		NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices cannot have the same VLAN as PVID and egress untagged");
4023 		return false;
4024 	}
4025 
4026 	return true;
4027 }
4028 
4029 static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
4030 					       struct net_device *dev,
4031 					       unsigned long event, void *ptr)
4032 {
4033 	struct netdev_notifier_changeupper_info *info;
4034 	struct mlxsw_sp_port *mlxsw_sp_port;
4035 	struct netlink_ext_ack *extack;
4036 	struct net_device *upper_dev;
4037 	struct mlxsw_sp *mlxsw_sp;
4038 	int err = 0;
4039 	u16 proto;
4040 
4041 	mlxsw_sp_port = netdev_priv(dev);
4042 	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4043 	info = ptr;
4044 	extack = netdev_notifier_info_to_extack(&info->info);
4045 
4046 	switch (event) {
4047 	case NETDEV_PRECHANGEUPPER:
4048 		upper_dev = info->upper_dev;
4049 		if (!is_vlan_dev(upper_dev) &&
4050 		    !netif_is_lag_master(upper_dev) &&
4051 		    !netif_is_bridge_master(upper_dev) &&
4052 		    !netif_is_ovs_master(upper_dev) &&
4053 		    !netif_is_macvlan(upper_dev)) {
4054 			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
4055 			return -EINVAL;
4056 		}
4057 		if (!info->linking)
4058 			break;
4059 		if (netif_is_bridge_master(upper_dev) &&
4060 		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
4061 		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
4062 		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
4063 			return -EOPNOTSUPP;
4064 		if (netdev_has_any_upper_dev(upper_dev) &&
4065 		    (!netif_is_bridge_master(upper_dev) ||
4066 		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
4067 							  upper_dev))) {
4068 			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
4069 			return -EINVAL;
4070 		}
4071 		if (netif_is_lag_master(upper_dev) &&
4072 		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
4073 					       info->upper_info, extack))
4074 			return -EINVAL;
4075 		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) {
4076 			NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN");
4077 			return -EINVAL;
4078 		}
4079 		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
4080 		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) {
4081 			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port");
4082 			return -EINVAL;
4083 		}
4084 		if (netif_is_macvlan(upper_dev) &&
4085 		    !mlxsw_sp_rif_exists(mlxsw_sp, lower_dev)) {
4086 			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
4087 			return -EOPNOTSUPP;
4088 		}
4089 		if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) {
4090 			NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN");
4091 			return -EINVAL;
4092 		}
4093 		if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) {
4094 			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port");
4095 			return -EINVAL;
4096 		}
4097 		if (netif_is_bridge_master(upper_dev)) {
4098 			br_vlan_get_proto(upper_dev, &proto);
4099 			if (br_vlan_enabled(upper_dev) &&
4100 			    proto != ETH_P_8021Q && proto != ETH_P_8021AD) {
4101 				NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a bridge with unknown VLAN protocol is not supported");
4102 				return -EOPNOTSUPP;
4103 			}
4104 			if (vlan_uses_dev(lower_dev) &&
4105 			    br_vlan_enabled(upper_dev) &&
4106 			    proto == ETH_P_8021AD) {
4107 				NL_SET_ERR_MSG_MOD(extack, "Enslaving a port that already has a VLAN upper to an 802.1ad bridge is not supported");
4108 				return -EOPNOTSUPP;
4109 			}
4110 		}
4111 		if (netif_is_bridge_port(lower_dev) && is_vlan_dev(upper_dev)) {
4112 			struct net_device *br_dev = netdev_master_upper_dev_get(lower_dev);
4113 
4114 			if (br_vlan_enabled(br_dev)) {
4115 				br_vlan_get_proto(br_dev, &proto);
4116 				if (proto == ETH_P_8021AD) {
4117 					NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are not supported on a port enslaved to an 802.1ad bridge");
4118 					return -EOPNOTSUPP;
4119 				}
4120 			}
4121 		}
4122 		if (is_vlan_dev(upper_dev) &&
4123 		    ntohs(vlan_dev_vlan_proto(upper_dev)) != ETH_P_8021Q) {
4124 			NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol");
4125 			return -EOPNOTSUPP;
4126 		}
4127 		break;
4128 	case NETDEV_CHANGEUPPER:
4129 		upper_dev = info->upper_dev;
4130 		if (netif_is_bridge_master(upper_dev)) {
4131 			if (info->linking)
4132 				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
4133 								lower_dev,
4134 								upper_dev,
4135 								extack);
4136 			else
4137 				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
4138 							   lower_dev,
4139 							   upper_dev);
4140 		} else if (netif_is_lag_master(upper_dev)) {
4141 			if (info->linking) {
4142 				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
4143 							     upper_dev, extack);
4144 			} else {
4145 				mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
4146 				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
4147 							upper_dev);
4148 			}
4149 		} else if (netif_is_ovs_master(upper_dev)) {
4150 			if (info->linking)
4151 				err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
4152 			else
4153 				mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
4154 		} else if (netif_is_macvlan(upper_dev)) {
4155 			if (!info->linking)
4156 				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
4157 		} else if (is_vlan_dev(upper_dev)) {
4158 			struct net_device *br_dev;
4159 
4160 			if (!netif_is_bridge_port(upper_dev))
4161 				break;
4162 			if (info->linking)
4163 				break;
4164 			br_dev = netdev_master_upper_dev_get(upper_dev);
4165 			mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev,
4166 						   br_dev);
4167 		}
4168 		break;
4169 	}
4170 
4171 	return err;
4172 }
4173 
4174 static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
4175 					       unsigned long event, void *ptr)
4176 {
4177 	struct netdev_notifier_changelowerstate_info *info;
4178 	struct mlxsw_sp_port *mlxsw_sp_port;
4179 	int err;
4180 
4181 	mlxsw_sp_port = netdev_priv(dev);
4182 	info = ptr;
4183 
4184 	switch (event) {
4185 	case NETDEV_CHANGELOWERSTATE:
4186 		if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
4187 			err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
4188 							info->lower_state_info);
4189 			if (err)
4190 				netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
4191 		}
4192 		break;
4193 	}
4194 
4195 	return 0;
4196 }
4197 
4198 static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev,
4199 					 struct net_device *port_dev,
4200 					 unsigned long event, void *ptr)
4201 {
4202 	switch (event) {
4203 	case NETDEV_PRECHANGEUPPER:
4204 	case NETDEV_CHANGEUPPER:
4205 		return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev,
4206 							   event, ptr);
4207 	case NETDEV_CHANGELOWERSTATE:
4208 		return mlxsw_sp_netdevice_port_lower_event(port_dev, event,
4209 							   ptr);
4210 	}
4211 
4212 	return 0;
4213 }
4214 
4215 static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
4216 					unsigned long event, void *ptr)
4217 {
4218 	struct net_device *dev;
4219 	struct list_head *iter;
4220 	int ret;
4221 
4222 	netdev_for_each_lower_dev(lag_dev, dev, iter) {
4223 		if (mlxsw_sp_port_dev_check(dev)) {
4224 			ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event,
4225 							    ptr);
4226 			if (ret)
4227 				return ret;
4228 		}
4229 	}
4230 
4231 	return 0;
4232 }
4233 
4234 static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
4235 					      struct net_device *dev,
4236 					      unsigned long event, void *ptr,
4237 					      u16 vid)
4238 {
4239 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
4240 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4241 	struct netdev_notifier_changeupper_info *info = ptr;
4242 	struct netlink_ext_ack *extack;
4243 	struct net_device *upper_dev;
4244 	int err = 0;
4245 
4246 	extack = netdev_notifier_info_to_extack(&info->info);
4247 
4248 	switch (event) {
4249 	case NETDEV_PRECHANGEUPPER:
4250 		upper_dev = info->upper_dev;
4251 		if (!netif_is_bridge_master(upper_dev) &&
4252 		    !netif_is_macvlan(upper_dev)) {
4253 			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
4254 			return -EINVAL;
4255 		}
4256 		if (!info->linking)
4257 			break;
4258 		if (netif_is_bridge_master(upper_dev) &&
4259 		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
4260 		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
4261 		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
4262 			return -EOPNOTSUPP;
4263 		if (netdev_has_any_upper_dev(upper_dev) &&
4264 		    (!netif_is_bridge_master(upper_dev) ||
4265 		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
4266 							  upper_dev))) {
4267 			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
4268 			return -EINVAL;
4269 		}
4270 		if (netif_is_macvlan(upper_dev) &&
4271 		    !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) {
4272 			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
4273 			return -EOPNOTSUPP;
4274 		}
4275 		break;
4276 	case NETDEV_CHANGEUPPER:
4277 		upper_dev = info->upper_dev;
4278 		if (netif_is_bridge_master(upper_dev)) {
4279 			if (info->linking)
4280 				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
4281 								vlan_dev,
4282 								upper_dev,
4283 								extack);
4284 			else
4285 				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
4286 							   vlan_dev,
4287 							   upper_dev);
4288 		} else if (netif_is_macvlan(upper_dev)) {
4289 			if (!info->linking)
4290 				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
4291 		} else {
4292 			err = -EINVAL;
4293 			WARN_ON(1);
4294 		}
4295 		break;
4296 	}
4297 
4298 	return err;
4299 }
4300 
4301 static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev,
4302 						  struct net_device *lag_dev,
4303 						  unsigned long event,
4304 						  void *ptr, u16 vid)
4305 {
4306 	struct net_device *dev;
4307 	struct list_head *iter;
4308 	int ret;
4309 
4310 	netdev_for_each_lower_dev(lag_dev, dev, iter) {
4311 		if (mlxsw_sp_port_dev_check(dev)) {
4312 			ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev,
4313 								 event, ptr,
4314 								 vid);
4315 			if (ret)
4316 				return ret;
4317 		}
4318 	}
4319 
4320 	return 0;
4321 }
4322 
4323 static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev,
4324 						struct net_device *br_dev,
4325 						unsigned long event, void *ptr,
4326 						u16 vid)
4327 {
4328 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
4329 	struct netdev_notifier_changeupper_info *info = ptr;
4330 	struct netlink_ext_ack *extack;
4331 	struct net_device *upper_dev;
4332 
4333 	if (!mlxsw_sp)
4334 		return 0;
4335 
4336 	extack = netdev_notifier_info_to_extack(&info->info);
4337 
4338 	switch (event) {
4339 	case NETDEV_PRECHANGEUPPER:
4340 		upper_dev = info->upper_dev;
4341 		if (!netif_is_macvlan(upper_dev)) {
4342 			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
4343 			return -EOPNOTSUPP;
4344 		}
4345 		if (!info->linking)
4346 			break;
4347 		if (netif_is_macvlan(upper_dev) &&
4348 		    !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) {
4349 			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
4350 			return -EOPNOTSUPP;
4351 		}
4352 		break;
4353 	case NETDEV_CHANGEUPPER:
4354 		upper_dev = info->upper_dev;
4355 		if (info->linking)
4356 			break;
4357 		if (netif_is_macvlan(upper_dev))
4358 			mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
4359 		break;
4360 	}
4361 
4362 	return 0;
4363 }
4364 
4365 static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
4366 					 unsigned long event, void *ptr)
4367 {
4368 	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
4369 	u16 vid = vlan_dev_vlan_id(vlan_dev);
4370 
4371 	if (mlxsw_sp_port_dev_check(real_dev))
4372 		return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev,
4373 							  event, ptr, vid);
4374 	else if (netif_is_lag_master(real_dev))
4375 		return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev,
4376 							      real_dev, event,
4377 							      ptr, vid);
4378 	else if (netif_is_bridge_master(real_dev))
4379 		return mlxsw_sp_netdevice_bridge_vlan_event(vlan_dev, real_dev,
4380 							    event, ptr, vid);
4381 
4382 	return 0;
4383 }
4384 
4385 static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
4386 					   unsigned long event, void *ptr)
4387 {
4388 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(br_dev);
4389 	struct netdev_notifier_changeupper_info *info = ptr;
4390 	struct netlink_ext_ack *extack;
4391 	struct net_device *upper_dev;
4392 	u16 proto;
4393 
4394 	if (!mlxsw_sp)
4395 		return 0;
4396 
4397 	extack = netdev_notifier_info_to_extack(&info->info);
4398 
4399 	switch (event) {
4400 	case NETDEV_PRECHANGEUPPER:
4401 		upper_dev = info->upper_dev;
4402 		if (!is_vlan_dev(upper_dev) && !netif_is_macvlan(upper_dev)) {
4403 			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
4404 			return -EOPNOTSUPP;
4405 		}
4406 		if (!info->linking)
4407 			break;
4408 		if (br_vlan_enabled(br_dev)) {
4409 			br_vlan_get_proto(br_dev, &proto);
4410 			if (proto == ETH_P_8021AD) {
4411 				NL_SET_ERR_MSG_MOD(extack, "Upper devices are not supported on top of an 802.1ad bridge");
4412 				return -EOPNOTSUPP;
4413 			}
4414 		}
4415 		if (is_vlan_dev(upper_dev) &&
4416 		    ntohs(vlan_dev_vlan_proto(upper_dev)) != ETH_P_8021Q) {
4417 			NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol");
4418 			return -EOPNOTSUPP;
4419 		}
4420 		if (netif_is_macvlan(upper_dev) &&
4421 		    !mlxsw_sp_rif_exists(mlxsw_sp, br_dev)) {
4422 			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
4423 			return -EOPNOTSUPP;
4424 		}
4425 		break;
4426 	case NETDEV_CHANGEUPPER:
4427 		upper_dev = info->upper_dev;
4428 		if (info->linking)
4429 			break;
4430 		if (is_vlan_dev(upper_dev))
4431 			mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev);
4432 		if (netif_is_macvlan(upper_dev))
4433 			mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
4434 		break;
4435 	}
4436 
4437 	return 0;
4438 }
4439 
4440 static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev,
4441 					    unsigned long event, void *ptr)
4442 {
4443 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev);
4444 	struct netdev_notifier_changeupper_info *info = ptr;
4445 	struct netlink_ext_ack *extack;
4446 
4447 	if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER)
4448 		return 0;
4449 
4450 	extack = netdev_notifier_info_to_extack(&info->info);
4451 
4452 	/* VRF enslavement is handled in mlxsw_sp_netdevice_vrf_event() */
4453 	NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
4454 
4455 	return -EOPNOTSUPP;
4456 }
4457 
4458 static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
4459 {
4460 	struct netdev_notifier_changeupper_info *info = ptr;
4461 
4462 	if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
4463 		return false;
4464 	return netif_is_l3_master(info->upper_dev);
4465 }
4466 
4467 static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp,
4468 					  struct net_device *dev,
4469 					  unsigned long event, void *ptr)
4470 {
4471 	struct netdev_notifier_changeupper_info *cu_info;
4472 	struct netdev_notifier_info *info = ptr;
4473 	struct netlink_ext_ack *extack;
4474 	struct net_device *upper_dev;
4475 
4476 	extack = netdev_notifier_info_to_extack(info);
4477 
4478 	switch (event) {
4479 	case NETDEV_CHANGEUPPER:
4480 		cu_info = container_of(info,
4481 				       struct netdev_notifier_changeupper_info,
4482 				       info);
4483 		upper_dev = cu_info->upper_dev;
4484 		if (!netif_is_bridge_master(upper_dev))
4485 			return 0;
4486 		if (!mlxsw_sp_lower_get(upper_dev))
4487 			return 0;
4488 		if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
4489 			return -EOPNOTSUPP;
4490 		if (cu_info->linking) {
4491 			if (!netif_running(dev))
4492 				return 0;
4493 			/* When the bridge is VLAN-aware, the VNI of the VxLAN
4494 			 * device needs to be mapped to a VLAN, but at this
4495 			 * point no VLANs are configured on the VxLAN device
4496 			 */
4497 			if (br_vlan_enabled(upper_dev))
4498 				return 0;
4499 			return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev,
4500 							  dev, 0, extack);
4501 		} else {
4502 			/* VLANs were already flushed, which triggered the
4503 			 * necessary cleanup
4504 			 */
4505 			if (br_vlan_enabled(upper_dev))
4506 				return 0;
4507 			mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
4508 		}
4509 		break;
4510 	case NETDEV_PRE_UP:
4511 		upper_dev = netdev_master_upper_dev_get(dev);
4512 		if (!upper_dev)
4513 			return 0;
4514 		if (!netif_is_bridge_master(upper_dev))
4515 			return 0;
4516 		if (!mlxsw_sp_lower_get(upper_dev))
4517 			return 0;
4518 		return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, 0,
4519 						  extack);
4520 	case NETDEV_DOWN:
4521 		upper_dev = netdev_master_upper_dev_get(dev);
4522 		if (!upper_dev)
4523 			return 0;
4524 		if (!netif_is_bridge_master(upper_dev))
4525 			return 0;
4526 		if (!mlxsw_sp_lower_get(upper_dev))
4527 			return 0;
4528 		mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
4529 		break;
4530 	}
4531 
4532 	return 0;
4533 }
4534 
4535 static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
4536 				    unsigned long event, void *ptr)
4537 {
4538 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4539 	struct mlxsw_sp_span_entry *span_entry;
4540 	struct mlxsw_sp *mlxsw_sp;
4541 	int err = 0;
4542 
4543 	mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb);
4544 	if (event == NETDEV_UNREGISTER) {
4545 		span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev);
4546 		if (span_entry)
4547 			mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry);
4548 	}
4549 	mlxsw_sp_span_respin(mlxsw_sp);
4550 
4551 	if (netif_is_vxlan(dev))
4552 		err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr);
4553 	if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev))
4554 		err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev,
4555 						       event, ptr);
4556 	else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev))
4557 		err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev,
4558 						       event, ptr);
4559 	else if (event == NETDEV_PRE_CHANGEADDR ||
4560 		 event == NETDEV_CHANGEADDR ||
4561 		 event == NETDEV_CHANGEMTU)
4562 		err = mlxsw_sp_netdevice_router_port_event(dev, event, ptr);
4563 	else if (mlxsw_sp_is_vrf_event(event, ptr))
4564 		err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
4565 	else if (mlxsw_sp_port_dev_check(dev))
4566 		err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr);
4567 	else if (netif_is_lag_master(dev))
4568 		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
4569 	else if (is_vlan_dev(dev))
4570 		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
4571 	else if (netif_is_bridge_master(dev))
4572 		err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
4573 	else if (netif_is_macvlan(dev))
4574 		err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr);
4575 
4576 	return notifier_from_errno(err);
4577 }
4578 
4579 static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly = {
4580 	.notifier_call = mlxsw_sp_inetaddr_valid_event,
4581 };
4582 
4583 static struct notifier_block mlxsw_sp_inet6addr_valid_nb __read_mostly = {
4584 	.notifier_call = mlxsw_sp_inet6addr_valid_event,
4585 };
4586 
4587 static const struct pci_device_id mlxsw_sp1_pci_id_table[] = {
4588 	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
4589 	{0, },
4590 };
4591 
4592 static struct pci_driver mlxsw_sp1_pci_driver = {
4593 	.name = mlxsw_sp1_driver_name,
4594 	.id_table = mlxsw_sp1_pci_id_table,
4595 };
4596 
4597 static const struct pci_device_id mlxsw_sp2_pci_id_table[] = {
4598 	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0},
4599 	{0, },
4600 };
4601 
4602 static struct pci_driver mlxsw_sp2_pci_driver = {
4603 	.name = mlxsw_sp2_driver_name,
4604 	.id_table = mlxsw_sp2_pci_id_table,
4605 };
4606 
4607 static const struct pci_device_id mlxsw_sp3_pci_id_table[] = {
4608 	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM3), 0},
4609 	{0, },
4610 };
4611 
4612 static struct pci_driver mlxsw_sp3_pci_driver = {
4613 	.name = mlxsw_sp3_driver_name,
4614 	.id_table = mlxsw_sp3_pci_id_table,
4615 };
4616 
4617 static int __init mlxsw_sp_module_init(void)
4618 {
4619 	int err;
4620 
4621 	register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
4622 	register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
4623 
4624 	err = mlxsw_core_driver_register(&mlxsw_sp1_driver);
4625 	if (err)
4626 		goto err_sp1_core_driver_register;
4627 
4628 	err = mlxsw_core_driver_register(&mlxsw_sp2_driver);
4629 	if (err)
4630 		goto err_sp2_core_driver_register;
4631 
4632 	err = mlxsw_core_driver_register(&mlxsw_sp3_driver);
4633 	if (err)
4634 		goto err_sp3_core_driver_register;
4635 
4636 	err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver);
4637 	if (err)
4638 		goto err_sp1_pci_driver_register;
4639 
4640 	err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver);
4641 	if (err)
4642 		goto err_sp2_pci_driver_register;
4643 
4644 	err = mlxsw_pci_driver_register(&mlxsw_sp3_pci_driver);
4645 	if (err)
4646 		goto err_sp3_pci_driver_register;
4647 
4648 	return 0;
4649 
4650 err_sp3_pci_driver_register:
4651 	mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
4652 err_sp2_pci_driver_register:
4653 	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
4654 err_sp1_pci_driver_register:
4655 	mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
4656 err_sp3_core_driver_register:
4657 	mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
4658 err_sp2_core_driver_register:
4659 	mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
4660 err_sp1_core_driver_register:
4661 	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
4662 	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
4663 	return err;
4664 }
4665 
4666 static void __exit mlxsw_sp_module_exit(void)
4667 {
4668 	mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver);
4669 	mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
4670 	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
4671 	mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
4672 	mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
4673 	mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
4674 	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
4675 	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
4676 }
4677 
4678 module_init(mlxsw_sp_module_init);
4679 module_exit(mlxsw_sp_module_exit);
4680 
4681 MODULE_LICENSE("Dual BSD/GPL");
4682 MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
4683 MODULE_DESCRIPTION("Mellanox Spectrum driver");
4684 MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table);
4685 MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table);
4686 MODULE_DEVICE_TABLE(pci, mlxsw_sp3_pci_id_table);
4687 MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME);
4688 MODULE_FIRMWARE(MLXSW_SP2_FW_FILENAME);
4689 MODULE_FIRMWARE(MLXSW_SP3_FW_FILENAME);
4690