1 /*
2  * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
3  * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
4  * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
5  * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
6  * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the names of the copyright holders nor the names of its
17  *    contributors may be used to endorse or promote products derived from
18  *    this software without specific prior written permission.
19  *
20  * Alternatively, this software may be distributed under the terms of the
21  * GNU General Public License ("GPL") version 2 as published by the Free
22  * Software Foundation.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34  * POSSIBILITY OF SUCH DAMAGE.
35  */
36 
37 #include <linux/kernel.h>
38 #include <linux/module.h>
39 #include <linux/types.h>
40 #include <linux/netdevice.h>
41 #include <linux/etherdevice.h>
42 #include <linux/ethtool.h>
43 #include <linux/slab.h>
44 #include <linux/device.h>
45 #include <linux/skbuff.h>
46 #include <linux/if_vlan.h>
47 #include <linux/if_bridge.h>
48 #include <linux/workqueue.h>
49 #include <linux/jiffies.h>
50 #include <linux/bitops.h>
51 #include <linux/list.h>
52 #include <linux/dcbnl.h>
53 #include <net/switchdev.h>
54 #include <generated/utsrelease.h>
55 
56 #include "spectrum.h"
57 #include "core.h"
58 #include "reg.h"
59 #include "port.h"
60 #include "trap.h"
61 #include "txheader.h"
62 
63 static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
64 static const char mlxsw_sp_driver_version[] = "1.0";
65 
66 /* tx_hdr_version
67  * Tx header version.
68  * Must be set to 1.
69  */
70 MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);
71 
72 /* tx_hdr_ctl
73  * Packet control type.
74  * 0 - Ethernet control (e.g. EMADs, LACP)
75  * 1 - Ethernet data
76  */
77 MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);
78 
79 /* tx_hdr_proto
80  * Packet protocol type. Must be set to 1 (Ethernet).
81  */
82 MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);
83 
84 /* tx_hdr_rx_is_router
85  * Packet is sent from the router. Valid for data packets only.
86  */
87 MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);
88 
89 /* tx_hdr_fid_valid
90  * Indicates if the 'fid' field is valid and should be used for
91  * forwarding lookup. Valid for data packets only.
92  */
93 MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);
94 
95 /* tx_hdr_swid
96  * Switch partition ID. Must be set to 0.
97  */
98 MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);
99 
100 /* tx_hdr_control_tclass
101  * Indicates if the packet should use the control TClass and not one
102  * of the data TClasses.
103  */
104 MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);
105 
106 /* tx_hdr_etclass
107  * Egress TClass to be used on the egress device on the egress port.
108  */
109 MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);
110 
111 /* tx_hdr_port_mid
112  * Destination local port for unicast packets.
113  * Destination multicast ID for multicast packets.
114  *
115  * Control packets are directed to a specific egress port, while data
116  * packets are transmitted through the CPU port (0) into the switch partition,
117  * where forwarding rules are applied.
118  */
119 MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);
120 
121 /* tx_hdr_fid
122  * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
123  * set, otherwise calculated based on the packet's VID using VID to FID mapping.
124  * Valid for data packets only.
125  */
126 MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);
127 
128 /* tx_hdr_type
129  * 0 - Data packets
130  * 6 - Control packets
131  */
132 MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
133 
134 static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
135 				     const struct mlxsw_tx_info *tx_info)
136 {
137 	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
138 
139 	memset(txhdr, 0, MLXSW_TXHDR_LEN);
140 
141 	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
142 	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
143 	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
144 	mlxsw_tx_hdr_swid_set(txhdr, 0);
145 	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
146 	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
147 	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
148 }
149 
150 static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
151 {
152 	char spad_pl[MLXSW_REG_SPAD_LEN];
153 	int err;
154 
155 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
156 	if (err)
157 		return err;
158 	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
159 	return 0;
160 }
161 
162 static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
163 					  bool is_up)
164 {
165 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
166 	char paos_pl[MLXSW_REG_PAOS_LEN];
167 
168 	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
169 			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
170 			    MLXSW_PORT_ADMIN_STATUS_DOWN);
171 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
172 }
173 
174 static int mlxsw_sp_port_oper_status_get(struct mlxsw_sp_port *mlxsw_sp_port,
175 					 bool *p_is_up)
176 {
177 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
178 	char paos_pl[MLXSW_REG_PAOS_LEN];
179 	u8 oper_status;
180 	int err;
181 
182 	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 0);
183 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
184 	if (err)
185 		return err;
186 	oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
187 	*p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP ? true : false;
188 	return 0;
189 }
190 
191 static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
192 				      unsigned char *addr)
193 {
194 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
195 	char ppad_pl[MLXSW_REG_PPAD_LEN];
196 
197 	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
198 	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
199 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
200 }
201 
202 static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
203 {
204 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
205 	unsigned char *addr = mlxsw_sp_port->dev->dev_addr;
206 
207 	ether_addr_copy(addr, mlxsw_sp->base_mac);
208 	addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
209 	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
210 }
211 
212 static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
213 				       u16 vid, enum mlxsw_reg_spms_state state)
214 {
215 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
216 	char *spms_pl;
217 	int err;
218 
219 	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
220 	if (!spms_pl)
221 		return -ENOMEM;
222 	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
223 	mlxsw_reg_spms_vid_pack(spms_pl, vid, state);
224 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
225 	kfree(spms_pl);
226 	return err;
227 }
228 
229 static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
230 {
231 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
232 	char pmtu_pl[MLXSW_REG_PMTU_LEN];
233 	int max_mtu;
234 	int err;
235 
236 	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
237 	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
238 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
239 	if (err)
240 		return err;
241 	max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
242 
243 	if (mtu > max_mtu)
244 		return -EINVAL;
245 
246 	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
247 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
248 }
249 
250 static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
251 {
252 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
253 	char pspa_pl[MLXSW_REG_PSPA_LEN];
254 
255 	mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
256 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
257 }
258 
259 static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
260 				     bool enable)
261 {
262 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
263 	char svpe_pl[MLXSW_REG_SVPE_LEN];
264 
265 	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
266 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
267 }
268 
269 int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
270 				 enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
271 				 u16 vid)
272 {
273 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
274 	char svfa_pl[MLXSW_REG_SVFA_LEN];
275 
276 	mlxsw_reg_svfa_pack(svfa_pl, mlxsw_sp_port->local_port, mt, valid,
277 			    fid, vid);
278 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
279 }
280 
281 static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
282 					  u16 vid, bool learn_enable)
283 {
284 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
285 	char *spvmlr_pl;
286 	int err;
287 
288 	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
289 	if (!spvmlr_pl)
290 		return -ENOMEM;
291 	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
292 			      learn_enable);
293 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
294 	kfree(spvmlr_pl);
295 	return err;
296 }
297 
298 static int
299 mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
300 {
301 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
302 	char sspr_pl[MLXSW_REG_SSPR_LEN];
303 
304 	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
305 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
306 }
307 
308 static int __mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
309 					   u8 local_port, u8 *p_module,
310 					   u8 *p_width, u8 *p_lane)
311 {
312 	char pmlp_pl[MLXSW_REG_PMLP_LEN];
313 	int err;
314 
315 	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
316 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
317 	if (err)
318 		return err;
319 	*p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
320 	*p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
321 	*p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
322 	return 0;
323 }
324 
325 static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
326 					 u8 local_port, u8 *p_module,
327 					 u8 *p_width)
328 {
329 	u8 lane;
330 
331 	return __mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, p_module,
332 					       p_width, &lane);
333 }
334 
335 static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
336 				    u8 module, u8 width, u8 lane)
337 {
338 	char pmlp_pl[MLXSW_REG_PMLP_LEN];
339 	int i;
340 
341 	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
342 	mlxsw_reg_pmlp_width_set(pmlp_pl, width);
343 	for (i = 0; i < width; i++) {
344 		mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
345 		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i);  /* Rx & Tx */
346 	}
347 
348 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
349 }
350 
351 static int mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u8 local_port)
352 {
353 	char pmlp_pl[MLXSW_REG_PMLP_LEN];
354 
355 	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
356 	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
357 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
358 }
359 
360 static int mlxsw_sp_port_open(struct net_device *dev)
361 {
362 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
363 	int err;
364 
365 	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
366 	if (err)
367 		return err;
368 	netif_start_queue(dev);
369 	return 0;
370 }
371 
372 static int mlxsw_sp_port_stop(struct net_device *dev)
373 {
374 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
375 
376 	netif_stop_queue(dev);
377 	return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
378 }
379 
380 static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
381 				      struct net_device *dev)
382 {
383 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
384 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
385 	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
386 	const struct mlxsw_tx_info tx_info = {
387 		.local_port = mlxsw_sp_port->local_port,
388 		.is_emad = false,
389 	};
390 	u64 len;
391 	int err;
392 
393 	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
394 		return NETDEV_TX_BUSY;
395 
396 	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
397 		struct sk_buff *skb_orig = skb;
398 
399 		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
400 		if (!skb) {
401 			this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
402 			dev_kfree_skb_any(skb_orig);
403 			return NETDEV_TX_OK;
404 		}
405 	}
406 
407 	if (eth_skb_pad(skb)) {
408 		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
409 		return NETDEV_TX_OK;
410 	}
411 
412 	mlxsw_sp_txhdr_construct(skb, &tx_info);
413 	len = skb->len;
414 	/* Due to a race we might fail here because of a full queue. In that
415 	 * unlikely case we simply drop the packet.
416 	 */
417 	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);
418 
419 	if (!err) {
420 		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
421 		u64_stats_update_begin(&pcpu_stats->syncp);
422 		pcpu_stats->tx_packets++;
423 		pcpu_stats->tx_bytes += len;
424 		u64_stats_update_end(&pcpu_stats->syncp);
425 	} else {
426 		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
427 		dev_kfree_skb_any(skb);
428 	}
429 	return NETDEV_TX_OK;
430 }
431 
432 static void mlxsw_sp_set_rx_mode(struct net_device *dev)
433 {
434 }
435 
436 static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
437 {
438 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
439 	struct sockaddr *addr = p;
440 	int err;
441 
442 	if (!is_valid_ether_addr(addr->sa_data))
443 		return -EADDRNOTAVAIL;
444 
445 	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
446 	if (err)
447 		return err;
448 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
449 	return 0;
450 }
451 
452 static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int pg_index, int mtu,
453 				 bool pause_en, bool pfc_en, u16 delay)
454 {
455 	u16 pg_size = 2 * MLXSW_SP_BYTES_TO_CELLS(mtu);
456 
457 	delay = pfc_en ? mlxsw_sp_pfc_delay_get(mtu, delay) :
458 			 MLXSW_SP_PAUSE_DELAY;
459 
460 	if (pause_en || pfc_en)
461 		mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, pg_index,
462 						    pg_size + delay, pg_size);
463 	else
464 		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pg_index, pg_size);
465 }
466 
467 int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
468 				 u8 *prio_tc, bool pause_en,
469 				 struct ieee_pfc *my_pfc)
470 {
471 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
472 	u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
473 	u16 delay = !!my_pfc ? my_pfc->delay : 0;
474 	char pbmc_pl[MLXSW_REG_PBMC_LEN];
475 	int i, j, err;
476 
477 	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
478 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
479 	if (err)
480 		return err;
481 
482 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
483 		bool configure = false;
484 		bool pfc = false;
485 
486 		for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
487 			if (prio_tc[j] == i) {
488 				pfc = pfc_en & BIT(j);
489 				configure = true;
490 				break;
491 			}
492 		}
493 
494 		if (!configure)
495 			continue;
496 		mlxsw_sp_pg_buf_pack(pbmc_pl, i, mtu, pause_en, pfc, delay);
497 	}
498 
499 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
500 }
501 
502 static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
503 				      int mtu, bool pause_en)
504 {
505 	u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
506 	bool dcb_en = !!mlxsw_sp_port->dcb.ets;
507 	struct ieee_pfc *my_pfc;
508 	u8 *prio_tc;
509 
510 	prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
511 	my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;
512 
513 	return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
514 					    pause_en, my_pfc);
515 }
516 
517 static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
518 {
519 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
520 	bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
521 	int err;
522 
523 	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
524 	if (err)
525 		return err;
526 	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
527 	if (err)
528 		goto err_port_mtu_set;
529 	dev->mtu = mtu;
530 	return 0;
531 
532 err_port_mtu_set:
533 	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
534 	return err;
535 }
536 
537 static struct rtnl_link_stats64 *
538 mlxsw_sp_port_get_stats64(struct net_device *dev,
539 			  struct rtnl_link_stats64 *stats)
540 {
541 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
542 	struct mlxsw_sp_port_pcpu_stats *p;
543 	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
544 	u32 tx_dropped = 0;
545 	unsigned int start;
546 	int i;
547 
548 	for_each_possible_cpu(i) {
549 		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
550 		do {
551 			start = u64_stats_fetch_begin_irq(&p->syncp);
552 			rx_packets	= p->rx_packets;
553 			rx_bytes	= p->rx_bytes;
554 			tx_packets	= p->tx_packets;
555 			tx_bytes	= p->tx_bytes;
556 		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
557 
558 		stats->rx_packets	+= rx_packets;
559 		stats->rx_bytes		+= rx_bytes;
560 		stats->tx_packets	+= tx_packets;
561 		stats->tx_bytes		+= tx_bytes;
562 		/* tx_dropped is u32, updated without syncp protection. */
563 		tx_dropped	+= p->tx_dropped;
564 	}
565 	stats->tx_dropped	= tx_dropped;
566 	return stats;
567 }
568 
569 int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
570 			   u16 vid_end, bool is_member, bool untagged)
571 {
572 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
573 	char *spvm_pl;
574 	int err;
575 
576 	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
577 	if (!spvm_pl)
578 		return -ENOMEM;
579 
580 	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port,	vid_begin,
581 			    vid_end, is_member, untagged);
582 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
583 	kfree(spvm_pl);
584 	return err;
585 }
586 
587 static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
588 {
589 	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
590 	u16 vid, last_visited_vid;
591 	int err;
592 
593 	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
594 		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, vid,
595 						   vid);
596 		if (err) {
597 			last_visited_vid = vid;
598 			goto err_port_vid_to_fid_set;
599 		}
600 	}
601 
602 	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
603 	if (err) {
604 		last_visited_vid = VLAN_N_VID;
605 		goto err_port_vid_to_fid_set;
606 	}
607 
608 	return 0;
609 
610 err_port_vid_to_fid_set:
611 	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
612 		mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, vid,
613 					     vid);
614 	return err;
615 }
616 
617 static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
618 {
619 	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
620 	u16 vid;
621 	int err;
622 
623 	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
624 	if (err)
625 		return err;
626 
627 	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
628 		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false,
629 						   vid, vid);
630 		if (err)
631 			return err;
632 	}
633 
634 	return 0;
635 }
636 
637 static struct mlxsw_sp_vfid *
638 mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp, u16 vid)
639 {
640 	struct mlxsw_sp_vfid *vfid;
641 
642 	list_for_each_entry(vfid, &mlxsw_sp->port_vfids.list, list) {
643 		if (vfid->vid == vid)
644 			return vfid;
645 	}
646 
647 	return NULL;
648 }
649 
650 static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
651 {
652 	return find_first_zero_bit(mlxsw_sp->port_vfids.mapped,
653 				   MLXSW_SP_VFID_PORT_MAX);
654 }
655 
656 static int __mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp, u16 vfid)
657 {
658 	u16 fid = mlxsw_sp_vfid_to_fid(vfid);
659 	char sfmr_pl[MLXSW_REG_SFMR_LEN];
660 
661 	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid, 0);
662 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
663 }
664 
665 static void __mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp, u16 vfid)
666 {
667 	u16 fid = mlxsw_sp_vfid_to_fid(vfid);
668 	char sfmr_pl[MLXSW_REG_SFMR_LEN];
669 
670 	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID, fid, 0);
671 	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
672 }
673 
674 static struct mlxsw_sp_vfid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp,
675 						  u16 vid)
676 {
677 	struct device *dev = mlxsw_sp->bus_info->dev;
678 	struct mlxsw_sp_vfid *vfid;
679 	u16 n_vfid;
680 	int err;
681 
682 	n_vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp);
683 	if (n_vfid == MLXSW_SP_VFID_PORT_MAX) {
684 		dev_err(dev, "No available vFIDs\n");
685 		return ERR_PTR(-ERANGE);
686 	}
687 
688 	err = __mlxsw_sp_vfid_create(mlxsw_sp, n_vfid);
689 	if (err) {
690 		dev_err(dev, "Failed to create vFID=%d\n", n_vfid);
691 		return ERR_PTR(err);
692 	}
693 
694 	vfid = kzalloc(sizeof(*vfid), GFP_KERNEL);
695 	if (!vfid)
696 		goto err_allocate_vfid;
697 
698 	vfid->vfid = n_vfid;
699 	vfid->vid = vid;
700 
701 	list_add(&vfid->list, &mlxsw_sp->port_vfids.list);
702 	set_bit(n_vfid, mlxsw_sp->port_vfids.mapped);
703 
704 	return vfid;
705 
706 err_allocate_vfid:
707 	__mlxsw_sp_vfid_destroy(mlxsw_sp, n_vfid);
708 	return ERR_PTR(-ENOMEM);
709 }
710 
711 static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
712 				  struct mlxsw_sp_vfid *vfid)
713 {
714 	clear_bit(vfid->vfid, mlxsw_sp->port_vfids.mapped);
715 	list_del(&vfid->list);
716 
717 	__mlxsw_sp_vfid_destroy(mlxsw_sp, vfid->vfid);
718 
719 	kfree(vfid);
720 }
721 
722 static struct mlxsw_sp_port *
723 mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port,
724 			   struct mlxsw_sp_vfid *vfid)
725 {
726 	struct mlxsw_sp_port *mlxsw_sp_vport;
727 
728 	mlxsw_sp_vport = kzalloc(sizeof(*mlxsw_sp_vport), GFP_KERNEL);
729 	if (!mlxsw_sp_vport)
730 		return NULL;
731 
732 	/* dev will be set correctly after the VLAN device is linked
733 	 * with the real device. In case of bridge SELF invocation, dev
734 	 * will remain as is.
735 	 */
736 	mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
737 	mlxsw_sp_vport->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
738 	mlxsw_sp_vport->local_port = mlxsw_sp_port->local_port;
739 	mlxsw_sp_vport->stp_state = BR_STATE_FORWARDING;
740 	mlxsw_sp_vport->lagged = mlxsw_sp_port->lagged;
741 	mlxsw_sp_vport->lag_id = mlxsw_sp_port->lag_id;
742 	mlxsw_sp_vport->vport.vfid = vfid;
743 	mlxsw_sp_vport->vport.vid = vfid->vid;
744 
745 	list_add(&mlxsw_sp_vport->vport.list, &mlxsw_sp_port->vports_list);
746 
747 	return mlxsw_sp_vport;
748 }
749 
750 static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport)
751 {
752 	list_del(&mlxsw_sp_vport->vport.list);
753 	kfree(mlxsw_sp_vport);
754 }
755 
756 int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
757 			  u16 vid)
758 {
759 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
760 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
761 	struct mlxsw_sp_port *mlxsw_sp_vport;
762 	struct mlxsw_sp_vfid *vfid;
763 	int err;
764 
765 	/* VLAN 0 is added to HW filter when device goes up, but it is
766 	 * reserved in our case, so simply return.
767 	 */
768 	if (!vid)
769 		return 0;
770 
771 	if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid)) {
772 		netdev_warn(dev, "VID=%d already configured\n", vid);
773 		return 0;
774 	}
775 
776 	vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid);
777 	if (!vfid) {
778 		vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid);
779 		if (IS_ERR(vfid)) {
780 			netdev_err(dev, "Failed to create vFID for VID=%d\n",
781 				   vid);
782 			return PTR_ERR(vfid);
783 		}
784 	}
785 
786 	mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vfid);
787 	if (!mlxsw_sp_vport) {
788 		netdev_err(dev, "Failed to create vPort for VID=%d\n", vid);
789 		err = -ENOMEM;
790 		goto err_port_vport_create;
791 	}
792 
793 	if (!vfid->nr_vports) {
794 		err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid,
795 					       true, false);
796 		if (err) {
797 			netdev_err(dev, "Failed to setup flooding for vFID=%d\n",
798 				   vfid->vfid);
799 			goto err_vport_flood_set;
800 		}
801 	}
802 
803 	/* When adding the first VLAN interface on a bridged port we need to
804 	 * transition all the active 802.1Q bridge VLANs to use explicit
805 	 * {Port, VID} to FID mappings and set the port's mode to Virtual mode.
806 	 */
807 	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
808 		err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
809 		if (err) {
810 			netdev_err(dev, "Failed to set to Virtual mode\n");
811 			goto err_port_vp_mode_trans;
812 		}
813 	}
814 
815 	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
816 					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
817 					   true,
818 					   mlxsw_sp_vfid_to_fid(vfid->vfid),
819 					   vid);
820 	if (err) {
821 		netdev_err(dev, "Failed to map {Port, VID=%d} to vFID=%d\n",
822 			   vid, vfid->vfid);
823 		goto err_port_vid_to_fid_set;
824 	}
825 
826 	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
827 	if (err) {
828 		netdev_err(dev, "Failed to disable learning for VID=%d\n", vid);
829 		goto err_port_vid_learning_set;
830 	}
831 
832 	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, false);
833 	if (err) {
834 		netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
835 			   vid);
836 		goto err_port_add_vid;
837 	}
838 
839 	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
840 					  MLXSW_REG_SPMS_STATE_FORWARDING);
841 	if (err) {
842 		netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
843 		goto err_port_stp_state_set;
844 	}
845 
846 	vfid->nr_vports++;
847 
848 	return 0;
849 
850 err_port_stp_state_set:
851 	mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
852 err_port_add_vid:
853 	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
854 err_port_vid_learning_set:
855 	mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
856 				     MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false,
857 				     mlxsw_sp_vfid_to_fid(vfid->vfid), vid);
858 err_port_vid_to_fid_set:
859 	if (list_is_singular(&mlxsw_sp_port->vports_list))
860 		mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
861 err_port_vp_mode_trans:
862 	if (!vfid->nr_vports)
863 		mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false,
864 					 false);
865 err_vport_flood_set:
866 	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
867 err_port_vport_create:
868 	if (!vfid->nr_vports)
869 		mlxsw_sp_vfid_destroy(mlxsw_sp, vfid);
870 	return err;
871 }
872 
873 int mlxsw_sp_port_kill_vid(struct net_device *dev,
874 			   __be16 __always_unused proto, u16 vid)
875 {
876 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
877 	struct mlxsw_sp_port *mlxsw_sp_vport;
878 	struct mlxsw_sp_vfid *vfid;
879 	int err;
880 
881 	/* VLAN 0 is removed from HW filter when device goes down, but
882 	 * it is reserved in our case, so simply return.
883 	 */
884 	if (!vid)
885 		return 0;
886 
887 	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
888 	if (!mlxsw_sp_vport) {
889 		netdev_warn(dev, "VID=%d does not exist\n", vid);
890 		return 0;
891 	}
892 
893 	vfid = mlxsw_sp_vport->vport.vfid;
894 
895 	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
896 					  MLXSW_REG_SPMS_STATE_DISCARDING);
897 	if (err) {
898 		netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
899 		return err;
900 	}
901 
902 	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
903 	if (err) {
904 		netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
905 			   vid);
906 		return err;
907 	}
908 
909 	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
910 	if (err) {
911 		netdev_err(dev, "Failed to enable learning for VID=%d\n", vid);
912 		return err;
913 	}
914 
915 	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
916 					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
917 					   false,
918 					   mlxsw_sp_vfid_to_fid(vfid->vfid),
919 					   vid);
920 	if (err) {
921 		netdev_err(dev, "Failed to invalidate {Port, VID=%d} to vFID=%d mapping\n",
922 			   vid, vfid->vfid);
923 		return err;
924 	}
925 
926 	/* When removing the last VLAN interface on a bridged port we need to
927 	 * transition all active 802.1Q bridge VLANs to use VID to FID
928 	 * mappings and set port's mode to VLAN mode.
929 	 */
930 	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
931 		err = mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
932 		if (err) {
933 			netdev_err(dev, "Failed to set to VLAN mode\n");
934 			return err;
935 		}
936 	}
937 
938 	vfid->nr_vports--;
939 	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
940 
941 	/* Destroy the vFID if no vPorts are assigned to it anymore. */
942 	if (!vfid->nr_vports)
943 		mlxsw_sp_vfid_destroy(mlxsw_sp_port->mlxsw_sp, vfid);
944 
945 	return 0;
946 }
947 
948 static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
949 					    size_t len)
950 {
951 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
952 	u8 module, width, lane;
953 	int err;
954 
955 	err = __mlxsw_sp_port_module_info_get(mlxsw_sp_port->mlxsw_sp,
956 					      mlxsw_sp_port->local_port,
957 					      &module, &width, &lane);
958 	if (err) {
959 		netdev_err(dev, "Failed to retrieve module information\n");
960 		return err;
961 	}
962 
963 	if (!mlxsw_sp_port->split)
964 		err = snprintf(name, len, "p%d", module + 1);
965 	else
966 		err = snprintf(name, len, "p%ds%d", module + 1,
967 			       lane / width);
968 
969 	if (err >= len)
970 		return -EINVAL;
971 
972 	return 0;
973 }
974 
975 static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
976 	.ndo_open		= mlxsw_sp_port_open,
977 	.ndo_stop		= mlxsw_sp_port_stop,
978 	.ndo_start_xmit		= mlxsw_sp_port_xmit,
979 	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
980 	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
981 	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
982 	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
983 	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
984 	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
985 	.ndo_fdb_add		= switchdev_port_fdb_add,
986 	.ndo_fdb_del		= switchdev_port_fdb_del,
987 	.ndo_fdb_dump		= switchdev_port_fdb_dump,
988 	.ndo_bridge_setlink	= switchdev_port_bridge_setlink,
989 	.ndo_bridge_getlink	= switchdev_port_bridge_getlink,
990 	.ndo_bridge_dellink	= switchdev_port_bridge_dellink,
991 	.ndo_get_phys_port_name	= mlxsw_sp_port_get_phys_port_name,
992 };
993 
994 static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
995 				      struct ethtool_drvinfo *drvinfo)
996 {
997 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
998 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
999 
1000 	strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
1001 	strlcpy(drvinfo->version, mlxsw_sp_driver_version,
1002 		sizeof(drvinfo->version));
1003 	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
1004 		 "%d.%d.%d",
1005 		 mlxsw_sp->bus_info->fw_rev.major,
1006 		 mlxsw_sp->bus_info->fw_rev.minor,
1007 		 mlxsw_sp->bus_info->fw_rev.subminor);
1008 	strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
1009 		sizeof(drvinfo->bus_info));
1010 }
1011 
1012 static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
1013 					 struct ethtool_pauseparam *pause)
1014 {
1015 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1016 
1017 	pause->rx_pause = mlxsw_sp_port->link.rx_pause;
1018 	pause->tx_pause = mlxsw_sp_port->link.tx_pause;
1019 }
1020 
1021 static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
1022 				   struct ethtool_pauseparam *pause)
1023 {
1024 	char pfcc_pl[MLXSW_REG_PFCC_LEN];
1025 
1026 	mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
1027 	mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
1028 	mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);
1029 
1030 	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
1031 			       pfcc_pl);
1032 }
1033 
1034 static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
1035 					struct ethtool_pauseparam *pause)
1036 {
1037 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1038 	bool pause_en = pause->tx_pause || pause->rx_pause;
1039 	int err;
1040 
1041 	if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
1042 		netdev_err(dev, "PFC already enabled on port\n");
1043 		return -EINVAL;
1044 	}
1045 
1046 	if (pause->autoneg) {
1047 		netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
1048 		return -EINVAL;
1049 	}
1050 
1051 	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
1052 	if (err) {
1053 		netdev_err(dev, "Failed to configure port's headroom\n");
1054 		return err;
1055 	}
1056 
1057 	err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
1058 	if (err) {
1059 		netdev_err(dev, "Failed to set PAUSE parameters\n");
1060 		goto err_port_pause_configure;
1061 	}
1062 
1063 	mlxsw_sp_port->link.rx_pause = pause->rx_pause;
1064 	mlxsw_sp_port->link.tx_pause = pause->tx_pause;
1065 
1066 	return 0;
1067 
1068 err_port_pause_configure:
1069 	pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
1070 	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
1071 	return err;
1072 }
1073 
1074 struct mlxsw_sp_port_hw_stats {
1075 	char str[ETH_GSTRING_LEN];
1076 	u64 (*getter)(char *payload);
1077 };
1078 
1079 static const struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
1080 	{
1081 		.str = "a_frames_transmitted_ok",
1082 		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
1083 	},
1084 	{
1085 		.str = "a_frames_received_ok",
1086 		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
1087 	},
1088 	{
1089 		.str = "a_frame_check_sequence_errors",
1090 		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
1091 	},
1092 	{
1093 		.str = "a_alignment_errors",
1094 		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
1095 	},
1096 	{
1097 		.str = "a_octets_transmitted_ok",
1098 		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
1099 	},
1100 	{
1101 		.str = "a_octets_received_ok",
1102 		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
1103 	},
1104 	{
1105 		.str = "a_multicast_frames_xmitted_ok",
1106 		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
1107 	},
1108 	{
1109 		.str = "a_broadcast_frames_xmitted_ok",
1110 		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
1111 	},
1112 	{
1113 		.str = "a_multicast_frames_received_ok",
1114 		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
1115 	},
1116 	{
1117 		.str = "a_broadcast_frames_received_ok",
1118 		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
1119 	},
1120 	{
1121 		.str = "a_in_range_length_errors",
1122 		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
1123 	},
1124 	{
1125 		.str = "a_out_of_range_length_field",
1126 		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
1127 	},
1128 	{
1129 		.str = "a_frame_too_long_errors",
1130 		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
1131 	},
1132 	{
1133 		.str = "a_symbol_error_during_carrier",
1134 		.getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
1135 	},
1136 	{
1137 		.str = "a_mac_control_frames_transmitted",
1138 		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
1139 	},
1140 	{
1141 		.str = "a_mac_control_frames_received",
1142 		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
1143 	},
1144 	{
1145 		.str = "a_unsupported_opcodes_received",
1146 		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
1147 	},
1148 	{
1149 		.str = "a_pause_mac_ctrl_frames_received",
1150 		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
1151 	},
1152 	{
1153 		.str = "a_pause_mac_ctrl_frames_xmitted",
1154 		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
1155 	},
1156 };
1157 
1158 #define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
1159 
1160 static void mlxsw_sp_port_get_strings(struct net_device *dev,
1161 				      u32 stringset, u8 *data)
1162 {
1163 	u8 *p = data;
1164 	int i;
1165 
1166 	switch (stringset) {
1167 	case ETH_SS_STATS:
1168 		for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
1169 			memcpy(p, mlxsw_sp_port_hw_stats[i].str,
1170 			       ETH_GSTRING_LEN);
1171 			p += ETH_GSTRING_LEN;
1172 		}
1173 		break;
1174 	}
1175 }
1176 
1177 static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
1178 				     enum ethtool_phys_id_state state)
1179 {
1180 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1181 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1182 	char mlcr_pl[MLXSW_REG_MLCR_LEN];
1183 	bool active;
1184 
1185 	switch (state) {
1186 	case ETHTOOL_ID_ACTIVE:
1187 		active = true;
1188 		break;
1189 	case ETHTOOL_ID_INACTIVE:
1190 		active = false;
1191 		break;
1192 	default:
1193 		return -EOPNOTSUPP;
1194 	}
1195 
1196 	mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
1197 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
1198 }
1199 
1200 static void mlxsw_sp_port_get_stats(struct net_device *dev,
1201 				    struct ethtool_stats *stats, u64 *data)
1202 {
1203 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1204 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1205 	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
1206 	int i;
1207 	int err;
1208 
1209 	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port,
1210 			     MLXSW_REG_PPCNT_IEEE_8023_CNT, 0);
1211 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
1212 	for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++)
1213 		data[i] = !err ? mlxsw_sp_port_hw_stats[i].getter(ppcnt_pl) : 0;
1214 }
1215 
1216 static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
1217 {
1218 	switch (sset) {
1219 	case ETH_SS_STATS:
1220 		return MLXSW_SP_PORT_HW_STATS_LEN;
1221 	default:
1222 		return -EOPNOTSUPP;
1223 	}
1224 }
1225 
1226 struct mlxsw_sp_port_link_mode {
1227 	u32 mask;
1228 	u32 supported;
1229 	u32 advertised;
1230 	u32 speed;
1231 };
1232 
1233 static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
1234 	{
1235 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
1236 		.supported	= SUPPORTED_100baseT_Full,
1237 		.advertised	= ADVERTISED_100baseT_Full,
1238 		.speed		= 100,
1239 	},
1240 	{
1241 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX,
1242 		.speed		= 100,
1243 	},
1244 	{
1245 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_SGMII |
1246 				  MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
1247 		.supported	= SUPPORTED_1000baseKX_Full,
1248 		.advertised	= ADVERTISED_1000baseKX_Full,
1249 		.speed		= 1000,
1250 	},
1251 	{
1252 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
1253 		.supported	= SUPPORTED_10000baseT_Full,
1254 		.advertised	= ADVERTISED_10000baseT_Full,
1255 		.speed		= 10000,
1256 	},
1257 	{
1258 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
1259 				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
1260 		.supported	= SUPPORTED_10000baseKX4_Full,
1261 		.advertised	= ADVERTISED_10000baseKX4_Full,
1262 		.speed		= 10000,
1263 	},
1264 	{
1265 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
1266 				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
1267 				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
1268 				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
1269 		.supported	= SUPPORTED_10000baseKR_Full,
1270 		.advertised	= ADVERTISED_10000baseKR_Full,
1271 		.speed		= 10000,
1272 	},
1273 	{
1274 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
1275 		.supported	= SUPPORTED_20000baseKR2_Full,
1276 		.advertised	= ADVERTISED_20000baseKR2_Full,
1277 		.speed		= 20000,
1278 	},
1279 	{
1280 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
1281 		.supported	= SUPPORTED_40000baseCR4_Full,
1282 		.advertised	= ADVERTISED_40000baseCR4_Full,
1283 		.speed		= 40000,
1284 	},
1285 	{
1286 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
1287 		.supported	= SUPPORTED_40000baseKR4_Full,
1288 		.advertised	= ADVERTISED_40000baseKR4_Full,
1289 		.speed		= 40000,
1290 	},
1291 	{
1292 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
1293 		.supported	= SUPPORTED_40000baseSR4_Full,
1294 		.advertised	= ADVERTISED_40000baseSR4_Full,
1295 		.speed		= 40000,
1296 	},
1297 	{
1298 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
1299 		.supported	= SUPPORTED_40000baseLR4_Full,
1300 		.advertised	= ADVERTISED_40000baseLR4_Full,
1301 		.speed		= 40000,
1302 	},
1303 	{
1304 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR |
1305 				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR |
1306 				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
1307 		.speed		= 25000,
1308 	},
1309 	{
1310 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 |
1311 				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 |
1312 				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
1313 		.speed		= 50000,
1314 	},
1315 	{
1316 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
1317 		.supported	= SUPPORTED_56000baseKR4_Full,
1318 		.advertised	= ADVERTISED_56000baseKR4_Full,
1319 		.speed		= 56000,
1320 	},
1321 	{
1322 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
1323 				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
1324 				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
1325 				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
1326 		.speed		= 100000,
1327 	},
1328 };
1329 
1330 #define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)
1331 
1332 static u32 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto)
1333 {
1334 	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
1335 			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
1336 			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
1337 			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
1338 			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
1339 			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
1340 		return SUPPORTED_FIBRE;
1341 
1342 	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
1343 			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
1344 			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
1345 			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
1346 			      MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
1347 		return SUPPORTED_Backplane;
1348 	return 0;
1349 }
1350 
1351 static u32 mlxsw_sp_from_ptys_supported_link(u32 ptys_eth_proto)
1352 {
1353 	u32 modes = 0;
1354 	int i;
1355 
1356 	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1357 		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
1358 			modes |= mlxsw_sp_port_link_mode[i].supported;
1359 	}
1360 	return modes;
1361 }
1362 
1363 static u32 mlxsw_sp_from_ptys_advert_link(u32 ptys_eth_proto)
1364 {
1365 	u32 modes = 0;
1366 	int i;
1367 
1368 	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1369 		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
1370 			modes |= mlxsw_sp_port_link_mode[i].advertised;
1371 	}
1372 	return modes;
1373 }
1374 
1375 static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
1376 					    struct ethtool_cmd *cmd)
1377 {
1378 	u32 speed = SPEED_UNKNOWN;
1379 	u8 duplex = DUPLEX_UNKNOWN;
1380 	int i;
1381 
1382 	if (!carrier_ok)
1383 		goto out;
1384 
1385 	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1386 		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
1387 			speed = mlxsw_sp_port_link_mode[i].speed;
1388 			duplex = DUPLEX_FULL;
1389 			break;
1390 		}
1391 	}
1392 out:
1393 	ethtool_cmd_speed_set(cmd, speed);
1394 	cmd->duplex = duplex;
1395 }
1396 
1397 static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
1398 {
1399 	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
1400 			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
1401 			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
1402 			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
1403 		return PORT_FIBRE;
1404 
1405 	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
1406 			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
1407 			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
1408 		return PORT_DA;
1409 
1410 	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
1411 			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
1412 			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
1413 			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
1414 		return PORT_NONE;
1415 
1416 	return PORT_OTHER;
1417 }
1418 
1419 static int mlxsw_sp_port_get_settings(struct net_device *dev,
1420 				      struct ethtool_cmd *cmd)
1421 {
1422 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1423 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1424 	char ptys_pl[MLXSW_REG_PTYS_LEN];
1425 	u32 eth_proto_cap;
1426 	u32 eth_proto_admin;
1427 	u32 eth_proto_oper;
1428 	int err;
1429 
1430 	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
1431 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1432 	if (err) {
1433 		netdev_err(dev, "Failed to get proto");
1434 		return err;
1435 	}
1436 	mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap,
1437 			      &eth_proto_admin, &eth_proto_oper);
1438 
1439 	cmd->supported = mlxsw_sp_from_ptys_supported_port(eth_proto_cap) |
1440 			 mlxsw_sp_from_ptys_supported_link(eth_proto_cap) |
1441 			 SUPPORTED_Pause | SUPPORTED_Asym_Pause;
1442 	cmd->advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_admin);
1443 	mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev),
1444 					eth_proto_oper, cmd);
1445 
1446 	eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
1447 	cmd->port = mlxsw_sp_port_connector_port(eth_proto_oper);
1448 	cmd->lp_advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_oper);
1449 
1450 	cmd->transceiver = XCVR_INTERNAL;
1451 	return 0;
1452 }
1453 
1454 static u32 mlxsw_sp_to_ptys_advert_link(u32 advertising)
1455 {
1456 	u32 ptys_proto = 0;
1457 	int i;
1458 
1459 	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1460 		if (advertising & mlxsw_sp_port_link_mode[i].advertised)
1461 			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
1462 	}
1463 	return ptys_proto;
1464 }
1465 
1466 static u32 mlxsw_sp_to_ptys_speed(u32 speed)
1467 {
1468 	u32 ptys_proto = 0;
1469 	int i;
1470 
1471 	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1472 		if (speed == mlxsw_sp_port_link_mode[i].speed)
1473 			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
1474 	}
1475 	return ptys_proto;
1476 }
1477 
1478 static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed)
1479 {
1480 	u32 ptys_proto = 0;
1481 	int i;
1482 
1483 	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1484 		if (mlxsw_sp_port_link_mode[i].speed <= upper_speed)
1485 			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
1486 	}
1487 	return ptys_proto;
1488 }
1489 
1490 static int mlxsw_sp_port_set_settings(struct net_device *dev,
1491 				      struct ethtool_cmd *cmd)
1492 {
1493 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1494 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1495 	char ptys_pl[MLXSW_REG_PTYS_LEN];
1496 	u32 speed;
1497 	u32 eth_proto_new;
1498 	u32 eth_proto_cap;
1499 	u32 eth_proto_admin;
1500 	bool is_up;
1501 	int err;
1502 
1503 	speed = ethtool_cmd_speed(cmd);
1504 
1505 	eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ?
1506 		mlxsw_sp_to_ptys_advert_link(cmd->advertising) :
1507 		mlxsw_sp_to_ptys_speed(speed);
1508 
1509 	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
1510 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1511 	if (err) {
1512 		netdev_err(dev, "Failed to get proto");
1513 		return err;
1514 	}
1515 	mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, NULL);
1516 
1517 	eth_proto_new = eth_proto_new & eth_proto_cap;
1518 	if (!eth_proto_new) {
1519 		netdev_err(dev, "Not supported proto admin requested");
1520 		return -EINVAL;
1521 	}
1522 	if (eth_proto_new == eth_proto_admin)
1523 		return 0;
1524 
1525 	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, eth_proto_new);
1526 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1527 	if (err) {
1528 		netdev_err(dev, "Failed to set proto admin");
1529 		return err;
1530 	}
1531 
1532 	err = mlxsw_sp_port_oper_status_get(mlxsw_sp_port, &is_up);
1533 	if (err) {
1534 		netdev_err(dev, "Failed to get oper status");
1535 		return err;
1536 	}
1537 	if (!is_up)
1538 		return 0;
1539 
1540 	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
1541 	if (err) {
1542 		netdev_err(dev, "Failed to set admin status");
1543 		return err;
1544 	}
1545 
1546 	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
1547 	if (err) {
1548 		netdev_err(dev, "Failed to set admin status");
1549 		return err;
1550 	}
1551 
1552 	return 0;
1553 }
1554 
1555 static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
1556 	.get_drvinfo		= mlxsw_sp_port_get_drvinfo,
1557 	.get_link		= ethtool_op_get_link,
1558 	.get_pauseparam		= mlxsw_sp_port_get_pauseparam,
1559 	.set_pauseparam		= mlxsw_sp_port_set_pauseparam,
1560 	.get_strings		= mlxsw_sp_port_get_strings,
1561 	.set_phys_id		= mlxsw_sp_port_set_phys_id,
1562 	.get_ethtool_stats	= mlxsw_sp_port_get_stats,
1563 	.get_sset_count		= mlxsw_sp_port_get_sset_count,
1564 	.get_settings		= mlxsw_sp_port_get_settings,
1565 	.set_settings		= mlxsw_sp_port_set_settings,
1566 };
1567 
1568 static int
1569 mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
1570 {
1571 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1572 	u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width;
1573 	char ptys_pl[MLXSW_REG_PTYS_LEN];
1574 	u32 eth_proto_admin;
1575 
1576 	eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed);
1577 	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port,
1578 			    eth_proto_admin);
1579 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1580 }
1581 
1582 int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
1583 			  enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
1584 			  bool dwrr, u8 dwrr_weight)
1585 {
1586 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1587 	char qeec_pl[MLXSW_REG_QEEC_LEN];
1588 
1589 	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
1590 			    next_index);
1591 	mlxsw_reg_qeec_de_set(qeec_pl, true);
1592 	mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
1593 	mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
1594 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
1595 }
1596 
1597 int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
1598 				  enum mlxsw_reg_qeec_hr hr, u8 index,
1599 				  u8 next_index, u32 maxrate)
1600 {
1601 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1602 	char qeec_pl[MLXSW_REG_QEEC_LEN];
1603 
1604 	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
1605 			    next_index);
1606 	mlxsw_reg_qeec_mase_set(qeec_pl, true);
1607 	mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
1608 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
1609 }
1610 
1611 int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
1612 			      u8 switch_prio, u8 tclass)
1613 {
1614 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1615 	char qtct_pl[MLXSW_REG_QTCT_LEN];
1616 
1617 	mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
1618 			    tclass);
1619 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
1620 }
1621 
1622 static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
1623 {
1624 	int err, i;
1625 
1626 	/* Setup the elements hierarcy, so that each TC is linked to
1627 	 * one subgroup, which are all member in the same group.
1628 	 */
1629 	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
1630 				    MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false,
1631 				    0);
1632 	if (err)
1633 		return err;
1634 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1635 		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
1636 					    MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
1637 					    0, false, 0);
1638 		if (err)
1639 			return err;
1640 	}
1641 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1642 		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
1643 					    MLXSW_REG_QEEC_HIERARCY_TC, i, i,
1644 					    false, 0);
1645 		if (err)
1646 			return err;
1647 	}
1648 
1649 	/* Make sure the max shaper is disabled in all hierarcies that
1650 	 * support it.
1651 	 */
1652 	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
1653 					    MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
1654 					    MLXSW_REG_QEEC_MAS_DIS);
1655 	if (err)
1656 		return err;
1657 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1658 		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
1659 						    MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
1660 						    i, 0,
1661 						    MLXSW_REG_QEEC_MAS_DIS);
1662 		if (err)
1663 			return err;
1664 	}
1665 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1666 		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
1667 						    MLXSW_REG_QEEC_HIERARCY_TC,
1668 						    i, i,
1669 						    MLXSW_REG_QEEC_MAS_DIS);
1670 		if (err)
1671 			return err;
1672 	}
1673 
1674 	/* Map all priorities to traffic class 0. */
1675 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1676 		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
1677 		if (err)
1678 			return err;
1679 	}
1680 
1681 	return 0;
1682 }
1683 
1684 static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
1685 				  bool split, u8 module, u8 width)
1686 {
1687 	struct mlxsw_sp_port *mlxsw_sp_port;
1688 	struct net_device *dev;
1689 	size_t bytes;
1690 	int err;
1691 
1692 	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
1693 	if (!dev)
1694 		return -ENOMEM;
1695 	mlxsw_sp_port = netdev_priv(dev);
1696 	mlxsw_sp_port->dev = dev;
1697 	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
1698 	mlxsw_sp_port->local_port = local_port;
1699 	mlxsw_sp_port->split = split;
1700 	bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
1701 	mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
1702 	if (!mlxsw_sp_port->active_vlans) {
1703 		err = -ENOMEM;
1704 		goto err_port_active_vlans_alloc;
1705 	}
1706 	mlxsw_sp_port->untagged_vlans = kzalloc(bytes, GFP_KERNEL);
1707 	if (!mlxsw_sp_port->untagged_vlans) {
1708 		err = -ENOMEM;
1709 		goto err_port_untagged_vlans_alloc;
1710 	}
1711 	INIT_LIST_HEAD(&mlxsw_sp_port->vports_list);
1712 
1713 	mlxsw_sp_port->pcpu_stats =
1714 		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
1715 	if (!mlxsw_sp_port->pcpu_stats) {
1716 		err = -ENOMEM;
1717 		goto err_alloc_stats;
1718 	}
1719 
1720 	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
1721 	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
1722 
1723 	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
1724 	if (err) {
1725 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
1726 			mlxsw_sp_port->local_port);
1727 		goto err_dev_addr_init;
1728 	}
1729 
1730 	netif_carrier_off(dev);
1731 
1732 	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
1733 			 NETIF_F_HW_VLAN_CTAG_FILTER;
1734 
1735 	/* Each packet needs to have a Tx header (metadata) on top all other
1736 	 * headers.
1737 	 */
1738 	dev->hard_header_len += MLXSW_TXHDR_LEN;
1739 
1740 	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
1741 	if (err) {
1742 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
1743 			mlxsw_sp_port->local_port);
1744 		goto err_port_system_port_mapping_set;
1745 	}
1746 
1747 	err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
1748 	if (err) {
1749 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
1750 			mlxsw_sp_port->local_port);
1751 		goto err_port_swid_set;
1752 	}
1753 
1754 	err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
1755 	if (err) {
1756 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
1757 			mlxsw_sp_port->local_port);
1758 		goto err_port_speed_by_width_set;
1759 	}
1760 
1761 	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
1762 	if (err) {
1763 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
1764 			mlxsw_sp_port->local_port);
1765 		goto err_port_mtu_set;
1766 	}
1767 
1768 	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
1769 	if (err)
1770 		goto err_port_admin_status_set;
1771 
1772 	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
1773 	if (err) {
1774 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
1775 			mlxsw_sp_port->local_port);
1776 		goto err_port_buffers_init;
1777 	}
1778 
1779 	err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
1780 	if (err) {
1781 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
1782 			mlxsw_sp_port->local_port);
1783 		goto err_port_ets_init;
1784 	}
1785 
1786 	/* ETS and buffers must be initialized before DCB. */
1787 	err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
1788 	if (err) {
1789 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
1790 			mlxsw_sp_port->local_port);
1791 		goto err_port_dcb_init;
1792 	}
1793 
1794 	mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
1795 	err = register_netdev(dev);
1796 	if (err) {
1797 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
1798 			mlxsw_sp_port->local_port);
1799 		goto err_register_netdev;
1800 	}
1801 
1802 	err = mlxsw_core_port_init(mlxsw_sp->core, &mlxsw_sp_port->core_port,
1803 				   mlxsw_sp_port->local_port, dev,
1804 				   mlxsw_sp_port->split, module);
1805 	if (err) {
1806 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
1807 			mlxsw_sp_port->local_port);
1808 		goto err_core_port_init;
1809 	}
1810 
1811 	err = mlxsw_sp_port_vlan_init(mlxsw_sp_port);
1812 	if (err)
1813 		goto err_port_vlan_init;
1814 
1815 	mlxsw_sp->ports[local_port] = mlxsw_sp_port;
1816 	return 0;
1817 
1818 err_port_vlan_init:
1819 	mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
1820 err_core_port_init:
1821 	unregister_netdev(dev);
1822 err_register_netdev:
1823 err_port_dcb_init:
1824 err_port_ets_init:
1825 err_port_buffers_init:
1826 err_port_admin_status_set:
1827 err_port_mtu_set:
1828 err_port_speed_by_width_set:
1829 err_port_swid_set:
1830 err_port_system_port_mapping_set:
1831 err_dev_addr_init:
1832 	free_percpu(mlxsw_sp_port->pcpu_stats);
1833 err_alloc_stats:
1834 	kfree(mlxsw_sp_port->untagged_vlans);
1835 err_port_untagged_vlans_alloc:
1836 	kfree(mlxsw_sp_port->active_vlans);
1837 err_port_active_vlans_alloc:
1838 	free_netdev(dev);
1839 	return err;
1840 }
1841 
1842 static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
1843 				bool split, u8 module, u8 width, u8 lane)
1844 {
1845 	int err;
1846 
1847 	err = mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
1848 				       lane);
1849 	if (err)
1850 		return err;
1851 
1852 	err = __mlxsw_sp_port_create(mlxsw_sp, local_port, split, module,
1853 				     width);
1854 	if (err)
1855 		goto err_port_create;
1856 
1857 	return 0;
1858 
1859 err_port_create:
1860 	mlxsw_sp_port_module_unmap(mlxsw_sp, local_port);
1861 	return err;
1862 }
1863 
1864 static void mlxsw_sp_port_vports_fini(struct mlxsw_sp_port *mlxsw_sp_port)
1865 {
1866 	struct net_device *dev = mlxsw_sp_port->dev;
1867 	struct mlxsw_sp_port *mlxsw_sp_vport, *tmp;
1868 
1869 	list_for_each_entry_safe(mlxsw_sp_vport, tmp,
1870 				 &mlxsw_sp_port->vports_list, vport.list) {
1871 		u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
1872 
1873 		/* vPorts created for VLAN devices should already be gone
1874 		 * by now, since we unregistered the port netdev.
1875 		 */
1876 		WARN_ON(is_vlan_dev(mlxsw_sp_vport->dev));
1877 		mlxsw_sp_port_kill_vid(dev, 0, vid);
1878 	}
1879 }
1880 
1881 static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
1882 {
1883 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
1884 
1885 	if (!mlxsw_sp_port)
1886 		return;
1887 	mlxsw_sp->ports[local_port] = NULL;
1888 	mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
1889 	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
1890 	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
1891 	mlxsw_sp_port_vports_fini(mlxsw_sp_port);
1892 	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
1893 	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
1894 	mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
1895 	free_percpu(mlxsw_sp_port->pcpu_stats);
1896 	kfree(mlxsw_sp_port->untagged_vlans);
1897 	kfree(mlxsw_sp_port->active_vlans);
1898 	free_netdev(mlxsw_sp_port->dev);
1899 }
1900 
1901 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
1902 {
1903 	int i;
1904 
1905 	for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
1906 		mlxsw_sp_port_remove(mlxsw_sp, i);
1907 	kfree(mlxsw_sp->ports);
1908 }
1909 
1910 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
1911 {
1912 	size_t alloc_size;
1913 	u8 module, width;
1914 	int i;
1915 	int err;
1916 
1917 	alloc_size = sizeof(struct mlxsw_sp_port *) * MLXSW_PORT_MAX_PORTS;
1918 	mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
1919 	if (!mlxsw_sp->ports)
1920 		return -ENOMEM;
1921 
1922 	for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
1923 		err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
1924 						    &width);
1925 		if (err)
1926 			goto err_port_module_info_get;
1927 		if (!width)
1928 			continue;
1929 		mlxsw_sp->port_to_module[i] = module;
1930 		err = __mlxsw_sp_port_create(mlxsw_sp, i, false, module, width);
1931 		if (err)
1932 			goto err_port_create;
1933 	}
1934 	return 0;
1935 
1936 err_port_create:
1937 err_port_module_info_get:
1938 	for (i--; i >= 1; i--)
1939 		mlxsw_sp_port_remove(mlxsw_sp, i);
1940 	kfree(mlxsw_sp->ports);
1941 	return err;
1942 }
1943 
1944 static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
1945 {
1946 	u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;
1947 
1948 	return local_port - offset;
1949 }
1950 
1951 static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
1952 			       unsigned int count)
1953 {
1954 	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
1955 	struct mlxsw_sp_port *mlxsw_sp_port;
1956 	u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
1957 	u8 module, cur_width, base_port;
1958 	int i;
1959 	int err;
1960 
1961 	mlxsw_sp_port = mlxsw_sp->ports[local_port];
1962 	if (!mlxsw_sp_port) {
1963 		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
1964 			local_port);
1965 		return -EINVAL;
1966 	}
1967 
1968 	if (count != 2 && count != 4) {
1969 		netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
1970 		return -EINVAL;
1971 	}
1972 
1973 	err = mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, &module,
1974 					    &cur_width);
1975 	if (err) {
1976 		netdev_err(mlxsw_sp_port->dev, "Failed to get port's width\n");
1977 		return err;
1978 	}
1979 
1980 	if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
1981 		netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
1982 		return -EINVAL;
1983 	}
1984 
1985 	/* Make sure we have enough slave (even) ports for the split. */
1986 	if (count == 2) {
1987 		base_port = local_port;
1988 		if (mlxsw_sp->ports[base_port + 1]) {
1989 			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
1990 			return -EINVAL;
1991 		}
1992 	} else {
1993 		base_port = mlxsw_sp_cluster_base_port_get(local_port);
1994 		if (mlxsw_sp->ports[base_port + 1] ||
1995 		    mlxsw_sp->ports[base_port + 3]) {
1996 			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
1997 			return -EINVAL;
1998 		}
1999 	}
2000 
2001 	for (i = 0; i < count; i++)
2002 		mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
2003 
2004 	for (i = 0; i < count; i++) {
2005 		err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
2006 					   module, width, i * width);
2007 		if (err) {
2008 			dev_err(mlxsw_sp->bus_info->dev, "Failed to create split port\n");
2009 			goto err_port_create;
2010 		}
2011 	}
2012 
2013 	return 0;
2014 
2015 err_port_create:
2016 	for (i--; i >= 0; i--)
2017 		mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
2018 	for (i = 0; i < count / 2; i++) {
2019 		module = mlxsw_sp->port_to_module[base_port + i * 2];
2020 		mlxsw_sp_port_create(mlxsw_sp, base_port + i * 2, false,
2021 				     module, MLXSW_PORT_MODULE_MAX_WIDTH, 0);
2022 	}
2023 	return err;
2024 }
2025 
2026 static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
2027 {
2028 	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2029 	struct mlxsw_sp_port *mlxsw_sp_port;
2030 	u8 module, cur_width, base_port;
2031 	unsigned int count;
2032 	int i;
2033 	int err;
2034 
2035 	mlxsw_sp_port = mlxsw_sp->ports[local_port];
2036 	if (!mlxsw_sp_port) {
2037 		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
2038 			local_port);
2039 		return -EINVAL;
2040 	}
2041 
2042 	if (!mlxsw_sp_port->split) {
2043 		netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n");
2044 		return -EINVAL;
2045 	}
2046 
2047 	err = mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, &module,
2048 					    &cur_width);
2049 	if (err) {
2050 		netdev_err(mlxsw_sp_port->dev, "Failed to get port's width\n");
2051 		return err;
2052 	}
2053 	count = cur_width == 1 ? 4 : 2;
2054 
2055 	base_port = mlxsw_sp_cluster_base_port_get(local_port);
2056 
2057 	/* Determine which ports to remove. */
2058 	if (count == 2 && local_port >= base_port + 2)
2059 		base_port = base_port + 2;
2060 
2061 	for (i = 0; i < count; i++)
2062 		mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
2063 
2064 	for (i = 0; i < count / 2; i++) {
2065 		module = mlxsw_sp->port_to_module[base_port + i * 2];
2066 		err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * 2, false,
2067 					   module, MLXSW_PORT_MODULE_MAX_WIDTH,
2068 					   0);
2069 		if (err)
2070 			dev_err(mlxsw_sp->bus_info->dev, "Failed to reinstantiate port\n");
2071 	}
2072 
2073 	return 0;
2074 }
2075 
2076 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
2077 				     char *pude_pl, void *priv)
2078 {
2079 	struct mlxsw_sp *mlxsw_sp = priv;
2080 	struct mlxsw_sp_port *mlxsw_sp_port;
2081 	enum mlxsw_reg_pude_oper_status status;
2082 	u8 local_port;
2083 
2084 	local_port = mlxsw_reg_pude_local_port_get(pude_pl);
2085 	mlxsw_sp_port = mlxsw_sp->ports[local_port];
2086 	if (!mlxsw_sp_port) {
2087 		dev_warn(mlxsw_sp->bus_info->dev, "Port %d: Link event received for non-existent port\n",
2088 			 local_port);
2089 		return;
2090 	}
2091 
2092 	status = mlxsw_reg_pude_oper_status_get(pude_pl);
2093 	if (status == MLXSW_PORT_OPER_STATUS_UP) {
2094 		netdev_info(mlxsw_sp_port->dev, "link up\n");
2095 		netif_carrier_on(mlxsw_sp_port->dev);
2096 	} else {
2097 		netdev_info(mlxsw_sp_port->dev, "link down\n");
2098 		netif_carrier_off(mlxsw_sp_port->dev);
2099 	}
2100 }
2101 
2102 static struct mlxsw_event_listener mlxsw_sp_pude_event = {
2103 	.func = mlxsw_sp_pude_event_func,
2104 	.trap_id = MLXSW_TRAP_ID_PUDE,
2105 };
2106 
2107 static int mlxsw_sp_event_register(struct mlxsw_sp *mlxsw_sp,
2108 				   enum mlxsw_event_trap_id trap_id)
2109 {
2110 	struct mlxsw_event_listener *el;
2111 	char hpkt_pl[MLXSW_REG_HPKT_LEN];
2112 	int err;
2113 
2114 	switch (trap_id) {
2115 	case MLXSW_TRAP_ID_PUDE:
2116 		el = &mlxsw_sp_pude_event;
2117 		break;
2118 	}
2119 	err = mlxsw_core_event_listener_register(mlxsw_sp->core, el, mlxsw_sp);
2120 	if (err)
2121 		return err;
2122 
2123 	mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, trap_id);
2124 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
2125 	if (err)
2126 		goto err_event_trap_set;
2127 
2128 	return 0;
2129 
2130 err_event_trap_set:
2131 	mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
2132 	return err;
2133 }
2134 
2135 static void mlxsw_sp_event_unregister(struct mlxsw_sp *mlxsw_sp,
2136 				      enum mlxsw_event_trap_id trap_id)
2137 {
2138 	struct mlxsw_event_listener *el;
2139 
2140 	switch (trap_id) {
2141 	case MLXSW_TRAP_ID_PUDE:
2142 		el = &mlxsw_sp_pude_event;
2143 		break;
2144 	}
2145 	mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
2146 }
2147 
2148 static void mlxsw_sp_rx_listener_func(struct sk_buff *skb, u8 local_port,
2149 				      void *priv)
2150 {
2151 	struct mlxsw_sp *mlxsw_sp = priv;
2152 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
2153 	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
2154 
2155 	if (unlikely(!mlxsw_sp_port)) {
2156 		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
2157 				     local_port);
2158 		return;
2159 	}
2160 
2161 	skb->dev = mlxsw_sp_port->dev;
2162 
2163 	pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
2164 	u64_stats_update_begin(&pcpu_stats->syncp);
2165 	pcpu_stats->rx_packets++;
2166 	pcpu_stats->rx_bytes += skb->len;
2167 	u64_stats_update_end(&pcpu_stats->syncp);
2168 
2169 	skb->protocol = eth_type_trans(skb, skb->dev);
2170 	netif_receive_skb(skb);
2171 }
2172 
2173 static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = {
2174 	{
2175 		.func = mlxsw_sp_rx_listener_func,
2176 		.local_port = MLXSW_PORT_DONT_CARE,
2177 		.trap_id = MLXSW_TRAP_ID_FDB_MC,
2178 	},
2179 	/* Traps for specific L2 packet types, not trapped as FDB MC */
2180 	{
2181 		.func = mlxsw_sp_rx_listener_func,
2182 		.local_port = MLXSW_PORT_DONT_CARE,
2183 		.trap_id = MLXSW_TRAP_ID_STP,
2184 	},
2185 	{
2186 		.func = mlxsw_sp_rx_listener_func,
2187 		.local_port = MLXSW_PORT_DONT_CARE,
2188 		.trap_id = MLXSW_TRAP_ID_LACP,
2189 	},
2190 	{
2191 		.func = mlxsw_sp_rx_listener_func,
2192 		.local_port = MLXSW_PORT_DONT_CARE,
2193 		.trap_id = MLXSW_TRAP_ID_EAPOL,
2194 	},
2195 	{
2196 		.func = mlxsw_sp_rx_listener_func,
2197 		.local_port = MLXSW_PORT_DONT_CARE,
2198 		.trap_id = MLXSW_TRAP_ID_LLDP,
2199 	},
2200 	{
2201 		.func = mlxsw_sp_rx_listener_func,
2202 		.local_port = MLXSW_PORT_DONT_CARE,
2203 		.trap_id = MLXSW_TRAP_ID_MMRP,
2204 	},
2205 	{
2206 		.func = mlxsw_sp_rx_listener_func,
2207 		.local_port = MLXSW_PORT_DONT_CARE,
2208 		.trap_id = MLXSW_TRAP_ID_MVRP,
2209 	},
2210 	{
2211 		.func = mlxsw_sp_rx_listener_func,
2212 		.local_port = MLXSW_PORT_DONT_CARE,
2213 		.trap_id = MLXSW_TRAP_ID_RPVST,
2214 	},
2215 	{
2216 		.func = mlxsw_sp_rx_listener_func,
2217 		.local_port = MLXSW_PORT_DONT_CARE,
2218 		.trap_id = MLXSW_TRAP_ID_DHCP,
2219 	},
2220 	{
2221 		.func = mlxsw_sp_rx_listener_func,
2222 		.local_port = MLXSW_PORT_DONT_CARE,
2223 		.trap_id = MLXSW_TRAP_ID_IGMP_QUERY,
2224 	},
2225 	{
2226 		.func = mlxsw_sp_rx_listener_func,
2227 		.local_port = MLXSW_PORT_DONT_CARE,
2228 		.trap_id = MLXSW_TRAP_ID_IGMP_V1_REPORT,
2229 	},
2230 	{
2231 		.func = mlxsw_sp_rx_listener_func,
2232 		.local_port = MLXSW_PORT_DONT_CARE,
2233 		.trap_id = MLXSW_TRAP_ID_IGMP_V2_REPORT,
2234 	},
2235 	{
2236 		.func = mlxsw_sp_rx_listener_func,
2237 		.local_port = MLXSW_PORT_DONT_CARE,
2238 		.trap_id = MLXSW_TRAP_ID_IGMP_V2_LEAVE,
2239 	},
2240 	{
2241 		.func = mlxsw_sp_rx_listener_func,
2242 		.local_port = MLXSW_PORT_DONT_CARE,
2243 		.trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT,
2244 	},
2245 };
2246 
2247 static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
2248 {
2249 	char htgt_pl[MLXSW_REG_HTGT_LEN];
2250 	char hpkt_pl[MLXSW_REG_HPKT_LEN];
2251 	int i;
2252 	int err;
2253 
2254 	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX);
2255 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
2256 	if (err)
2257 		return err;
2258 
2259 	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_CTRL);
2260 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
2261 	if (err)
2262 		return err;
2263 
2264 	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
2265 		err = mlxsw_core_rx_listener_register(mlxsw_sp->core,
2266 						      &mlxsw_sp_rx_listener[i],
2267 						      mlxsw_sp);
2268 		if (err)
2269 			goto err_rx_listener_register;
2270 
2271 		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
2272 				    mlxsw_sp_rx_listener[i].trap_id);
2273 		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
2274 		if (err)
2275 			goto err_rx_trap_set;
2276 	}
2277 	return 0;
2278 
2279 err_rx_trap_set:
2280 	mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
2281 					  &mlxsw_sp_rx_listener[i],
2282 					  mlxsw_sp);
2283 err_rx_listener_register:
2284 	for (i--; i >= 0; i--) {
2285 		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
2286 				    mlxsw_sp_rx_listener[i].trap_id);
2287 		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
2288 
2289 		mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
2290 						  &mlxsw_sp_rx_listener[i],
2291 						  mlxsw_sp);
2292 	}
2293 	return err;
2294 }
2295 
2296 static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
2297 {
2298 	char hpkt_pl[MLXSW_REG_HPKT_LEN];
2299 	int i;
2300 
2301 	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
2302 		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
2303 				    mlxsw_sp_rx_listener[i].trap_id);
2304 		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
2305 
2306 		mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
2307 						  &mlxsw_sp_rx_listener[i],
2308 						  mlxsw_sp);
2309 	}
2310 }
2311 
2312 static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core,
2313 				 enum mlxsw_reg_sfgc_type type,
2314 				 enum mlxsw_reg_sfgc_bridge_type bridge_type)
2315 {
2316 	enum mlxsw_flood_table_type table_type;
2317 	enum mlxsw_sp_flood_table flood_table;
2318 	char sfgc_pl[MLXSW_REG_SFGC_LEN];
2319 
2320 	if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID)
2321 		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
2322 	else
2323 		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
2324 
2325 	if (type == MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST)
2326 		flood_table = MLXSW_SP_FLOOD_TABLE_UC;
2327 	else
2328 		flood_table = MLXSW_SP_FLOOD_TABLE_BM;
2329 
2330 	mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type,
2331 			    flood_table);
2332 	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfgc), sfgc_pl);
2333 }
2334 
2335 static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
2336 {
2337 	int type, err;
2338 
2339 	for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
2340 		if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
2341 			continue;
2342 
2343 		err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
2344 					    MLXSW_REG_SFGC_BRIDGE_TYPE_VFID);
2345 		if (err)
2346 			return err;
2347 
2348 		err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
2349 					    MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID);
2350 		if (err)
2351 			return err;
2352 	}
2353 
2354 	return 0;
2355 }
2356 
2357 static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
2358 {
2359 	char slcr_pl[MLXSW_REG_SLCR_LEN];
2360 
2361 	mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
2362 				     MLXSW_REG_SLCR_LAG_HASH_DMAC |
2363 				     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
2364 				     MLXSW_REG_SLCR_LAG_HASH_VLANID |
2365 				     MLXSW_REG_SLCR_LAG_HASH_SIP |
2366 				     MLXSW_REG_SLCR_LAG_HASH_DIP |
2367 				     MLXSW_REG_SLCR_LAG_HASH_SPORT |
2368 				     MLXSW_REG_SLCR_LAG_HASH_DPORT |
2369 				     MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
2370 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
2371 }
2372 
2373 static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
2374 			 const struct mlxsw_bus_info *mlxsw_bus_info)
2375 {
2376 	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2377 	int err;
2378 
2379 	mlxsw_sp->core = mlxsw_core;
2380 	mlxsw_sp->bus_info = mlxsw_bus_info;
2381 	INIT_LIST_HEAD(&mlxsw_sp->port_vfids.list);
2382 	INIT_LIST_HEAD(&mlxsw_sp->br_vfids.list);
2383 	INIT_LIST_HEAD(&mlxsw_sp->br_mids.list);
2384 
2385 	err = mlxsw_sp_base_mac_get(mlxsw_sp);
2386 	if (err) {
2387 		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
2388 		return err;
2389 	}
2390 
2391 	err = mlxsw_sp_ports_create(mlxsw_sp);
2392 	if (err) {
2393 		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
2394 		return err;
2395 	}
2396 
2397 	err = mlxsw_sp_event_register(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
2398 	if (err) {
2399 		dev_err(mlxsw_sp->bus_info->dev, "Failed to register for PUDE events\n");
2400 		goto err_event_register;
2401 	}
2402 
2403 	err = mlxsw_sp_traps_init(mlxsw_sp);
2404 	if (err) {
2405 		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps for RX\n");
2406 		goto err_rx_listener_register;
2407 	}
2408 
2409 	err = mlxsw_sp_flood_init(mlxsw_sp);
2410 	if (err) {
2411 		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize flood tables\n");
2412 		goto err_flood_init;
2413 	}
2414 
2415 	err = mlxsw_sp_buffers_init(mlxsw_sp);
2416 	if (err) {
2417 		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
2418 		goto err_buffers_init;
2419 	}
2420 
2421 	err = mlxsw_sp_lag_init(mlxsw_sp);
2422 	if (err) {
2423 		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
2424 		goto err_lag_init;
2425 	}
2426 
2427 	err = mlxsw_sp_switchdev_init(mlxsw_sp);
2428 	if (err) {
2429 		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
2430 		goto err_switchdev_init;
2431 	}
2432 
2433 	return 0;
2434 
2435 err_switchdev_init:
2436 err_lag_init:
2437 	mlxsw_sp_buffers_fini(mlxsw_sp);
2438 err_buffers_init:
2439 err_flood_init:
2440 	mlxsw_sp_traps_fini(mlxsw_sp);
2441 err_rx_listener_register:
2442 	mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
2443 err_event_register:
2444 	mlxsw_sp_ports_remove(mlxsw_sp);
2445 	return err;
2446 }
2447 
2448 static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
2449 {
2450 	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2451 
2452 	mlxsw_sp_switchdev_fini(mlxsw_sp);
2453 	mlxsw_sp_buffers_fini(mlxsw_sp);
2454 	mlxsw_sp_traps_fini(mlxsw_sp);
2455 	mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
2456 	mlxsw_sp_ports_remove(mlxsw_sp);
2457 }
2458 
2459 static struct mlxsw_config_profile mlxsw_sp_config_profile = {
2460 	.used_max_vepa_channels		= 1,
2461 	.max_vepa_channels		= 0,
2462 	.used_max_lag			= 1,
2463 	.max_lag			= MLXSW_SP_LAG_MAX,
2464 	.used_max_port_per_lag		= 1,
2465 	.max_port_per_lag		= MLXSW_SP_PORT_PER_LAG_MAX,
2466 	.used_max_mid			= 1,
2467 	.max_mid			= MLXSW_SP_MID_MAX,
2468 	.used_max_pgt			= 1,
2469 	.max_pgt			= 0,
2470 	.used_max_system_port		= 1,
2471 	.max_system_port		= 64,
2472 	.used_max_vlan_groups		= 1,
2473 	.max_vlan_groups		= 127,
2474 	.used_max_regions		= 1,
2475 	.max_regions			= 400,
2476 	.used_flood_tables		= 1,
2477 	.used_flood_mode		= 1,
2478 	.flood_mode			= 3,
2479 	.max_fid_offset_flood_tables	= 2,
2480 	.fid_offset_flood_table_size	= VLAN_N_VID - 1,
2481 	.max_fid_flood_tables		= 2,
2482 	.fid_flood_table_size		= MLXSW_SP_VFID_MAX,
2483 	.used_max_ib_mc			= 1,
2484 	.max_ib_mc			= 0,
2485 	.used_max_pkey			= 1,
2486 	.max_pkey			= 0,
2487 	.swid_config			= {
2488 		{
2489 			.used_type	= 1,
2490 			.type		= MLXSW_PORT_SWID_TYPE_ETH,
2491 		}
2492 	},
2493 };
2494 
2495 static struct mlxsw_driver mlxsw_sp_driver = {
2496 	.kind				= MLXSW_DEVICE_KIND_SPECTRUM,
2497 	.owner				= THIS_MODULE,
2498 	.priv_size			= sizeof(struct mlxsw_sp),
2499 	.init				= mlxsw_sp_init,
2500 	.fini				= mlxsw_sp_fini,
2501 	.port_split			= mlxsw_sp_port_split,
2502 	.port_unsplit			= mlxsw_sp_port_unsplit,
2503 	.sb_pool_get			= mlxsw_sp_sb_pool_get,
2504 	.sb_pool_set			= mlxsw_sp_sb_pool_set,
2505 	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
2506 	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
2507 	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
2508 	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
2509 	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
2510 	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
2511 	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
2512 	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
2513 	.txhdr_construct		= mlxsw_sp_txhdr_construct,
2514 	.txhdr_len			= MLXSW_TXHDR_LEN,
2515 	.profile			= &mlxsw_sp_config_profile,
2516 };
2517 
2518 static int
2519 mlxsw_sp_port_fdb_flush_by_port(const struct mlxsw_sp_port *mlxsw_sp_port)
2520 {
2521 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2522 	char sfdf_pl[MLXSW_REG_SFDF_LEN];
2523 
2524 	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT);
2525 	mlxsw_reg_sfdf_system_port_set(sfdf_pl, mlxsw_sp_port->local_port);
2526 
2527 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
2528 }
2529 
2530 static int
2531 mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
2532 				    u16 fid)
2533 {
2534 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2535 	char sfdf_pl[MLXSW_REG_SFDF_LEN];
2536 
2537 	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID);
2538 	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
2539 	mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl,
2540 						mlxsw_sp_port->local_port);
2541 
2542 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
2543 }
2544 
2545 static int
2546 mlxsw_sp_port_fdb_flush_by_lag_id(const struct mlxsw_sp_port *mlxsw_sp_port)
2547 {
2548 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2549 	char sfdf_pl[MLXSW_REG_SFDF_LEN];
2550 
2551 	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG);
2552 	mlxsw_reg_sfdf_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);
2553 
2554 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
2555 }
2556 
2557 static int
2558 mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
2559 				      u16 fid)
2560 {
2561 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2562 	char sfdf_pl[MLXSW_REG_SFDF_LEN];
2563 
2564 	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID);
2565 	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
2566 	mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);
2567 
2568 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
2569 }
2570 
2571 static int
2572 __mlxsw_sp_port_fdb_flush(const struct mlxsw_sp_port *mlxsw_sp_port)
2573 {
2574 	int err, last_err = 0;
2575 	u16 vid;
2576 
2577 	for (vid = 1; vid < VLAN_N_VID - 1; vid++) {
2578 		err = mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, vid);
2579 		if (err)
2580 			last_err = err;
2581 	}
2582 
2583 	return last_err;
2584 }
2585 
2586 static int
2587 __mlxsw_sp_port_fdb_flush_lagged(const struct mlxsw_sp_port *mlxsw_sp_port)
2588 {
2589 	int err, last_err = 0;
2590 	u16 vid;
2591 
2592 	for (vid = 1; vid < VLAN_N_VID - 1; vid++) {
2593 		err = mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port, vid);
2594 		if (err)
2595 			last_err = err;
2596 	}
2597 
2598 	return last_err;
2599 }
2600 
2601 static int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port)
2602 {
2603 	if (!list_empty(&mlxsw_sp_port->vports_list))
2604 		if (mlxsw_sp_port->lagged)
2605 			return __mlxsw_sp_port_fdb_flush_lagged(mlxsw_sp_port);
2606 		else
2607 			return __mlxsw_sp_port_fdb_flush(mlxsw_sp_port);
2608 	else
2609 		if (mlxsw_sp_port->lagged)
2610 			return mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port);
2611 		else
2612 			return mlxsw_sp_port_fdb_flush_by_port(mlxsw_sp_port);
2613 }
2614 
2615 static int mlxsw_sp_vport_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_vport)
2616 {
2617 	u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_vport);
2618 	u16 fid = mlxsw_sp_vfid_to_fid(vfid);
2619 
2620 	if (mlxsw_sp_vport->lagged)
2621 		return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_vport,
2622 							     fid);
2623 	else
2624 		return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_vport, fid);
2625 }
2626 
2627 static bool mlxsw_sp_port_dev_check(const struct net_device *dev)
2628 {
2629 	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
2630 }
2631 
2632 static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port)
2633 {
2634 	struct net_device *dev = mlxsw_sp_port->dev;
2635 	int err;
2636 
2637 	/* When port is not bridged untagged packets are tagged with
2638 	 * PVID=VID=1, thereby creating an implicit VLAN interface in
2639 	 * the device. Remove it and let bridge code take care of its
2640 	 * own VLANs.
2641 	 */
2642 	err = mlxsw_sp_port_kill_vid(dev, 0, 1);
2643 	if (err)
2644 		return err;
2645 
2646 	mlxsw_sp_port->learning = 1;
2647 	mlxsw_sp_port->learning_sync = 1;
2648 	mlxsw_sp_port->uc_flood = 1;
2649 	mlxsw_sp_port->bridged = 1;
2650 
2651 	return 0;
2652 }
2653 
2654 static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
2655 				      bool flush_fdb)
2656 {
2657 	struct net_device *dev = mlxsw_sp_port->dev;
2658 
2659 	if (flush_fdb && mlxsw_sp_port_fdb_flush(mlxsw_sp_port))
2660 		netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n");
2661 
2662 	mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
2663 
2664 	mlxsw_sp_port->learning = 0;
2665 	mlxsw_sp_port->learning_sync = 0;
2666 	mlxsw_sp_port->uc_flood = 0;
2667 	mlxsw_sp_port->bridged = 0;
2668 
2669 	/* Add implicit VLAN interface in the device, so that untagged
2670 	 * packets will be classified to the default vFID.
2671 	 */
2672 	return mlxsw_sp_port_add_vid(dev, 0, 1);
2673 }
2674 
2675 static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
2676 					 struct net_device *br_dev)
2677 {
2678 	return !mlxsw_sp->master_bridge.dev ||
2679 	       mlxsw_sp->master_bridge.dev == br_dev;
2680 }
2681 
2682 static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
2683 				       struct net_device *br_dev)
2684 {
2685 	mlxsw_sp->master_bridge.dev = br_dev;
2686 	mlxsw_sp->master_bridge.ref_count++;
2687 }
2688 
2689 static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp,
2690 				       struct net_device *br_dev)
2691 {
2692 	if (--mlxsw_sp->master_bridge.ref_count == 0)
2693 		mlxsw_sp->master_bridge.dev = NULL;
2694 }
2695 
2696 static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
2697 {
2698 	char sldr_pl[MLXSW_REG_SLDR_LEN];
2699 
2700 	mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
2701 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
2702 }
2703 
2704 static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
2705 {
2706 	char sldr_pl[MLXSW_REG_SLDR_LEN];
2707 
2708 	mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
2709 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
2710 }
2711 
2712 static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
2713 				     u16 lag_id, u8 port_index)
2714 {
2715 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2716 	char slcor_pl[MLXSW_REG_SLCOR_LEN];
2717 
2718 	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
2719 				      lag_id, port_index);
2720 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
2721 }
2722 
2723 static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
2724 					u16 lag_id)
2725 {
2726 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2727 	char slcor_pl[MLXSW_REG_SLCOR_LEN];
2728 
2729 	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
2730 					 lag_id);
2731 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
2732 }
2733 
2734 static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
2735 					u16 lag_id)
2736 {
2737 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2738 	char slcor_pl[MLXSW_REG_SLCOR_LEN];
2739 
2740 	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
2741 					lag_id);
2742 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
2743 }
2744 
2745 static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
2746 					 u16 lag_id)
2747 {
2748 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2749 	char slcor_pl[MLXSW_REG_SLCOR_LEN];
2750 
2751 	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
2752 					 lag_id);
2753 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
2754 }
2755 
2756 static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
2757 				  struct net_device *lag_dev,
2758 				  u16 *p_lag_id)
2759 {
2760 	struct mlxsw_sp_upper *lag;
2761 	int free_lag_id = -1;
2762 	int i;
2763 
2764 	for (i = 0; i < MLXSW_SP_LAG_MAX; i++) {
2765 		lag = mlxsw_sp_lag_get(mlxsw_sp, i);
2766 		if (lag->ref_count) {
2767 			if (lag->dev == lag_dev) {
2768 				*p_lag_id = i;
2769 				return 0;
2770 			}
2771 		} else if (free_lag_id < 0) {
2772 			free_lag_id = i;
2773 		}
2774 	}
2775 	if (free_lag_id < 0)
2776 		return -EBUSY;
2777 	*p_lag_id = free_lag_id;
2778 	return 0;
2779 }
2780 
2781 static bool
2782 mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
2783 			  struct net_device *lag_dev,
2784 			  struct netdev_lag_upper_info *lag_upper_info)
2785 {
2786 	u16 lag_id;
2787 
2788 	if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0)
2789 		return false;
2790 	if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
2791 		return false;
2792 	return true;
2793 }
2794 
2795 static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
2796 				       u16 lag_id, u8 *p_port_index)
2797 {
2798 	int i;
2799 
2800 	for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
2801 		if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
2802 			*p_port_index = i;
2803 			return 0;
2804 		}
2805 	}
2806 	return -EBUSY;
2807 }
2808 
2809 static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
2810 				  struct net_device *lag_dev)
2811 {
2812 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2813 	struct mlxsw_sp_upper *lag;
2814 	u16 lag_id;
2815 	u8 port_index;
2816 	int err;
2817 
2818 	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
2819 	if (err)
2820 		return err;
2821 	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
2822 	if (!lag->ref_count) {
2823 		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
2824 		if (err)
2825 			return err;
2826 		lag->dev = lag_dev;
2827 	}
2828 
2829 	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
2830 	if (err)
2831 		return err;
2832 	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
2833 	if (err)
2834 		goto err_col_port_add;
2835 	err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
2836 	if (err)
2837 		goto err_col_port_enable;
2838 
2839 	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
2840 				   mlxsw_sp_port->local_port);
2841 	mlxsw_sp_port->lag_id = lag_id;
2842 	mlxsw_sp_port->lagged = 1;
2843 	lag->ref_count++;
2844 	return 0;
2845 
2846 err_col_port_enable:
2847 	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
2848 err_col_port_add:
2849 	if (!lag->ref_count)
2850 		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
2851 	return err;
2852 }
2853 
2854 static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
2855 				       struct net_device *br_dev,
2856 				       bool flush_fdb);
2857 
2858 static int mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
2859 				   struct net_device *lag_dev)
2860 {
2861 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2862 	struct mlxsw_sp_port *mlxsw_sp_vport;
2863 	struct mlxsw_sp_upper *lag;
2864 	u16 lag_id = mlxsw_sp_port->lag_id;
2865 	int err;
2866 
2867 	if (!mlxsw_sp_port->lagged)
2868 		return 0;
2869 	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
2870 	WARN_ON(lag->ref_count == 0);
2871 
2872 	err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
2873 	if (err)
2874 		return err;
2875 	err = mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
2876 	if (err)
2877 		return err;
2878 
2879 	/* In case we leave a LAG device that has bridges built on top,
2880 	 * then their teardown sequence is never issued and we need to
2881 	 * invoke the necessary cleanup routines ourselves.
2882 	 */
2883 	list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
2884 			    vport.list) {
2885 		struct net_device *br_dev;
2886 
2887 		if (!mlxsw_sp_vport->bridged)
2888 			continue;
2889 
2890 		br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport);
2891 		mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, false);
2892 	}
2893 
2894 	if (mlxsw_sp_port->bridged) {
2895 		mlxsw_sp_port_active_vlans_del(mlxsw_sp_port);
2896 		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, false);
2897 		mlxsw_sp_master_bridge_dec(mlxsw_sp, NULL);
2898 	}
2899 
2900 	if (lag->ref_count == 1) {
2901 		if (mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port))
2902 			netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n");
2903 		err = mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
2904 		if (err)
2905 			return err;
2906 	}
2907 
2908 	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
2909 				     mlxsw_sp_port->local_port);
2910 	mlxsw_sp_port->lagged = 0;
2911 	lag->ref_count--;
2912 	return 0;
2913 }
2914 
2915 static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
2916 				      u16 lag_id)
2917 {
2918 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2919 	char sldr_pl[MLXSW_REG_SLDR_LEN];
2920 
2921 	mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
2922 					 mlxsw_sp_port->local_port);
2923 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
2924 }
2925 
2926 static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
2927 					 u16 lag_id)
2928 {
2929 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2930 	char sldr_pl[MLXSW_REG_SLDR_LEN];
2931 
2932 	mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
2933 					    mlxsw_sp_port->local_port);
2934 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
2935 }
2936 
2937 static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
2938 				       bool lag_tx_enabled)
2939 {
2940 	if (lag_tx_enabled)
2941 		return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
2942 						  mlxsw_sp_port->lag_id);
2943 	else
2944 		return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
2945 						     mlxsw_sp_port->lag_id);
2946 }
2947 
2948 static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
2949 				     struct netdev_lag_lower_state_info *info)
2950 {
2951 	return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
2952 }
2953 
2954 static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port,
2955 				   struct net_device *vlan_dev)
2956 {
2957 	struct mlxsw_sp_port *mlxsw_sp_vport;
2958 	u16 vid = vlan_dev_vlan_id(vlan_dev);
2959 
2960 	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
2961 	if (!mlxsw_sp_vport) {
2962 		WARN_ON(!mlxsw_sp_vport);
2963 		return -EINVAL;
2964 	}
2965 
2966 	mlxsw_sp_vport->dev = vlan_dev;
2967 
2968 	return 0;
2969 }
2970 
2971 static int mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
2972 				     struct net_device *vlan_dev)
2973 {
2974 	struct mlxsw_sp_port *mlxsw_sp_vport;
2975 	u16 vid = vlan_dev_vlan_id(vlan_dev);
2976 
2977 	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
2978 	if (!mlxsw_sp_vport) {
2979 		WARN_ON(!mlxsw_sp_vport);
2980 		return -EINVAL;
2981 	}
2982 
2983 	/* When removing a VLAN device while still bridged we should first
2984 	 * remove it from the bridge, as we receive the bridge's notification
2985 	 * when the vPort is already gone.
2986 	 */
2987 	if (mlxsw_sp_vport->bridged) {
2988 		struct net_device *br_dev;
2989 
2990 		br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport);
2991 		mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, true);
2992 	}
2993 
2994 	mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
2995 
2996 	return 0;
2997 }
2998 
2999 static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
3000 					       unsigned long event, void *ptr)
3001 {
3002 	struct netdev_notifier_changeupper_info *info;
3003 	struct mlxsw_sp_port *mlxsw_sp_port;
3004 	struct net_device *upper_dev;
3005 	struct mlxsw_sp *mlxsw_sp;
3006 	int err;
3007 
3008 	mlxsw_sp_port = netdev_priv(dev);
3009 	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3010 	info = ptr;
3011 
3012 	switch (event) {
3013 	case NETDEV_PRECHANGEUPPER:
3014 		upper_dev = info->upper_dev;
3015 		if (!info->master || !info->linking)
3016 			break;
3017 		/* HW limitation forbids to put ports to multiple bridges. */
3018 		if (netif_is_bridge_master(upper_dev) &&
3019 		    !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
3020 			return NOTIFY_BAD;
3021 		if (netif_is_lag_master(upper_dev) &&
3022 		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
3023 					       info->upper_info))
3024 			return NOTIFY_BAD;
3025 		break;
3026 	case NETDEV_CHANGEUPPER:
3027 		upper_dev = info->upper_dev;
3028 		if (is_vlan_dev(upper_dev)) {
3029 			if (info->linking) {
3030 				err = mlxsw_sp_port_vlan_link(mlxsw_sp_port,
3031 							      upper_dev);
3032 				if (err) {
3033 					netdev_err(dev, "Failed to link VLAN device\n");
3034 					return NOTIFY_BAD;
3035 				}
3036 			} else {
3037 				err = mlxsw_sp_port_vlan_unlink(mlxsw_sp_port,
3038 								upper_dev);
3039 				if (err) {
3040 					netdev_err(dev, "Failed to unlink VLAN device\n");
3041 					return NOTIFY_BAD;
3042 				}
3043 			}
3044 		} else if (netif_is_bridge_master(upper_dev)) {
3045 			if (info->linking) {
3046 				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port);
3047 				if (err) {
3048 					netdev_err(dev, "Failed to join bridge\n");
3049 					return NOTIFY_BAD;
3050 				}
3051 				mlxsw_sp_master_bridge_inc(mlxsw_sp, upper_dev);
3052 			} else {
3053 				err = mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
3054 								 true);
3055 				mlxsw_sp_master_bridge_dec(mlxsw_sp, upper_dev);
3056 				if (err) {
3057 					netdev_err(dev, "Failed to leave bridge\n");
3058 					return NOTIFY_BAD;
3059 				}
3060 			}
3061 		} else if (netif_is_lag_master(upper_dev)) {
3062 			if (info->linking) {
3063 				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
3064 							     upper_dev);
3065 				if (err) {
3066 					netdev_err(dev, "Failed to join link aggregation\n");
3067 					return NOTIFY_BAD;
3068 				}
3069 			} else {
3070 				err = mlxsw_sp_port_lag_leave(mlxsw_sp_port,
3071 							      upper_dev);
3072 				if (err) {
3073 					netdev_err(dev, "Failed to leave link aggregation\n");
3074 					return NOTIFY_BAD;
3075 				}
3076 			}
3077 		}
3078 		break;
3079 	}
3080 
3081 	return NOTIFY_DONE;
3082 }
3083 
3084 static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
3085 					       unsigned long event, void *ptr)
3086 {
3087 	struct netdev_notifier_changelowerstate_info *info;
3088 	struct mlxsw_sp_port *mlxsw_sp_port;
3089 	int err;
3090 
3091 	mlxsw_sp_port = netdev_priv(dev);
3092 	info = ptr;
3093 
3094 	switch (event) {
3095 	case NETDEV_CHANGELOWERSTATE:
3096 		if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
3097 			err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
3098 							info->lower_state_info);
3099 			if (err)
3100 				netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
3101 		}
3102 		break;
3103 	}
3104 
3105 	return NOTIFY_DONE;
3106 }
3107 
3108 static int mlxsw_sp_netdevice_port_event(struct net_device *dev,
3109 					 unsigned long event, void *ptr)
3110 {
3111 	switch (event) {
3112 	case NETDEV_PRECHANGEUPPER:
3113 	case NETDEV_CHANGEUPPER:
3114 		return mlxsw_sp_netdevice_port_upper_event(dev, event, ptr);
3115 	case NETDEV_CHANGELOWERSTATE:
3116 		return mlxsw_sp_netdevice_port_lower_event(dev, event, ptr);
3117 	}
3118 
3119 	return NOTIFY_DONE;
3120 }
3121 
3122 static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
3123 					unsigned long event, void *ptr)
3124 {
3125 	struct net_device *dev;
3126 	struct list_head *iter;
3127 	int ret;
3128 
3129 	netdev_for_each_lower_dev(lag_dev, dev, iter) {
3130 		if (mlxsw_sp_port_dev_check(dev)) {
3131 			ret = mlxsw_sp_netdevice_port_event(dev, event, ptr);
3132 			if (ret == NOTIFY_BAD)
3133 				return ret;
3134 		}
3135 	}
3136 
3137 	return NOTIFY_DONE;
3138 }
3139 
3140 static struct mlxsw_sp_vfid *
3141 mlxsw_sp_br_vfid_find(const struct mlxsw_sp *mlxsw_sp,
3142 		      const struct net_device *br_dev)
3143 {
3144 	struct mlxsw_sp_vfid *vfid;
3145 
3146 	list_for_each_entry(vfid, &mlxsw_sp->br_vfids.list, list) {
3147 		if (vfid->br_dev == br_dev)
3148 			return vfid;
3149 	}
3150 
3151 	return NULL;
3152 }
3153 
3154 static u16 mlxsw_sp_vfid_to_br_vfid(u16 vfid)
3155 {
3156 	return vfid - MLXSW_SP_VFID_PORT_MAX;
3157 }
3158 
3159 static u16 mlxsw_sp_br_vfid_to_vfid(u16 br_vfid)
3160 {
3161 	return MLXSW_SP_VFID_PORT_MAX + br_vfid;
3162 }
3163 
3164 static u16 mlxsw_sp_avail_br_vfid_get(const struct mlxsw_sp *mlxsw_sp)
3165 {
3166 	return find_first_zero_bit(mlxsw_sp->br_vfids.mapped,
3167 				   MLXSW_SP_VFID_BR_MAX);
3168 }
3169 
3170 static struct mlxsw_sp_vfid *mlxsw_sp_br_vfid_create(struct mlxsw_sp *mlxsw_sp,
3171 						     struct net_device *br_dev)
3172 {
3173 	struct device *dev = mlxsw_sp->bus_info->dev;
3174 	struct mlxsw_sp_vfid *vfid;
3175 	u16 n_vfid;
3176 	int err;
3177 
3178 	n_vfid = mlxsw_sp_br_vfid_to_vfid(mlxsw_sp_avail_br_vfid_get(mlxsw_sp));
3179 	if (n_vfid == MLXSW_SP_VFID_MAX) {
3180 		dev_err(dev, "No available vFIDs\n");
3181 		return ERR_PTR(-ERANGE);
3182 	}
3183 
3184 	err = __mlxsw_sp_vfid_create(mlxsw_sp, n_vfid);
3185 	if (err) {
3186 		dev_err(dev, "Failed to create vFID=%d\n", n_vfid);
3187 		return ERR_PTR(err);
3188 	}
3189 
3190 	vfid = kzalloc(sizeof(*vfid), GFP_KERNEL);
3191 	if (!vfid)
3192 		goto err_allocate_vfid;
3193 
3194 	vfid->vfid = n_vfid;
3195 	vfid->br_dev = br_dev;
3196 
3197 	list_add(&vfid->list, &mlxsw_sp->br_vfids.list);
3198 	set_bit(mlxsw_sp_vfid_to_br_vfid(n_vfid), mlxsw_sp->br_vfids.mapped);
3199 
3200 	return vfid;
3201 
3202 err_allocate_vfid:
3203 	__mlxsw_sp_vfid_destroy(mlxsw_sp, n_vfid);
3204 	return ERR_PTR(-ENOMEM);
3205 }
3206 
3207 static void mlxsw_sp_br_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
3208 				     struct mlxsw_sp_vfid *vfid)
3209 {
3210 	u16 br_vfid = mlxsw_sp_vfid_to_br_vfid(vfid->vfid);
3211 
3212 	clear_bit(br_vfid, mlxsw_sp->br_vfids.mapped);
3213 	list_del(&vfid->list);
3214 
3215 	__mlxsw_sp_vfid_destroy(mlxsw_sp, vfid->vfid);
3216 
3217 	kfree(vfid);
3218 }
3219 
3220 static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
3221 				       struct net_device *br_dev,
3222 				       bool flush_fdb)
3223 {
3224 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
3225 	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
3226 	struct net_device *dev = mlxsw_sp_vport->dev;
3227 	struct mlxsw_sp_vfid *vfid, *new_vfid;
3228 	int err;
3229 
3230 	vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev);
3231 	if (!vfid) {
3232 		WARN_ON(!vfid);
3233 		return -EINVAL;
3234 	}
3235 
3236 	/* We need a vFID to go back to after leaving the bridge's vFID. */
3237 	new_vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid);
3238 	if (!new_vfid) {
3239 		new_vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid);
3240 		if (IS_ERR(new_vfid)) {
3241 			netdev_err(dev, "Failed to create vFID for VID=%d\n",
3242 				   vid);
3243 			return PTR_ERR(new_vfid);
3244 		}
3245 	}
3246 
3247 	/* Invalidate existing {Port, VID} to vFID mapping and create a new
3248 	 * one for the new vFID.
3249 	 */
3250 	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
3251 					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
3252 					   false,
3253 					   mlxsw_sp_vfid_to_fid(vfid->vfid),
3254 					   vid);
3255 	if (err) {
3256 		netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n",
3257 			   vfid->vfid);
3258 		goto err_port_vid_to_fid_invalidate;
3259 	}
3260 
3261 	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
3262 					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
3263 					   true,
3264 					   mlxsw_sp_vfid_to_fid(new_vfid->vfid),
3265 					   vid);
3266 	if (err) {
3267 		netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n",
3268 			   new_vfid->vfid);
3269 		goto err_port_vid_to_fid_validate;
3270 	}
3271 
3272 	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
3273 	if (err) {
3274 		netdev_err(dev, "Failed to disable learning\n");
3275 		goto err_port_vid_learning_set;
3276 	}
3277 
3278 	err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false,
3279 				       false);
3280 	if (err) {
3281 		netdev_err(dev, "Failed clear to clear flooding\n");
3282 		goto err_vport_flood_set;
3283 	}
3284 
3285 	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
3286 					  MLXSW_REG_SPMS_STATE_FORWARDING);
3287 	if (err) {
3288 		netdev_err(dev, "Failed to set STP state\n");
3289 		goto err_port_stp_state_set;
3290 	}
3291 
3292 	if (flush_fdb && mlxsw_sp_vport_fdb_flush(mlxsw_sp_vport))
3293 		netdev_err(dev, "Failed to flush FDB\n");
3294 
3295 	/* Switch between the vFIDs and destroy the old one if needed. */
3296 	new_vfid->nr_vports++;
3297 	mlxsw_sp_vport->vport.vfid = new_vfid;
3298 	vfid->nr_vports--;
3299 	if (!vfid->nr_vports)
3300 		mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid);
3301 
3302 	mlxsw_sp_vport->learning = 0;
3303 	mlxsw_sp_vport->learning_sync = 0;
3304 	mlxsw_sp_vport->uc_flood = 0;
3305 	mlxsw_sp_vport->bridged = 0;
3306 
3307 	return 0;
3308 
3309 err_port_stp_state_set:
3310 err_vport_flood_set:
3311 err_port_vid_learning_set:
3312 err_port_vid_to_fid_validate:
3313 err_port_vid_to_fid_invalidate:
3314 	/* Rollback vFID only if new. */
3315 	if (!new_vfid->nr_vports)
3316 		mlxsw_sp_vfid_destroy(mlxsw_sp, new_vfid);
3317 	return err;
3318 }
3319 
3320 static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
3321 				      struct net_device *br_dev)
3322 {
3323 	struct mlxsw_sp_vfid *old_vfid = mlxsw_sp_vport->vport.vfid;
3324 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
3325 	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
3326 	struct net_device *dev = mlxsw_sp_vport->dev;
3327 	struct mlxsw_sp_vfid *vfid;
3328 	int err;
3329 
3330 	vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev);
3331 	if (!vfid) {
3332 		vfid = mlxsw_sp_br_vfid_create(mlxsw_sp, br_dev);
3333 		if (IS_ERR(vfid)) {
3334 			netdev_err(dev, "Failed to create bridge vFID\n");
3335 			return PTR_ERR(vfid);
3336 		}
3337 	}
3338 
3339 	err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, true, false);
3340 	if (err) {
3341 		netdev_err(dev, "Failed to setup flooding for vFID=%d\n",
3342 			   vfid->vfid);
3343 		goto err_port_flood_set;
3344 	}
3345 
3346 	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
3347 	if (err) {
3348 		netdev_err(dev, "Failed to enable learning\n");
3349 		goto err_port_vid_learning_set;
3350 	}
3351 
3352 	/* We need to invalidate existing {Port, VID} to vFID mapping and
3353 	 * create a new one for the bridge's vFID.
3354 	 */
3355 	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
3356 					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
3357 					   false,
3358 					   mlxsw_sp_vfid_to_fid(old_vfid->vfid),
3359 					   vid);
3360 	if (err) {
3361 		netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n",
3362 			   old_vfid->vfid);
3363 		goto err_port_vid_to_fid_invalidate;
3364 	}
3365 
3366 	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
3367 					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
3368 					   true,
3369 					   mlxsw_sp_vfid_to_fid(vfid->vfid),
3370 					   vid);
3371 	if (err) {
3372 		netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n",
3373 			   vfid->vfid);
3374 		goto err_port_vid_to_fid_validate;
3375 	}
3376 
3377 	/* Switch between the vFIDs and destroy the old one if needed. */
3378 	vfid->nr_vports++;
3379 	mlxsw_sp_vport->vport.vfid = vfid;
3380 	old_vfid->nr_vports--;
3381 	if (!old_vfid->nr_vports)
3382 		mlxsw_sp_vfid_destroy(mlxsw_sp, old_vfid);
3383 
3384 	mlxsw_sp_vport->learning = 1;
3385 	mlxsw_sp_vport->learning_sync = 1;
3386 	mlxsw_sp_vport->uc_flood = 1;
3387 	mlxsw_sp_vport->bridged = 1;
3388 
3389 	return 0;
3390 
3391 err_port_vid_to_fid_validate:
3392 	mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
3393 				     MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false,
3394 				     mlxsw_sp_vfid_to_fid(old_vfid->vfid), vid);
3395 err_port_vid_to_fid_invalidate:
3396 	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
3397 err_port_vid_learning_set:
3398 	mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false, false);
3399 err_port_flood_set:
3400 	if (!vfid->nr_vports)
3401 		mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid);
3402 	return err;
3403 }
3404 
3405 static bool
3406 mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port,
3407 				  const struct net_device *br_dev)
3408 {
3409 	struct mlxsw_sp_port *mlxsw_sp_vport;
3410 
3411 	list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
3412 			    vport.list) {
3413 		if (mlxsw_sp_vport_br_get(mlxsw_sp_vport) == br_dev)
3414 			return false;
3415 	}
3416 
3417 	return true;
3418 }
3419 
3420 static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
3421 					  unsigned long event, void *ptr,
3422 					  u16 vid)
3423 {
3424 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
3425 	struct netdev_notifier_changeupper_info *info = ptr;
3426 	struct mlxsw_sp_port *mlxsw_sp_vport;
3427 	struct net_device *upper_dev;
3428 	int err;
3429 
3430 	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
3431 
3432 	switch (event) {
3433 	case NETDEV_PRECHANGEUPPER:
3434 		upper_dev = info->upper_dev;
3435 		if (!info->master || !info->linking)
3436 			break;
3437 		if (!netif_is_bridge_master(upper_dev))
3438 			return NOTIFY_BAD;
3439 		/* We can't have multiple VLAN interfaces configured on
3440 		 * the same port and being members in the same bridge.
3441 		 */
3442 		if (!mlxsw_sp_port_master_bridge_check(mlxsw_sp_port,
3443 						       upper_dev))
3444 			return NOTIFY_BAD;
3445 		break;
3446 	case NETDEV_CHANGEUPPER:
3447 		upper_dev = info->upper_dev;
3448 		if (!info->master)
3449 			break;
3450 		if (info->linking) {
3451 			if (!mlxsw_sp_vport) {
3452 				WARN_ON(!mlxsw_sp_vport);
3453 				return NOTIFY_BAD;
3454 			}
3455 			err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport,
3456 							 upper_dev);
3457 			if (err) {
3458 				netdev_err(dev, "Failed to join bridge\n");
3459 				return NOTIFY_BAD;
3460 			}
3461 		} else {
3462 			/* We ignore bridge's unlinking notifications if vPort
3463 			 * is gone, since we already left the bridge when the
3464 			 * VLAN device was unlinked from the real device.
3465 			 */
3466 			if (!mlxsw_sp_vport)
3467 				return NOTIFY_DONE;
3468 			err = mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport,
3469 							  upper_dev, true);
3470 			if (err) {
3471 				netdev_err(dev, "Failed to leave bridge\n");
3472 				return NOTIFY_BAD;
3473 			}
3474 		}
3475 	}
3476 
3477 	return NOTIFY_DONE;
3478 }
3479 
3480 static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev,
3481 					      unsigned long event, void *ptr,
3482 					      u16 vid)
3483 {
3484 	struct net_device *dev;
3485 	struct list_head *iter;
3486 	int ret;
3487 
3488 	netdev_for_each_lower_dev(lag_dev, dev, iter) {
3489 		if (mlxsw_sp_port_dev_check(dev)) {
3490 			ret = mlxsw_sp_netdevice_vport_event(dev, event, ptr,
3491 							     vid);
3492 			if (ret == NOTIFY_BAD)
3493 				return ret;
3494 		}
3495 	}
3496 
3497 	return NOTIFY_DONE;
3498 }
3499 
3500 static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
3501 					 unsigned long event, void *ptr)
3502 {
3503 	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
3504 	u16 vid = vlan_dev_vlan_id(vlan_dev);
3505 
3506 	if (mlxsw_sp_port_dev_check(real_dev))
3507 		return mlxsw_sp_netdevice_vport_event(real_dev, event, ptr,
3508 						      vid);
3509 	else if (netif_is_lag_master(real_dev))
3510 		return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr,
3511 							  vid);
3512 
3513 	return NOTIFY_DONE;
3514 }
3515 
3516 static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
3517 				    unsigned long event, void *ptr)
3518 {
3519 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3520 
3521 	if (mlxsw_sp_port_dev_check(dev))
3522 		return mlxsw_sp_netdevice_port_event(dev, event, ptr);
3523 
3524 	if (netif_is_lag_master(dev))
3525 		return mlxsw_sp_netdevice_lag_event(dev, event, ptr);
3526 
3527 	if (is_vlan_dev(dev))
3528 		return mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
3529 
3530 	return NOTIFY_DONE;
3531 }
3532 
3533 static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
3534 	.notifier_call = mlxsw_sp_netdevice_event,
3535 };
3536 
3537 static int __init mlxsw_sp_module_init(void)
3538 {
3539 	int err;
3540 
3541 	register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
3542 	err = mlxsw_core_driver_register(&mlxsw_sp_driver);
3543 	if (err)
3544 		goto err_core_driver_register;
3545 	return 0;
3546 
3547 err_core_driver_register:
3548 	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
3549 	return err;
3550 }
3551 
3552 static void __exit mlxsw_sp_module_exit(void)
3553 {
3554 	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
3555 	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
3556 }
3557 
3558 module_init(mlxsw_sp_module_init);
3559 module_exit(mlxsw_sp_module_exit);
3560 
3561 MODULE_LICENSE("Dual BSD/GPL");
3562 MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
3563 MODULE_DESCRIPTION("Mellanox Spectrum driver");
3564 MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SPECTRUM);
3565