1 /*
2  * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
3  * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved.
4  * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com>
5  * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
6  * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the names of the copyright holders nor the names of its
17  *    contributors may be used to endorse or promote products derived from
18  *    this software without specific prior written permission.
19  *
20  * Alternatively, this software may be distributed under the terms of the
21  * GNU General Public License ("GPL") version 2 as published by the Free
22  * Software Foundation.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34  * POSSIBILITY OF SUCH DAMAGE.
35  */
36 
37 #include <linux/kernel.h>
38 #include <linux/module.h>
39 #include <linux/types.h>
40 #include <linux/pci.h>
41 #include <linux/netdevice.h>
42 #include <linux/etherdevice.h>
43 #include <linux/ethtool.h>
44 #include <linux/slab.h>
45 #include <linux/device.h>
46 #include <linux/skbuff.h>
47 #include <linux/if_vlan.h>
48 #include <linux/if_bridge.h>
49 #include <linux/workqueue.h>
50 #include <linux/jiffies.h>
51 #include <linux/bitops.h>
52 #include <linux/list.h>
53 #include <linux/notifier.h>
54 #include <linux/dcbnl.h>
55 #include <linux/inetdevice.h>
56 #include <net/switchdev.h>
57 #include <net/pkt_cls.h>
58 #include <net/tc_act/tc_mirred.h>
59 #include <net/netevent.h>
60 #include <net/tc_act/tc_sample.h>
61 #include <net/addrconf.h>
62 
63 #include "spectrum.h"
64 #include "pci.h"
65 #include "core.h"
66 #include "reg.h"
67 #include "port.h"
68 #include "trap.h"
69 #include "txheader.h"
70 #include "spectrum_cnt.h"
71 #include "spectrum_dpipe.h"
72 #include "../mlxfw/mlxfw.h"
73 
#define MLXSW_FWREV_MAJOR 13
#define MLXSW_FWREV_MINOR 1420
#define MLXSW_FWREV_SUBMINOR 122

/* Minimum firmware revision this driver is validated against; devices
 * running an older revision are upgraded in mlxsw_sp_fw_rev_validate().
 */
static const struct mlxsw_fw_rev mlxsw_sp_supported_fw_rev = {
	.major = MLXSW_FWREV_MAJOR,
	.minor = MLXSW_FWREV_MINOR,
	.subminor = MLXSW_FWREV_SUBMINOR
};

/* Firmware file requested from userspace, e.g.
 * "mellanox/mlxsw_spectrum-13.1420.122.mfa2".
 */
#define MLXSW_SP_FW_FILENAME \
	"mellanox/mlxsw_spectrum-" __stringify(MLXSW_FWREV_MAJOR) \
	"." __stringify(MLXSW_FWREV_MINOR) \
	"." __stringify(MLXSW_FWREV_SUBMINOR) ".mfa2"

static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp_driver_version[] = "1.0";
91 
/* Tx header field accessors. Offsets and bit positions below are within
 * the Tx header (MLXSW_TXHDR_LEN bytes) prepended to every packet sent
 * by the driver; see mlxsw_sp_txhdr_construct().
 */

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
159 
/* Glue between the generic mlxfw flashing code and a Spectrum device.
 * The mlxfw callbacks receive the embedded mlxfw_dev and container_of()
 * it back to reach the owning mlxsw_sp instance.
 */
struct mlxsw_sp_mlxfw_dev {
	struct mlxfw_dev mlxfw_dev;	/* embedded; must stay first arg to container_of() users */
	struct mlxsw_sp *mlxsw_sp;	/* owning device */
};
164 
165 static int mlxsw_sp_component_query(struct mlxfw_dev *mlxfw_dev,
166 				    u16 component_index, u32 *p_max_size,
167 				    u8 *p_align_bits, u16 *p_max_write_size)
168 {
169 	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
170 		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
171 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
172 	char mcqi_pl[MLXSW_REG_MCQI_LEN];
173 	int err;
174 
175 	mlxsw_reg_mcqi_pack(mcqi_pl, component_index);
176 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcqi), mcqi_pl);
177 	if (err)
178 		return err;
179 	mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits,
180 			      p_max_write_size);
181 
182 	*p_align_bits = max_t(u8, *p_align_bits, 2);
183 	*p_max_write_size = min_t(u16, *p_max_write_size,
184 				  MLXSW_REG_MCDA_MAX_DATA_LEN);
185 	return 0;
186 }
187 
188 static int mlxsw_sp_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle)
189 {
190 	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
191 		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
192 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
193 	char mcc_pl[MLXSW_REG_MCC_LEN];
194 	u8 control_state;
195 	int err;
196 
197 	mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0);
198 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
199 	if (err)
200 		return err;
201 
202 	mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state);
203 	if (control_state != MLXFW_FSM_STATE_IDLE)
204 		return -EBUSY;
205 
206 	mlxsw_reg_mcc_pack(mcc_pl,
207 			   MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE,
208 			   0, *fwhandle, 0);
209 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
210 }
211 
212 static int mlxsw_sp_fsm_component_update(struct mlxfw_dev *mlxfw_dev,
213 					 u32 fwhandle, u16 component_index,
214 					 u32 component_size)
215 {
216 	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
217 		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
218 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
219 	char mcc_pl[MLXSW_REG_MCC_LEN];
220 
221 	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT,
222 			   component_index, fwhandle, component_size);
223 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
224 }
225 
226 static int mlxsw_sp_fsm_block_download(struct mlxfw_dev *mlxfw_dev,
227 				       u32 fwhandle, u8 *data, u16 size,
228 				       u32 offset)
229 {
230 	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
231 		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
232 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
233 	char mcda_pl[MLXSW_REG_MCDA_LEN];
234 
235 	mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data);
236 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcda), mcda_pl);
237 }
238 
239 static int mlxsw_sp_fsm_component_verify(struct mlxfw_dev *mlxfw_dev,
240 					 u32 fwhandle, u16 component_index)
241 {
242 	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
243 		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
244 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
245 	char mcc_pl[MLXSW_REG_MCC_LEN];
246 
247 	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT,
248 			   component_index, fwhandle, 0);
249 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
250 }
251 
252 static int mlxsw_sp_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
253 {
254 	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
255 		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
256 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
257 	char mcc_pl[MLXSW_REG_MCC_LEN];
258 
259 	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE, 0,
260 			   fwhandle, 0);
261 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
262 }
263 
264 static int mlxsw_sp_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
265 				    enum mlxfw_fsm_state *fsm_state,
266 				    enum mlxfw_fsm_state_err *fsm_state_err)
267 {
268 	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
269 		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
270 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
271 	char mcc_pl[MLXSW_REG_MCC_LEN];
272 	u8 control_state;
273 	u8 error_code;
274 	int err;
275 
276 	mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0);
277 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
278 	if (err)
279 		return err;
280 
281 	mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state);
282 	*fsm_state = control_state;
283 	*fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code,
284 			       MLXFW_FSM_STATE_ERR_MAX);
285 	return 0;
286 }
287 
288 static void mlxsw_sp_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
289 {
290 	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
291 		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
292 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
293 	char mcc_pl[MLXSW_REG_MCC_LEN];
294 
295 	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL, 0,
296 			   fwhandle, 0);
297 	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
298 }
299 
300 static void mlxsw_sp_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
301 {
302 	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
303 		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
304 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
305 	char mcc_pl[MLXSW_REG_MCC_LEN];
306 
307 	mlxsw_reg_mcc_pack(mcc_pl,
308 			   MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0,
309 			   fwhandle, 0);
310 	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
311 }
312 
/* mlxfw callbacks implementing the firmware flashing flow on top of the
 * MCQI (query), MCC (FSM control) and MCDA (data download) registers.
 */
static const struct mlxfw_dev_ops mlxsw_sp_mlxfw_dev_ops = {
	.component_query	= mlxsw_sp_component_query,
	.fsm_lock		= mlxsw_sp_fsm_lock,
	.fsm_component_update	= mlxsw_sp_fsm_component_update,
	.fsm_block_download	= mlxsw_sp_fsm_block_download,
	.fsm_component_verify	= mlxsw_sp_fsm_component_verify,
	.fsm_activate		= mlxsw_sp_fsm_activate,
	.fsm_query_state	= mlxsw_sp_fsm_query_state,
	.fsm_cancel		= mlxsw_sp_fsm_cancel,
	.fsm_release		= mlxsw_sp_fsm_release
};
324 
325 static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp,
326 				   const struct firmware *firmware)
327 {
328 	struct mlxsw_sp_mlxfw_dev mlxsw_sp_mlxfw_dev = {
329 		.mlxfw_dev = {
330 			.ops = &mlxsw_sp_mlxfw_dev_ops,
331 			.psid = mlxsw_sp->bus_info->psid,
332 			.psid_size = strlen(mlxsw_sp->bus_info->psid),
333 		},
334 		.mlxsw_sp = mlxsw_sp
335 	};
336 
337 	return mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, firmware);
338 }
339 
340 static bool mlxsw_sp_fw_rev_ge(const struct mlxsw_fw_rev *a,
341 			       const struct mlxsw_fw_rev *b)
342 {
343 	if (a->major != b->major)
344 		return a->major > b->major;
345 	if (a->minor != b->minor)
346 		return a->minor > b->minor;
347 	return a->subminor >= b->subminor;
348 }
349 
350 static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
351 {
352 	const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev;
353 	const struct firmware *firmware;
354 	int err;
355 
356 	if (mlxsw_sp_fw_rev_ge(rev, &mlxsw_sp_supported_fw_rev))
357 		return 0;
358 
359 	dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d out of data\n",
360 		 rev->major, rev->minor, rev->subminor);
361 	dev_info(mlxsw_sp->bus_info->dev, "Upgrading firmware using file %s\n",
362 		 MLXSW_SP_FW_FILENAME);
363 
364 	err = request_firmware_direct(&firmware, MLXSW_SP_FW_FILENAME,
365 				      mlxsw_sp->bus_info->dev);
366 	if (err) {
367 		dev_err(mlxsw_sp->bus_info->dev, "Could not request firmware file %s\n",
368 			MLXSW_SP_FW_FILENAME);
369 		return err;
370 	}
371 
372 	err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware);
373 	release_firmware(firmware);
374 	return err;
375 }
376 
377 int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
378 			      unsigned int counter_index, u64 *packets,
379 			      u64 *bytes)
380 {
381 	char mgpc_pl[MLXSW_REG_MGPC_LEN];
382 	int err;
383 
384 	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
385 			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
386 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
387 	if (err)
388 		return err;
389 	if (packets)
390 		*packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
391 	if (bytes)
392 		*bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
393 	return 0;
394 }
395 
396 static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
397 				       unsigned int counter_index)
398 {
399 	char mgpc_pl[MLXSW_REG_MGPC_LEN];
400 
401 	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
402 			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
403 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
404 }
405 
406 int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
407 				unsigned int *p_counter_index)
408 {
409 	int err;
410 
411 	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
412 				     p_counter_index);
413 	if (err)
414 		return err;
415 	err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
416 	if (err)
417 		goto err_counter_clear;
418 	return 0;
419 
420 err_counter_clear:
421 	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
422 			      *p_counter_index);
423 	return err;
424 }
425 
426 void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
427 				unsigned int counter_index)
428 {
429 	 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
430 			       counter_index);
431 }
432 
/* Prepend the Tx header to @skb and fill it in. Every packet sent by the
 * driver itself is a control packet directed to tx_info->local_port on
 * the control TClass.
 */
static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	/* Zero the header first so any field not set below reads as 0 */
	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}
448 
449 int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
450 			      u8 state)
451 {
452 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
453 	enum mlxsw_reg_spms_state spms_state;
454 	char *spms_pl;
455 	int err;
456 
457 	switch (state) {
458 	case BR_STATE_FORWARDING:
459 		spms_state = MLXSW_REG_SPMS_STATE_FORWARDING;
460 		break;
461 	case BR_STATE_LEARNING:
462 		spms_state = MLXSW_REG_SPMS_STATE_LEARNING;
463 		break;
464 	case BR_STATE_LISTENING: /* fall-through */
465 	case BR_STATE_DISABLED: /* fall-through */
466 	case BR_STATE_BLOCKING:
467 		spms_state = MLXSW_REG_SPMS_STATE_DISCARDING;
468 		break;
469 	default:
470 		BUG();
471 	}
472 
473 	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
474 	if (!spms_pl)
475 		return -ENOMEM;
476 	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
477 	mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
478 
479 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
480 	kfree(spms_pl);
481 	return err;
482 }
483 
484 static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
485 {
486 	char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
487 	int err;
488 
489 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
490 	if (err)
491 		return err;
492 	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
493 	return 0;
494 }
495 
496 static int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
497 {
498 	int i;
499 
500 	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
501 		return -EIO;
502 
503 	mlxsw_sp->span.entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core,
504 							  MAX_SPAN);
505 	mlxsw_sp->span.entries = kcalloc(mlxsw_sp->span.entries_count,
506 					 sizeof(struct mlxsw_sp_span_entry),
507 					 GFP_KERNEL);
508 	if (!mlxsw_sp->span.entries)
509 		return -ENOMEM;
510 
511 	for (i = 0; i < mlxsw_sp->span.entries_count; i++)
512 		INIT_LIST_HEAD(&mlxsw_sp->span.entries[i].bound_ports_list);
513 
514 	return 0;
515 }
516 
517 static void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
518 {
519 	int i;
520 
521 	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
522 		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
523 
524 		WARN_ON_ONCE(!list_empty(&curr->bound_ports_list));
525 	}
526 	kfree(mlxsw_sp->span.entries);
527 }
528 
529 static struct mlxsw_sp_span_entry *
530 mlxsw_sp_span_entry_create(struct mlxsw_sp_port *port)
531 {
532 	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
533 	struct mlxsw_sp_span_entry *span_entry;
534 	char mpat_pl[MLXSW_REG_MPAT_LEN];
535 	u8 local_port = port->local_port;
536 	int index;
537 	int i;
538 	int err;
539 
540 	/* find a free entry to use */
541 	index = -1;
542 	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
543 		if (!mlxsw_sp->span.entries[i].used) {
544 			index = i;
545 			span_entry = &mlxsw_sp->span.entries[i];
546 			break;
547 		}
548 	}
549 	if (index < 0)
550 		return NULL;
551 
552 	/* create a new port analayzer entry for local_port */
553 	mlxsw_reg_mpat_pack(mpat_pl, index, local_port, true);
554 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
555 	if (err)
556 		return NULL;
557 
558 	span_entry->used = true;
559 	span_entry->id = index;
560 	span_entry->ref_count = 1;
561 	span_entry->local_port = local_port;
562 	return span_entry;
563 }
564 
565 static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
566 					struct mlxsw_sp_span_entry *span_entry)
567 {
568 	u8 local_port = span_entry->local_port;
569 	char mpat_pl[MLXSW_REG_MPAT_LEN];
570 	int pa_id = span_entry->id;
571 
572 	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false);
573 	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
574 	span_entry->used = false;
575 }
576 
577 static struct mlxsw_sp_span_entry *
578 mlxsw_sp_span_entry_find(struct mlxsw_sp *mlxsw_sp, u8 local_port)
579 {
580 	int i;
581 
582 	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
583 		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
584 
585 		if (curr->used && curr->local_port == local_port)
586 			return curr;
587 	}
588 	return NULL;
589 }
590 
591 static struct mlxsw_sp_span_entry
592 *mlxsw_sp_span_entry_get(struct mlxsw_sp_port *port)
593 {
594 	struct mlxsw_sp_span_entry *span_entry;
595 
596 	span_entry = mlxsw_sp_span_entry_find(port->mlxsw_sp,
597 					      port->local_port);
598 	if (span_entry) {
599 		/* Already exists, just take a reference */
600 		span_entry->ref_count++;
601 		return span_entry;
602 	}
603 
604 	return mlxsw_sp_span_entry_create(port);
605 }
606 
607 static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
608 				   struct mlxsw_sp_span_entry *span_entry)
609 {
610 	WARN_ON(!span_entry->ref_count);
611 	if (--span_entry->ref_count == 0)
612 		mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
613 	return 0;
614 }
615 
616 static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
617 {
618 	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
619 	struct mlxsw_sp_span_inspected_port *p;
620 	int i;
621 
622 	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
623 		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
624 
625 		list_for_each_entry(p, &curr->bound_ports_list, list)
626 			if (p->local_port == port->local_port &&
627 			    p->type == MLXSW_SP_SPAN_EGRESS)
628 				return true;
629 	}
630 
631 	return false;
632 }
633 
/* Mirroring buffer size for a given MTU: 2.5 MTUs worth of bytes
 * converted to cells, plus one extra cell.
 */
static int mlxsw_sp_span_mtu_to_buffsize(const struct mlxsw_sp *mlxsw_sp,
					 int mtu)
{
	return mlxsw_sp_bytes_cells(mlxsw_sp, mtu * 5 / 2) + 1;
}
639 
640 static int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
641 {
642 	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
643 	char sbib_pl[MLXSW_REG_SBIB_LEN];
644 	int err;
645 
646 	/* If port is egress mirrored, the shared buffer size should be
647 	 * updated according to the mtu value
648 	 */
649 	if (mlxsw_sp_span_is_egress_mirror(port)) {
650 		u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp, mtu);
651 
652 		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
653 		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
654 		if (err) {
655 			netdev_err(port->dev, "Could not update shared buffer for mirroring\n");
656 			return err;
657 		}
658 	}
659 
660 	return 0;
661 }
662 
663 static struct mlxsw_sp_span_inspected_port *
664 mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_port *port,
665 				    struct mlxsw_sp_span_entry *span_entry)
666 {
667 	struct mlxsw_sp_span_inspected_port *p;
668 
669 	list_for_each_entry(p, &span_entry->bound_ports_list, list)
670 		if (port->local_port == p->local_port)
671 			return p;
672 	return NULL;
673 }
674 
675 static int
676 mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
677 				  struct mlxsw_sp_span_entry *span_entry,
678 				  enum mlxsw_sp_span_type type)
679 {
680 	struct mlxsw_sp_span_inspected_port *inspected_port;
681 	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
682 	char mpar_pl[MLXSW_REG_MPAR_LEN];
683 	char sbib_pl[MLXSW_REG_SBIB_LEN];
684 	int pa_id = span_entry->id;
685 	int err;
686 
687 	/* if it is an egress SPAN, bind a shared buffer to it */
688 	if (type == MLXSW_SP_SPAN_EGRESS) {
689 		u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp,
690 							     port->dev->mtu);
691 
692 		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
693 		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
694 		if (err) {
695 			netdev_err(port->dev, "Could not create shared buffer for mirroring\n");
696 			return err;
697 		}
698 	}
699 
700 	/* bind the port to the SPAN entry */
701 	mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
702 			    (enum mlxsw_reg_mpar_i_e) type, true, pa_id);
703 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
704 	if (err)
705 		goto err_mpar_reg_write;
706 
707 	inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL);
708 	if (!inspected_port) {
709 		err = -ENOMEM;
710 		goto err_inspected_port_alloc;
711 	}
712 	inspected_port->local_port = port->local_port;
713 	inspected_port->type = type;
714 	list_add_tail(&inspected_port->list, &span_entry->bound_ports_list);
715 
716 	return 0;
717 
718 err_mpar_reg_write:
719 err_inspected_port_alloc:
720 	if (type == MLXSW_SP_SPAN_EGRESS) {
721 		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
722 		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
723 	}
724 	return err;
725 }
726 
727 static void
728 mlxsw_sp_span_inspected_port_unbind(struct mlxsw_sp_port *port,
729 				    struct mlxsw_sp_span_entry *span_entry,
730 				    enum mlxsw_sp_span_type type)
731 {
732 	struct mlxsw_sp_span_inspected_port *inspected_port;
733 	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
734 	char mpar_pl[MLXSW_REG_MPAR_LEN];
735 	char sbib_pl[MLXSW_REG_SBIB_LEN];
736 	int pa_id = span_entry->id;
737 
738 	inspected_port = mlxsw_sp_span_entry_bound_port_find(port, span_entry);
739 	if (!inspected_port)
740 		return;
741 
742 	/* remove the inspected port */
743 	mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
744 			    (enum mlxsw_reg_mpar_i_e) type, false, pa_id);
745 	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
746 
747 	/* remove the SBIB buffer if it was egress SPAN */
748 	if (type == MLXSW_SP_SPAN_EGRESS) {
749 		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
750 		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
751 	}
752 
753 	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
754 
755 	list_del(&inspected_port->list);
756 	kfree(inspected_port);
757 }
758 
759 static int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
760 				    struct mlxsw_sp_port *to,
761 				    enum mlxsw_sp_span_type type)
762 {
763 	struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp;
764 	struct mlxsw_sp_span_entry *span_entry;
765 	int err;
766 
767 	span_entry = mlxsw_sp_span_entry_get(to);
768 	if (!span_entry)
769 		return -ENOENT;
770 
771 	netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n",
772 		   span_entry->id);
773 
774 	err = mlxsw_sp_span_inspected_port_bind(from, span_entry, type);
775 	if (err)
776 		goto err_port_bind;
777 
778 	return 0;
779 
780 err_port_bind:
781 	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
782 	return err;
783 }
784 
785 static void mlxsw_sp_span_mirror_remove(struct mlxsw_sp_port *from,
786 					u8 destination_port,
787 					enum mlxsw_sp_span_type type)
788 {
789 	struct mlxsw_sp_span_entry *span_entry;
790 
791 	span_entry = mlxsw_sp_span_entry_find(from->mlxsw_sp,
792 					      destination_port);
793 	if (!span_entry) {
794 		netdev_err(from->dev, "no span entry found\n");
795 		return;
796 	}
797 
798 	netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n",
799 		   span_entry->id);
800 	mlxsw_sp_span_inspected_port_unbind(from, span_entry, type);
801 }
802 
803 static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
804 				    bool enable, u32 rate)
805 {
806 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
807 	char mpsc_pl[MLXSW_REG_MPSC_LEN];
808 
809 	mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate);
810 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl);
811 }
812 
813 static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
814 					  bool is_up)
815 {
816 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
817 	char paos_pl[MLXSW_REG_PAOS_LEN];
818 
819 	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
820 			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
821 			    MLXSW_PORT_ADMIN_STATUS_DOWN);
822 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
823 }
824 
825 static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
826 				      unsigned char *addr)
827 {
828 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
829 	char ppad_pl[MLXSW_REG_PPAD_LEN];
830 
831 	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
832 	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
833 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
834 }
835 
836 static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
837 {
838 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
839 	unsigned char *addr = mlxsw_sp_port->dev->dev_addr;
840 
841 	ether_addr_copy(addr, mlxsw_sp->base_mac);
842 	addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
843 	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
844 }
845 
846 static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
847 {
848 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
849 	char pmtu_pl[MLXSW_REG_PMTU_LEN];
850 	int max_mtu;
851 	int err;
852 
853 	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
854 	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
855 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
856 	if (err)
857 		return err;
858 	max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
859 
860 	if (mtu > max_mtu)
861 		return -EINVAL;
862 
863 	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
864 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
865 }
866 
867 static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
868 {
869 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
870 	char pspa_pl[MLXSW_REG_PSPA_LEN];
871 
872 	mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
873 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
874 }
875 
876 int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
877 {
878 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
879 	char svpe_pl[MLXSW_REG_SVPE_LEN];
880 
881 	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
882 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
883 }
884 
885 int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
886 				   bool learn_enable)
887 {
888 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
889 	char *spvmlr_pl;
890 	int err;
891 
892 	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
893 	if (!spvmlr_pl)
894 		return -ENOMEM;
895 	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
896 			      learn_enable);
897 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
898 	kfree(spvmlr_pl);
899 	return err;
900 }
901 
902 static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
903 				    u16 vid)
904 {
905 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
906 	char spvid_pl[MLXSW_REG_SPVID_LEN];
907 
908 	mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
909 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
910 }
911 
912 static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
913 					    bool allow)
914 {
915 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
916 	char spaft_pl[MLXSW_REG_SPAFT_LEN];
917 
918 	mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
919 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
920 }
921 
922 int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
923 {
924 	int err;
925 
926 	if (!vid) {
927 		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
928 		if (err)
929 			return err;
930 	} else {
931 		err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
932 		if (err)
933 			return err;
934 		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
935 		if (err)
936 			goto err_port_allow_untagged_set;
937 	}
938 
939 	mlxsw_sp_port->pvid = vid;
940 	return 0;
941 
942 err_port_allow_untagged_set:
943 	__mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);
944 	return err;
945 }
946 
/* Map the local port to a system port using the SSPR register.
 * mlxsw_reg_sspr_pack() takes only the local port, so this sets up the
 * default (identity) mapping.
 */
static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}
956 
/* Query the PMLP register for @local_port and return its module, width
 * and first TX lane. Only lane 0 of the mapping is reported; callers use
 * this to learn the port's front-panel module assignment.
 */
static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
					 u8 local_port, u8 *p_module,
					 u8 *p_width, u8 *p_lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int err;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	*p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	*p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	*p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}
973 
/* Map the port to @width consecutive lanes of @module starting at @lane,
 * by writing the PMLP register. Each lane entry gets the same module and
 * a consecutive lane number.
 */
static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port,
				    u8 module, u8 width, u8 lane)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int i;

	mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, width);
	for (i = 0; i < width; i++) {
		mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i);  /* Rx & Tx */
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}
990 
/* Unmap the port from its module by writing a PMLP record with width 0. */
static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmlp_pl[MLXSW_REG_PMLP_LEN];

	mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}
1000 
/* ndo_open: administratively enable the port in hardware, then let the
 * stack start queueing packets to us.
 */
static int mlxsw_sp_port_open(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err)
		return err;
	netif_start_queue(dev);
	return 0;
}
1012 
/* ndo_stop: stop the TX queue first, then administratively disable the
 * port in hardware.
 */
static int mlxsw_sp_port_stop(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	netif_stop_queue(dev);
	return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
}
1020 
/* ndo_start_xmit: prepend the mlxsw TX header and hand the skb to the
 * core for transmission on this local port.
 *
 * Ownership: on every path except NETDEV_TX_BUSY the skb is consumed
 * (either transmitted or freed here). Dropped packets are accounted in
 * the per-CPU tx_dropped counter.
 */
static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	/* Make room for the TX header; reallocate if the skb lacks it. */
	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
		struct sk_buff *skb_orig = skb;

		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
		if (!skb) {
			this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
			dev_kfree_skb_any(skb_orig);
			return NETDEV_TX_OK;
		}
		dev_consume_skb_any(skb_orig);
	}

	if (eth_skb_pad(skb)) {
		/* eth_skb_pad() already freed the skb on failure. */
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
1077 
/* ndo_set_rx_mode: intentionally a no-op. NOTE(review): presumably RX
 * filtering is handled by the switch itself rather than per-netdev —
 * confirm before adding logic here.
 */
static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}
1081 
/* ndo_set_mac_address: validate the new address, program it into the
 * device and only then update the netdev's dev_addr, so the cached
 * address never disagrees with hardware.
 */
static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
	if (err)
		return err;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}
1097 
/* Xon/Xoff threshold for a priority-group buffer: two MTUs worth of
 * buffer cells.
 */
static u16 mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp,
					 int mtu)
{
	return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu);
}
1103 
#define MLXSW_SP_CELL_FACTOR 2	/* 2 * cell_size / (IPG + cell_size + 1) */

/* Convert a PFC delay allowance from bits (@delay, as carried in
 * ieee_pfc) into buffer cells, and add one MTU worth of cells for a
 * packet in flight. The cell factor compensates for per-cell overhead.
 */
static u16 mlxsw_sp_pfc_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
				  u16 delay)
{
	delay = mlxsw_sp_bytes_cells(mlxsw_sp, DIV_ROUND_UP(delay,
							    BITS_PER_BYTE));
	return MLXSW_SP_CELL_FACTOR * delay + mlxsw_sp_bytes_cells(mlxsw_sp,
								   mtu);
}
1114 
1115 /* Maximum delay buffer needed in case of PAUSE frames, in bytes.
1116  * Assumes 100m cable and maximum MTU.
1117  */
1118 #define MLXSW_SP_PAUSE_DELAY 58752
1119 
1120 static u16 mlxsw_sp_pg_buf_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
1121 				     u16 delay, bool pfc, bool pause)
1122 {
1123 	if (pfc)
1124 		return mlxsw_sp_pfc_delay_get(mlxsw_sp, mtu, delay);
1125 	else if (pause)
1126 		return mlxsw_sp_bytes_cells(mlxsw_sp, MLXSW_SP_PAUSE_DELAY);
1127 	else
1128 		return 0;
1129 }
1130 
/* Pack one PG buffer record into a PBMC payload: lossy buffers get only
 * a size, lossless ones also get the Xon/Xoff threshold.
 */
static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres,
				 bool lossy)
{
	if (lossy)
		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size);
	else
		mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size,
						    thres);
}
1140 
1141 int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
1142 				 u8 *prio_tc, bool pause_en,
1143 				 struct ieee_pfc *my_pfc)
1144 {
1145 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1146 	u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
1147 	u16 delay = !!my_pfc ? my_pfc->delay : 0;
1148 	char pbmc_pl[MLXSW_REG_PBMC_LEN];
1149 	int i, j, err;
1150 
1151 	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
1152 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
1153 	if (err)
1154 		return err;
1155 
1156 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1157 		bool configure = false;
1158 		bool pfc = false;
1159 		bool lossy;
1160 		u16 thres;
1161 
1162 		for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
1163 			if (prio_tc[j] == i) {
1164 				pfc = pfc_en & BIT(j);
1165 				configure = true;
1166 				break;
1167 			}
1168 		}
1169 
1170 		if (!configure)
1171 			continue;
1172 
1173 		lossy = !(pfc || pause_en);
1174 		thres = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
1175 		delay = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, pfc,
1176 						  pause_en);
1177 		mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres + delay, thres, lossy);
1178 	}
1179 
1180 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
1181 }
1182 
/* Convenience wrapper around __mlxsw_sp_port_headroom_set(): derive the
 * prio->TC map and PFC configuration from the port's DCB state (or use
 * the all-zero defaults when DCB/ETS is not configured).
 */
static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      int mtu, bool pause_en)
{
	u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
	bool dcb_en = !!mlxsw_sp_port->dcb.ets;
	struct ieee_pfc *my_pfc;
	u8 *prio_tc;

	prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
	my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;

	return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
					    pause_en, my_pfc);
}
1197 
/* ndo_change_mtu: resize the headroom buffers and SPAN resources for the
 * new MTU before programming it, unwinding in reverse order (using the
 * still-unchanged dev->mtu) on failure.
 */
static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	int err;

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
	if (err)
		return err;
	err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu);
	if (err)
		goto err_span_port_mtu_update;
	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		goto err_port_mtu_set;
	dev->mtu = mtu;
	return 0;

err_port_mtu_set:
	mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu);
err_span_port_mtu_update:
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}
1222 
/* Sum the per-CPU software counters into @stats. Reads of the 64-bit
 * counters are made consistent with the u64_stats seqcount retry loop;
 * tx_dropped is a plain u32 updated without that protection, so it is
 * read directly.
 */
static int
mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets	= p->rx_packets;
			rx_bytes	= p->rx_bytes;
			tx_packets	= p->tx_packets;
			tx_bytes	= p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets	+= rx_packets;
		stats->rx_bytes		+= rx_bytes;
		stats->tx_packets	+= tx_packets;
		stats->tx_bytes		+= tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped	+= p->tx_dropped;
	}
	stats->tx_dropped	= tx_dropped;
	return 0;
}
1254 
1255 static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
1256 {
1257 	switch (attr_id) {
1258 	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
1259 		return true;
1260 	}
1261 
1262 	return false;
1263 }
1264 
/* ndo_get_offload_stats: fill @sp (a struct rtnl_link_stats64 for the
 * CPU-hit attribute) with the software-counted statistics.
 */
static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
					   void *sp)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return mlxsw_sp_port_get_sw_stats64(dev, sp);
	}

	return -EINVAL;
}
1275 
/* Query a PPCNT counter group (@grp) for the port into @ppcnt_pl.
 * @prio selects the priority/TC for per-priority and per-TC groups.
 */
static int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
				       int prio, char *ppcnt_pl)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
}
1285 
1286 static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
1287 				      struct rtnl_link_stats64 *stats)
1288 {
1289 	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
1290 	int err;
1291 
1292 	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
1293 					  0, ppcnt_pl);
1294 	if (err)
1295 		goto out;
1296 
1297 	stats->tx_packets =
1298 		mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
1299 	stats->rx_packets =
1300 		mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
1301 	stats->tx_bytes =
1302 		mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
1303 	stats->rx_bytes =
1304 		mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
1305 	stats->multicast =
1306 		mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);
1307 
1308 	stats->rx_crc_errors =
1309 		mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
1310 	stats->rx_frame_errors =
1311 		mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);
1312 
1313 	stats->rx_length_errors = (
1314 		mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
1315 		mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
1316 		mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));
1317 
1318 	stats->rx_errors = (stats->rx_crc_errors +
1319 		stats->rx_frame_errors + stats->rx_length_errors);
1320 
1321 out:
1322 	return err;
1323 }
1324 
/* Delayed work that periodically refreshes the cached HW statistics.
 * Skips the (register-access) query while the carrier is down, but
 * always re-arms itself so caching resumes when the link comes back.
 */
static void update_stats_cache(struct work_struct *work)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		container_of(work, struct mlxsw_sp_port,
			     hw_stats.update_dw.work);

	if (!netif_carrier_ok(mlxsw_sp_port->dev))
		goto out;

	mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
				   mlxsw_sp_port->hw_stats.cache);

out:
	mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw,
			       MLXSW_HW_STATS_UPDATE_TIME);
}
1341 
/* ndo_get_stats64: return the stats from a cache that is updated
 * periodically by update_stats_cache(), as this function might get
 * called in an atomic context where querying hardware is not allowed.
 */
static void
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	memcpy(stats, mlxsw_sp_port->hw_stats.cache, sizeof(*stats));
}
1353 
/* Program VLAN membership for the range [@vid_begin, @vid_end] on the
 * port via one SPVM write. The payload is heap-allocated because
 * MLXSW_REG_SPVM_LEN is too large for the stack.
 */
static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid_begin, u16 vid_end,
				    bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port,	vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}
1372 
/* Set VLAN membership for [@vid_begin, @vid_end], splitting the range
 * into SPVM-sized batches of at most MLXSW_REG_SPVM_REC_MAX_COUNT VIDs.
 * On error, VIDs already programmed by earlier batches are left as-is.
 */
int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	u16 vid, vid_e;
	int err;

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			    vid_end);

		err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
					       is_member, untagged);
		if (err)
			return err;
	}

	return 0;
}
1392 
/* Release every VLAN still associated with the port. The _safe iterator
 * is required because mlxsw_sp_port_vlan_put() unlinks and frees the
 * entry.
 */
static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;

	list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
				 &mlxsw_sp_port->vlans_list, list)
		mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
}
1401 
/* Add @vid to the port's HW filter and track it with a new
 * mlxsw_sp_port_vlan linked into the port's vlans_list. VID 1 is
 * programmed as egress-untagged. Returns the new entry or an ERR_PTR;
 * HW membership is rolled back if the allocation fails.
 */
static struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	bool untagged = vid == 1;
	int err;

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
	if (err)
		return ERR_PTR(err);

	mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
	if (!mlxsw_sp_port_vlan) {
		err = -ENOMEM;
		goto err_port_vlan_alloc;
	}

	mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
	mlxsw_sp_port_vlan->vid = vid;
	list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);

	return mlxsw_sp_port_vlan;

err_port_vlan_alloc:
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
	return ERR_PTR(err);
}
1429 
/* Tear down a tracked VLAN: unlink and free the entry, then remove the
 * VID from the port's HW filter.
 */
static void
mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	u16 vid = mlxsw_sp_port_vlan->vid;

	list_del(&mlxsw_sp_port_vlan->list);
	kfree(mlxsw_sp_port_vlan);
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
}
1440 
1441 struct mlxsw_sp_port_vlan *
1442 mlxsw_sp_port_vlan_get(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
1443 {
1444 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1445 
1446 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
1447 	if (mlxsw_sp_port_vlan)
1448 		return mlxsw_sp_port_vlan;
1449 
1450 	return mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid);
1451 }
1452 
/* Release a VLAN entry: detach it from whatever consumed it (a bridge
 * port, or the router when it has a FID but no bridge port) and then
 * destroy it.
 */
void mlxsw_sp_port_vlan_put(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;

	if (mlxsw_sp_port_vlan->bridge_port)
		mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
	else if (fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);

	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
}
1464 
/* ndo_vlan_rx_add_vid: add @vid to the port's VLAN filter. */
static int mlxsw_sp_port_add_vid(struct net_device *dev,
				 __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_get(mlxsw_sp_port, vid));
}
1478 
/* ndo_vlan_rx_kill_vid: remove @vid from the port's VLAN filter.
 * Unknown VIDs are silently ignored.
 */
static int mlxsw_sp_port_kill_vid(struct net_device *dev,
				  __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (!mlxsw_sp_port_vlan)
		return 0;
	mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);

	return 0;
}
1498 
/* ndo_get_phys_port_name: "p<module+1>" for regular ports,
 * "p<module+1>s<subport>" for split ports, where the subport index is
 * derived from lane/width. NOTE(review): assumes mapping.width is
 * non-zero for split ports — confirm against the port creation path.
 * Returns -EINVAL if the name does not fit in @len.
 */
static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
					    size_t len)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	u8 module = mlxsw_sp_port->mapping.module;
	u8 width = mlxsw_sp_port->mapping.width;
	u8 lane = mlxsw_sp_port->mapping.lane;
	int err;

	if (!mlxsw_sp_port->split)
		err = snprintf(name, len, "p%d", module + 1);
	else
		err = snprintf(name, len, "p%ds%d", module + 1,
			       lane / width);

	if (err >= len)
		return -EINVAL;

	return 0;
}
1519 
1520 static struct mlxsw_sp_port_mall_tc_entry *
1521 mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port,
1522 				 unsigned long cookie) {
1523 	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
1524 
1525 	list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list)
1526 		if (mall_tc_entry->cookie == cookie)
1527 			return mall_tc_entry;
1528 
1529 	return NULL;
1530 }
1531 
/* Offload a matchall mirred (mirror) action: resolve the destination
 * netdev from the action's ifindex, require it to be another port of
 * this driver, record the mirror parameters in @mirror and set up a
 * SPAN session toward it.
 */
static int
mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror,
				      const struct tc_action *a,
				      bool ingress)
{
	struct net *net = dev_net(mlxsw_sp_port->dev);
	enum mlxsw_sp_span_type span_type;
	struct mlxsw_sp_port *to_port;
	struct net_device *to_dev;
	int ifindex;

	ifindex = tcf_mirred_ifindex(a);
	to_dev = __dev_get_by_index(net, ifindex);
	if (!to_dev) {
		netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
		return -EINVAL;
	}

	if (!mlxsw_sp_port_dev_check(to_dev)) {
		netdev_err(mlxsw_sp_port->dev, "Cannot mirror to a non-spectrum port");
		return -EOPNOTSUPP;
	}
	to_port = netdev_priv(to_dev);

	mirror->to_local_port = to_port->local_port;
	mirror->ingress = ingress;
	span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
	return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_port, span_type);
}
1562 
/* Remove the SPAN session that was set up for a matchall mirror entry,
 * using the direction recorded at add time.
 */
static void
mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror)
{
	enum mlxsw_sp_span_type span_type;

	span_type = mirror->ingress ?
			MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
	mlxsw_sp_span_mirror_remove(mlxsw_sp_port, mirror->to_local_port,
				    span_type);
}
1574 
/* Offload a matchall sample action. Only one sampler per port is
 * supported and the rate must fit the MPSC register. The psample group
 * pointer is published with rcu_assign_pointer() before sampling is
 * enabled in HW, so the RX path never sees sampled packets without a
 * group; it is cleared again if enabling fails.
 */
static int
mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct tc_cls_matchall_offload *cls,
				      const struct tc_action *a,
				      bool ingress)
{
	int err;

	if (!mlxsw_sp_port->sample)
		return -EOPNOTSUPP;
	if (rtnl_dereference(mlxsw_sp_port->sample->psample_group)) {
		netdev_err(mlxsw_sp_port->dev, "sample already active\n");
		return -EEXIST;
	}
	if (tcf_sample_rate(a) > MLXSW_REG_MPSC_RATE_MAX) {
		netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n");
		return -EOPNOTSUPP;
	}

	rcu_assign_pointer(mlxsw_sp_port->sample->psample_group,
			   tcf_sample_psample_group(a));
	mlxsw_sp_port->sample->truncate = tcf_sample_truncate(a);
	mlxsw_sp_port->sample->trunc_size = tcf_sample_trunc_size(a);
	mlxsw_sp_port->sample->rate = tcf_sample_rate(a);

	err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, tcf_sample_rate(a));
	if (err)
		goto err_port_sample_set;
	return 0;

err_port_sample_set:
	RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
	return err;
}
1609 
/* Disable packet sampling on the port and drop the psample group
 * reference. The rate argument (1) is irrelevant when disabling.
 */
static void
mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port)
{
	if (!mlxsw_sp_port->sample)
		return;

	mlxsw_sp_port_sample_set(mlxsw_sp_port, false, 1);
	RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
}
1619 
/* Offload a matchall classifier. Exactly one action is supported, and it
 * must be either an egress-mirror or a sample action on an ETH_P_ALL
 * filter. On success the entry is tracked on mall_tc_list, keyed by the
 * filter cookie for later removal.
 */
static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct tc_cls_matchall_offload *f,
					  bool ingress)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
	__be16 protocol = f->common.protocol;
	const struct tc_action *a;
	LIST_HEAD(actions);
	int err;

	if (!tcf_exts_has_one_action(f->exts)) {
		netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
		return -EOPNOTSUPP;
	}

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	if (!mall_tc_entry)
		return -ENOMEM;
	mall_tc_entry->cookie = f->cookie;

	tcf_exts_to_list(f->exts, &actions);
	a = list_first_entry(&actions, struct tc_action, list);

	if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
		struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;

		mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
		mirror = &mall_tc_entry->mirror;
		err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port,
							    mirror, a, ingress);
	} else if (is_tcf_sample(a) && protocol == htons(ETH_P_ALL)) {
		mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE;
		err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, f,
							    a, ingress);
	} else {
		err = -EOPNOTSUPP;
	}

	if (err)
		goto err_add_action;

	list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list);
	return 0;

err_add_action:
	kfree(mall_tc_entry);
	return err;
}
1668 
/* Remove a previously offloaded matchall classifier: look the entry up
 * by cookie, undo its type-specific HW state and free it. A missing
 * entry is only logged at debug level since there is nothing to undo.
 */
static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct tc_cls_matchall_offload *f)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;

	mall_tc_entry = mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port,
							 f->cookie);
	if (!mall_tc_entry) {
		netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n");
		return;
	}
	list_del(&mall_tc_entry->list);

	switch (mall_tc_entry->type) {
	case MLXSW_SP_PORT_MALL_MIRROR:
		mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port,
						      &mall_tc_entry->mirror);
		break;
	case MLXSW_SP_PORT_MALL_SAMPLE:
		mlxsw_sp_port_del_cls_matchall_sample(mlxsw_sp_port);
		break;
	default:
		WARN_ON(1);
	}

	kfree(mall_tc_entry);
}
1696 
/* Dispatch a matchall offload command. The direction is derived from
 * the clsact classid; only chain 0 is supported.
 */
static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct tc_cls_matchall_offload *f)
{
	bool ingress;

	if (is_classid_clsact_ingress(f->common.classid))
		ingress = true;
	else if (is_classid_clsact_egress(f->common.classid))
		ingress = false;
	else
		return -EOPNOTSUPP;

	if (f->common.chain_index)
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_CLSMATCHALL_REPLACE:
		return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port, f,
						      ingress);
	case TC_CLSMATCHALL_DESTROY:
		mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port, f);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
1723 
/* Dispatch a flower offload command (replace/destroy/stats) to the
 * flower implementation, with the direction derived from the clsact
 * classid.
 */
static int
mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_port *mlxsw_sp_port,
			     struct tc_cls_flower_offload *f)
{
	bool ingress;

	if (is_classid_clsact_ingress(f->common.classid))
		ingress = true;
	else if (is_classid_clsact_egress(f->common.classid))
		ingress = false;
	else
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_CLSFLOWER_REPLACE:
		return mlxsw_sp_flower_replace(mlxsw_sp_port, ingress, f);
	case TC_CLSFLOWER_DESTROY:
		mlxsw_sp_flower_destroy(mlxsw_sp_port, ingress, f);
		return 0;
	case TC_CLSFLOWER_STATS:
		return mlxsw_sp_flower_stats(mlxsw_sp_port, ingress, f);
	default:
		return -EOPNOTSUPP;
	}
}
1749 
/* ndo_setup_tc: route TC offload requests to the matchall or flower
 * handler; everything else is unsupported.
 */
static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
			     void *type_data)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		return mlxsw_sp_setup_tc_cls_matchall(mlxsw_sp_port, type_data);
	case TC_SETUP_CLSFLOWER:
		return mlxsw_sp_setup_tc_cls_flower(mlxsw_sp_port, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
1764 
/* netdev callbacks for Spectrum port netdevs. */
static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_setup_tc           = mlxsw_sp_setup_tc,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_has_offload_stats	= mlxsw_sp_port_has_offload_stats,
	.ndo_get_offload_stats	= mlxsw_sp_port_get_offload_stats,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_get_phys_port_name	= mlxsw_sp_port_get_phys_port_name,
};
1780 
/* ethtool get_drvinfo: report driver name/version, device firmware
 * revision and the bus device name.
 */
static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, mlxsw_sp_driver_version,
		sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%d.%d.%d",
		 mlxsw_sp->bus_info->fw_rev.major,
		 mlxsw_sp->bus_info->fw_rev.minor,
		 mlxsw_sp->bus_info->fw_rev.subminor);
	strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
		sizeof(drvinfo->bus_info));
}
1798 
/* ethtool get_pauseparam: report the cached PAUSE configuration. */
static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
					 struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	pause->rx_pause = mlxsw_sp_port->link.rx_pause;
	pause->tx_pause = mlxsw_sp_port->link.tx_pause;
}
1807 
/* Program the requested RX/TX PAUSE settings into the PFCC register. */
static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct ethtool_pauseparam *pause)
{
	char pfcc_pl[MLXSW_REG_PFCC_LEN];

	mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
	mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);

	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
			       pfcc_pl);
}
1820 
/* ethtool set_pauseparam: PAUSE is mutually exclusive with PFC and
 * autonegotiated PAUSE is not supported. Headroom buffers are resized
 * for the new PAUSE state before the PFCC register is written; if the
 * register write fails the headroom is restored from the old cached
 * configuration.
 */
static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
					struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = pause->tx_pause || pause->rx_pause;
	int err;

	if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
		netdev_err(dev, "PFC already enabled on port\n");
		return -EINVAL;
	}

	if (pause->autoneg) {
		netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
		return -EINVAL;
	}

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	if (err) {
		netdev_err(dev, "Failed to configure port's headroom\n");
		return err;
	}

	err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
	if (err) {
		netdev_err(dev, "Failed to set PAUSE parameters\n");
		goto err_port_pause_configure;
	}

	mlxsw_sp_port->link.rx_pause = pause->rx_pause;
	mlxsw_sp_port->link.tx_pause = pause->tx_pause;

	return 0;

err_port_pause_configure:
	pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}
1860 
/* Descriptor for one ethtool statistic backed by a PPCNT field. */
struct mlxsw_sp_port_hw_stats {
	char str[ETH_GSTRING_LEN];	/* ethtool string name */
	u64 (*getter)(const char *payload);	/* extracts the value from a PPCNT payload */
	bool cells_bytes;	/* value is in buffer cells; convert to bytes */
};
1866 
/* IEEE 802.3 counter group: one entry per exposed ethtool statistic. */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
	{
		.str = "a_frames_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
	},
	{
		.str = "a_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
	},
	{
		.str = "a_frame_check_sequence_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
	},
	{
		.str = "a_alignment_errors",
		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
	},
	{
		.str = "a_octets_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
	},
	{
		.str = "a_octets_received_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
	},
	{
		.str = "a_multicast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
	},
	{
		.str = "a_broadcast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
	},
	{
		.str = "a_multicast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
	},
	{
		.str = "a_broadcast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
	},
	{
		.str = "a_in_range_length_errors",
		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
	},
	{
		.str = "a_out_of_range_length_field",
		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
	},
	{
		.str = "a_frame_too_long_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
	},
	{
		.str = "a_symbol_error_during_carrier",
		.getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
	},
	{
		.str = "a_mac_control_frames_transmitted",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
	},
	{
		.str = "a_mac_control_frames_received",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
	},
	{
		.str = "a_unsupported_opcodes_received",
		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_received",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_xmitted",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
	},
};

#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
1947 
/* Per-priority counters (PPCNT per-priority group). The priority number
 * is appended to each string by mlxsw_sp_port_get_prio_strings().
 */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
	{
		.str = "rx_octets_prio",
		.getter = mlxsw_reg_ppcnt_rx_octets_get,
	},
	{
		.str = "rx_frames_prio",
		.getter = mlxsw_reg_ppcnt_rx_frames_get,
	},
	{
		.str = "tx_octets_prio",
		.getter = mlxsw_reg_ppcnt_tx_octets_get,
	},
	{
		.str = "tx_frames_prio",
		.getter = mlxsw_reg_ppcnt_tx_frames_get,
	},
	{
		.str = "rx_pause_prio",
		.getter = mlxsw_reg_ppcnt_rx_pause_get,
	},
	{
		.str = "rx_pause_duration_prio",
		.getter = mlxsw_reg_ppcnt_rx_pause_duration_get,
	},
	{
		.str = "tx_pause_prio",
		.getter = mlxsw_reg_ppcnt_tx_pause_get,
	},
	{
		.str = "tx_pause_duration_prio",
		.getter = mlxsw_reg_ppcnt_tx_pause_duration_get,
	},
};
1982 
1983 #define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats)
1984 
/* Per-TC counters (PPCNT per-TC group). The TC number is appended to
 * each string by mlxsw_sp_port_get_tc_strings().
 */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
	{
		.str = "tc_transmit_queue_tc",
		.getter = mlxsw_reg_ppcnt_tc_transmit_queue_get,
		/* Reported by HW in buffer cells; converted to bytes by
		 * __mlxsw_sp_port_get_stats().
		 */
		.cells_bytes = true,
	},
	{
		.str = "tc_no_buffer_discard_uc_tc",
		.getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get,
	},
};
1996 
1997 #define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats)
1998 
1999 #define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
2000 					 (MLXSW_SP_PORT_HW_PRIO_STATS_LEN + \
2001 					  MLXSW_SP_PORT_HW_TC_STATS_LEN) * \
2002 					 IEEE_8021QAZ_MAX_TCS)
2003 
2004 static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio)
2005 {
2006 	int i;
2007 
2008 	for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) {
2009 		snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
2010 			 mlxsw_sp_port_hw_prio_stats[i].str, prio);
2011 		*p += ETH_GSTRING_LEN;
2012 	}
2013 }
2014 
2015 static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc)
2016 {
2017 	int i;
2018 
2019 	for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) {
2020 		snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
2021 			 mlxsw_sp_port_hw_tc_stats[i].str, tc);
2022 		*p += ETH_GSTRING_LEN;
2023 	}
2024 }
2025 
2026 static void mlxsw_sp_port_get_strings(struct net_device *dev,
2027 				      u32 stringset, u8 *data)
2028 {
2029 	u8 *p = data;
2030 	int i;
2031 
2032 	switch (stringset) {
2033 	case ETH_SS_STATS:
2034 		for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
2035 			memcpy(p, mlxsw_sp_port_hw_stats[i].str,
2036 			       ETH_GSTRING_LEN);
2037 			p += ETH_GSTRING_LEN;
2038 		}
2039 
2040 		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
2041 			mlxsw_sp_port_get_prio_strings(&p, i);
2042 
2043 		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
2044 			mlxsw_sp_port_get_tc_strings(&p, i);
2045 
2046 		break;
2047 	}
2048 }
2049 
2050 static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
2051 				     enum ethtool_phys_id_state state)
2052 {
2053 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2054 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2055 	char mlcr_pl[MLXSW_REG_MLCR_LEN];
2056 	bool active;
2057 
2058 	switch (state) {
2059 	case ETHTOOL_ID_ACTIVE:
2060 		active = true;
2061 		break;
2062 	case ETHTOOL_ID_INACTIVE:
2063 		active = false;
2064 		break;
2065 	default:
2066 		return -EOPNOTSUPP;
2067 	}
2068 
2069 	mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
2070 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
2071 }
2072 
2073 static int
2074 mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats,
2075 			       int *p_len, enum mlxsw_reg_ppcnt_grp grp)
2076 {
2077 	switch (grp) {
2078 	case  MLXSW_REG_PPCNT_IEEE_8023_CNT:
2079 		*p_hw_stats = mlxsw_sp_port_hw_stats;
2080 		*p_len = MLXSW_SP_PORT_HW_STATS_LEN;
2081 		break;
2082 	case MLXSW_REG_PPCNT_PRIO_CNT:
2083 		*p_hw_stats = mlxsw_sp_port_hw_prio_stats;
2084 		*p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
2085 		break;
2086 	case MLXSW_REG_PPCNT_TC_CNT:
2087 		*p_hw_stats = mlxsw_sp_port_hw_tc_stats;
2088 		*p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN;
2089 		break;
2090 	default:
2091 		WARN_ON(1);
2092 		return -EOPNOTSUPP;
2093 	}
2094 	return 0;
2095 }
2096 
2097 static void __mlxsw_sp_port_get_stats(struct net_device *dev,
2098 				      enum mlxsw_reg_ppcnt_grp grp, int prio,
2099 				      u64 *data, int data_index)
2100 {
2101 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2102 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2103 	struct mlxsw_sp_port_hw_stats *hw_stats;
2104 	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
2105 	int i, len;
2106 	int err;
2107 
2108 	err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp);
2109 	if (err)
2110 		return;
2111 	mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
2112 	for (i = 0; i < len; i++) {
2113 		data[data_index + i] = hw_stats[i].getter(ppcnt_pl);
2114 		if (!hw_stats[i].cells_bytes)
2115 			continue;
2116 		data[data_index + i] = mlxsw_sp_cells_bytes(mlxsw_sp,
2117 							    data[data_index + i]);
2118 	}
2119 }
2120 
2121 static void mlxsw_sp_port_get_stats(struct net_device *dev,
2122 				    struct ethtool_stats *stats, u64 *data)
2123 {
2124 	int i, data_index = 0;
2125 
2126 	/* IEEE 802.3 Counters */
2127 	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0,
2128 				  data, data_index);
2129 	data_index = MLXSW_SP_PORT_HW_STATS_LEN;
2130 
2131 	/* Per-Priority Counters */
2132 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2133 		__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i,
2134 					  data, data_index);
2135 		data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
2136 	}
2137 
2138 	/* Per-TC Counters */
2139 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2140 		__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i,
2141 					  data, data_index);
2142 		data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN;
2143 	}
2144 }
2145 
2146 static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
2147 {
2148 	switch (sset) {
2149 	case ETH_SS_STATS:
2150 		return MLXSW_SP_PORT_ETHTOOL_STATS_LEN;
2151 	default:
2152 		return -EOPNOTSUPP;
2153 	}
2154 }
2155 
/* Mapping between PTYS register Ethernet protocol bits, the matching
 * ethtool link mode bit and the corresponding speed.
 */
struct mlxsw_sp_port_link_mode {
	enum ethtool_link_mode_bit_indices mask_ethtool; /* ethtool link mode bit */
	u32 mask;	/* PTYS eth_proto bit(s) */
	u32 speed;	/* ethtool SPEED_* value */
};
2161 
2162 static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
2163 	{
2164 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
2165 		.mask_ethtool	= ETHTOOL_LINK_MODE_100baseT_Full_BIT,
2166 		.speed		= SPEED_100,
2167 	},
2168 	{
2169 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_SGMII |
2170 				  MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
2171 		.mask_ethtool	= ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
2172 		.speed		= SPEED_1000,
2173 	},
2174 	{
2175 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
2176 		.mask_ethtool	= ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
2177 		.speed		= SPEED_10000,
2178 	},
2179 	{
2180 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
2181 				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
2182 		.mask_ethtool	= ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
2183 		.speed		= SPEED_10000,
2184 	},
2185 	{
2186 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
2187 				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
2188 				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
2189 				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
2190 		.mask_ethtool	= ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
2191 		.speed		= SPEED_10000,
2192 	},
2193 	{
2194 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
2195 		.mask_ethtool	= ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
2196 		.speed		= SPEED_20000,
2197 	},
2198 	{
2199 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
2200 		.mask_ethtool	= ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
2201 		.speed		= SPEED_40000,
2202 	},
2203 	{
2204 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
2205 		.mask_ethtool	= ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
2206 		.speed		= SPEED_40000,
2207 	},
2208 	{
2209 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
2210 		.mask_ethtool	= ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
2211 		.speed		= SPEED_40000,
2212 	},
2213 	{
2214 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
2215 		.mask_ethtool	= ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
2216 		.speed		= SPEED_40000,
2217 	},
2218 	{
2219 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR,
2220 		.mask_ethtool	= ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
2221 		.speed		= SPEED_25000,
2222 	},
2223 	{
2224 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR,
2225 		.mask_ethtool	= ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
2226 		.speed		= SPEED_25000,
2227 	},
2228 	{
2229 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
2230 		.mask_ethtool	= ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
2231 		.speed		= SPEED_25000,
2232 	},
2233 	{
2234 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
2235 		.mask_ethtool	= ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
2236 		.speed		= SPEED_25000,
2237 	},
2238 	{
2239 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2,
2240 		.mask_ethtool	= ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
2241 		.speed		= SPEED_50000,
2242 	},
2243 	{
2244 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
2245 		.mask_ethtool	= ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
2246 		.speed		= SPEED_50000,
2247 	},
2248 	{
2249 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2,
2250 		.mask_ethtool	= ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
2251 		.speed		= SPEED_50000,
2252 	},
2253 	{
2254 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2255 		.mask_ethtool	= ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT,
2256 		.speed		= SPEED_56000,
2257 	},
2258 	{
2259 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2260 		.mask_ethtool	= ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT,
2261 		.speed		= SPEED_56000,
2262 	},
2263 	{
2264 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2265 		.mask_ethtool	= ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT,
2266 		.speed		= SPEED_56000,
2267 	},
2268 	{
2269 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2270 		.mask_ethtool	= ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT,
2271 		.speed		= SPEED_56000,
2272 	},
2273 	{
2274 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4,
2275 		.mask_ethtool	= ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
2276 		.speed		= SPEED_100000,
2277 	},
2278 	{
2279 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4,
2280 		.mask_ethtool	= ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
2281 		.speed		= SPEED_100000,
2282 	},
2283 	{
2284 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4,
2285 		.mask_ethtool	= ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
2286 		.speed		= SPEED_100000,
2287 	},
2288 	{
2289 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
2290 		.mask_ethtool	= ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
2291 		.speed		= SPEED_100000,
2292 	},
2293 };
2294 
2295 #define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)
2296 
2297 static void
2298 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto,
2299 				  struct ethtool_link_ksettings *cmd)
2300 {
2301 	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
2302 			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
2303 			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
2304 			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
2305 			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
2306 			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
2307 		ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
2308 
2309 	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
2310 			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
2311 			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
2312 			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
2313 			      MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
2314 		ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
2315 }
2316 
2317 static void mlxsw_sp_from_ptys_link(u32 ptys_eth_proto, unsigned long *mode)
2318 {
2319 	int i;
2320 
2321 	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2322 		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
2323 			__set_bit(mlxsw_sp_port_link_mode[i].mask_ethtool,
2324 				  mode);
2325 	}
2326 }
2327 
2328 static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
2329 					    struct ethtool_link_ksettings *cmd)
2330 {
2331 	u32 speed = SPEED_UNKNOWN;
2332 	u8 duplex = DUPLEX_UNKNOWN;
2333 	int i;
2334 
2335 	if (!carrier_ok)
2336 		goto out;
2337 
2338 	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2339 		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
2340 			speed = mlxsw_sp_port_link_mode[i].speed;
2341 			duplex = DUPLEX_FULL;
2342 			break;
2343 		}
2344 	}
2345 out:
2346 	cmd->base.speed = speed;
2347 	cmd->base.duplex = duplex;
2348 }
2349 
2350 static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
2351 {
2352 	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
2353 			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
2354 			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
2355 			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
2356 		return PORT_FIBRE;
2357 
2358 	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
2359 			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
2360 			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
2361 		return PORT_DA;
2362 
2363 	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
2364 			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
2365 			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
2366 			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
2367 		return PORT_NONE;
2368 
2369 	return PORT_OTHER;
2370 }
2371 
2372 static u32
2373 mlxsw_sp_to_ptys_advert_link(const struct ethtool_link_ksettings *cmd)
2374 {
2375 	u32 ptys_proto = 0;
2376 	int i;
2377 
2378 	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2379 		if (test_bit(mlxsw_sp_port_link_mode[i].mask_ethtool,
2380 			     cmd->link_modes.advertising))
2381 			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
2382 	}
2383 	return ptys_proto;
2384 }
2385 
2386 static u32 mlxsw_sp_to_ptys_speed(u32 speed)
2387 {
2388 	u32 ptys_proto = 0;
2389 	int i;
2390 
2391 	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2392 		if (speed == mlxsw_sp_port_link_mode[i].speed)
2393 			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
2394 	}
2395 	return ptys_proto;
2396 }
2397 
2398 static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed)
2399 {
2400 	u32 ptys_proto = 0;
2401 	int i;
2402 
2403 	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2404 		if (mlxsw_sp_port_link_mode[i].speed <= upper_speed)
2405 			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
2406 	}
2407 	return ptys_proto;
2408 }
2409 
/* Fill cmd->link_modes.supported from the PTYS capability bits: pause
 * and autoneg are always reported, port types and link modes are
 * derived from @eth_proto_cap.
 */
static void mlxsw_sp_port_get_link_supported(u32 eth_proto_cap,
					     struct ethtool_link_ksettings *cmd)
{
	ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause);
	ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
	ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);

	mlxsw_sp_from_ptys_supported_port(eth_proto_cap, cmd);
	mlxsw_sp_from_ptys_link(eth_proto_cap, cmd->link_modes.supported);
}
2420 
2421 static void mlxsw_sp_port_get_link_advertise(u32 eth_proto_admin, bool autoneg,
2422 					     struct ethtool_link_ksettings *cmd)
2423 {
2424 	if (!autoneg)
2425 		return;
2426 
2427 	ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
2428 	mlxsw_sp_from_ptys_link(eth_proto_admin, cmd->link_modes.advertising);
2429 }
2430 
2431 static void
2432 mlxsw_sp_port_get_link_lp_advertise(u32 eth_proto_lp, u8 autoneg_status,
2433 				    struct ethtool_link_ksettings *cmd)
2434 {
2435 	if (autoneg_status != MLXSW_REG_PTYS_AN_STATUS_OK || !eth_proto_lp)
2436 		return;
2437 
2438 	ethtool_link_ksettings_add_link_mode(cmd, lp_advertising, Autoneg);
2439 	mlxsw_sp_from_ptys_link(eth_proto_lp, cmd->link_modes.lp_advertising);
2440 }
2441 
/* ethtool get_link_ksettings handler. Queries the PTYS register once
 * and derives supported / advertised / link-partner modes, connector
 * type and speed/duplex from its capability, admin and operational
 * fields.
 */
static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev,
					    struct ethtool_link_ksettings *cmd)
{
	u32 eth_proto_cap, eth_proto_admin, eth_proto_oper, eth_proto_lp;
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u8 autoneg_status;
	bool autoneg;
	int err;

	autoneg = mlxsw_sp_port->link.autoneg;
	/* Pack with zero admin mask: a query, not a configuration write */
	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;
	mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin,
				  &eth_proto_oper);

	mlxsw_sp_port_get_link_supported(eth_proto_cap, cmd);

	mlxsw_sp_port_get_link_advertise(eth_proto_admin, autoneg, cmd);

	eth_proto_lp = mlxsw_reg_ptys_eth_proto_lp_advertise_get(ptys_pl);
	autoneg_status = mlxsw_reg_ptys_an_status_get(ptys_pl);
	mlxsw_sp_port_get_link_lp_advertise(eth_proto_lp, autoneg_status, cmd);

	cmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
	cmd->base.port = mlxsw_sp_port_connector_port(eth_proto_oper);
	/* Speed/duplex are only meaningful while carrier is up */
	mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev), eth_proto_oper,
					cmd);

	return 0;
}
2476 
2477 static int
2478 mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
2479 				 const struct ethtool_link_ksettings *cmd)
2480 {
2481 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2482 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2483 	char ptys_pl[MLXSW_REG_PTYS_LEN];
2484 	u32 eth_proto_cap, eth_proto_new;
2485 	bool autoneg;
2486 	int err;
2487 
2488 	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
2489 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2490 	if (err)
2491 		return err;
2492 	mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, NULL, NULL);
2493 
2494 	autoneg = cmd->base.autoneg == AUTONEG_ENABLE;
2495 	eth_proto_new = autoneg ?
2496 		mlxsw_sp_to_ptys_advert_link(cmd) :
2497 		mlxsw_sp_to_ptys_speed(cmd->base.speed);
2498 
2499 	eth_proto_new = eth_proto_new & eth_proto_cap;
2500 	if (!eth_proto_new) {
2501 		netdev_err(dev, "No supported speed requested\n");
2502 		return -EINVAL;
2503 	}
2504 
2505 	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port,
2506 				eth_proto_new);
2507 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2508 	if (err)
2509 		return err;
2510 
2511 	if (!netif_running(dev))
2512 		return 0;
2513 
2514 	mlxsw_sp_port->link.autoneg = autoneg;
2515 
2516 	mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
2517 	mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
2518 
2519 	return 0;
2520 }
2521 
/* ethtool flash_device handler: load the firmware image named in
 * @flash->data and program it into the device. Only flashing all
 * regions is supported.
 */
static int mlxsw_sp_flash_device(struct net_device *dev,
				 struct ethtool_flash *flash)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	const struct firmware *firmware;
	int err;

	if (flash->region != ETHTOOL_FLASH_ALL_REGIONS)
		return -EOPNOTSUPP;

	/* Drop RTNL (held by the ethtool core) around the potentially
	 * long firmware load/flash; dev_hold() keeps the netdev alive
	 * meanwhile. RTNL is re-taken before returning to the caller.
	 */
	dev_hold(dev);
	rtnl_unlock();

	err = request_firmware_direct(&firmware, flash->data, &dev->dev);
	if (err)
		goto out;
	err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware);
	release_firmware(firmware);
out:
	rtnl_lock();
	dev_put(dev);
	return err;
}
2546 
2547 #define MLXSW_SP_I2C_ADDR_LOW 0x50
2548 #define MLXSW_SP_I2C_ADDR_HIGH 0x51
2549 #define MLXSW_SP_EEPROM_PAGE_LENGTH 256
2550 
/* Read up to @size bytes of the port module's EEPROM starting at
 * @offset via the MCIA register. Reads never cross the 256-byte page
 * boundary: a crossing request is truncated to the low page, and
 * offsets past the first page are redirected to the high I2C address.
 * The number of bytes actually copied to @data is returned through
 * @p_read_size. Returns 0 on success, -EIO on a bad MCIA status, or a
 * register-access errno.
 */
static int mlxsw_sp_query_module_eeprom(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 offset, u16 size, void *data,
					unsigned int *p_read_size)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char eeprom_tmp[MLXSW_SP_REG_MCIA_EEPROM_SIZE];
	char mcia_pl[MLXSW_REG_MCIA_LEN];
	u16 i2c_addr;
	int status;
	int err;

	/* A single MCIA transaction is limited to the EEPROM buffer size */
	size = min_t(u16, size, MLXSW_SP_REG_MCIA_EEPROM_SIZE);

	if (offset < MLXSW_SP_EEPROM_PAGE_LENGTH &&
	    offset + size > MLXSW_SP_EEPROM_PAGE_LENGTH)
		/* Cross pages read, read until offset 256 in low page */
		size = MLXSW_SP_EEPROM_PAGE_LENGTH - offset;

	i2c_addr = MLXSW_SP_I2C_ADDR_LOW;
	if (offset >= MLXSW_SP_EEPROM_PAGE_LENGTH) {
		/* The upper page is addressed at 0x51 with a rebased offset */
		i2c_addr = MLXSW_SP_I2C_ADDR_HIGH;
		offset -= MLXSW_SP_EEPROM_PAGE_LENGTH;
	}

	mlxsw_reg_mcia_pack(mcia_pl, mlxsw_sp_port->mapping.module,
			    0, 0, offset, size, i2c_addr);

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcia), mcia_pl);
	if (err)
		return err;

	status = mlxsw_reg_mcia_status_get(mcia_pl);
	if (status)
		return -EIO;

	mlxsw_reg_mcia_eeprom_memcpy_from(mcia_pl, eeprom_tmp);
	memcpy(data, eeprom_tmp, size);
	*p_read_size = size;

	return 0;
}
2592 
/* Compliance revision values read from the module EEPROM revision byte
 * (presumably per SFF-8436/SFF-8636 — confirm against the specs).
 */
enum mlxsw_sp_eeprom_module_info_rev_id {
	MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_UNSPC      = 0x00,
	MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8436       = 0x01,
	MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8636       = 0x03,
};

/* Module identifier values read from the module EEPROM identifier byte. */
enum mlxsw_sp_eeprom_module_info_id {
	MLXSW_SP_EEPROM_MODULE_INFO_ID_SFP              = 0x03,
	MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP             = 0x0C,
	MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP_PLUS        = 0x0D,
	MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28           = 0x11,
};

/* Byte offsets of the fields read by mlxsw_sp_get_module_info() */
enum mlxsw_sp_eeprom_module_info {
	MLXSW_SP_EEPROM_MODULE_INFO_ID,
	MLXSW_SP_EEPROM_MODULE_INFO_REV_ID,
	MLXSW_SP_EEPROM_MODULE_INFO_SIZE,
};
2611 
/* ethtool get_module_info handler: read the module's identifier and
 * revision bytes and map them to the matching SFF EEPROM layout type
 * and length.
 */
static int mlxsw_sp_get_module_info(struct net_device *netdev,
				    struct ethtool_modinfo *modinfo)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
	u8 module_info[MLXSW_SP_EEPROM_MODULE_INFO_SIZE];
	u8 module_rev_id, module_id;
	unsigned int read_size;
	int err;

	err = mlxsw_sp_query_module_eeprom(mlxsw_sp_port, 0,
					   MLXSW_SP_EEPROM_MODULE_INFO_SIZE,
					   module_info, &read_size);
	if (err)
		return err;

	if (read_size < MLXSW_SP_EEPROM_MODULE_INFO_SIZE)
		return -EIO;

	module_rev_id = module_info[MLXSW_SP_EEPROM_MODULE_INFO_REV_ID];
	module_id = module_info[MLXSW_SP_EEPROM_MODULE_INFO_ID];

	switch (module_id) {
	case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP:
		modinfo->type       = ETH_MODULE_SFF_8436;
		modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
		break;
	case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP_PLUS:
	case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28:
		/* QSFP28, and QSFP+ at a new enough compliance revision,
		 * follow the SFF-8636 layout; older QSFP+ uses SFF-8436.
		 */
		if (module_id  == MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28 ||
		    module_rev_id >= MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8636) {
			modinfo->type       = ETH_MODULE_SFF_8636;
			modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
		} else {
			modinfo->type       = ETH_MODULE_SFF_8436;
			modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
		}
		break;
	case MLXSW_SP_EEPROM_MODULE_INFO_ID_SFP:
		modinfo->type       = ETH_MODULE_SFF_8472;
		modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
2659 
2660 static int mlxsw_sp_get_module_eeprom(struct net_device *netdev,
2661 				      struct ethtool_eeprom *ee,
2662 				      u8 *data)
2663 {
2664 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
2665 	int offset = ee->offset;
2666 	unsigned int read_size;
2667 	int i = 0;
2668 	int err;
2669 
2670 	if (!ee->len)
2671 		return -EINVAL;
2672 
2673 	memset(data, 0, ee->len);
2674 
2675 	while (i < ee->len) {
2676 		err = mlxsw_sp_query_module_eeprom(mlxsw_sp_port, offset,
2677 						   ee->len - i, data + i,
2678 						   &read_size);
2679 		if (err) {
2680 			netdev_err(mlxsw_sp_port->dev, "Eeprom query failed\n");
2681 			return err;
2682 		}
2683 
2684 		i += read_size;
2685 		offset += read_size;
2686 	}
2687 
2688 	return 0;
2689 }
2690 
/* ethtool callbacks installed on every Spectrum port netdev */
static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
	.get_drvinfo		= mlxsw_sp_port_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_pauseparam		= mlxsw_sp_port_get_pauseparam,
	.set_pauseparam		= mlxsw_sp_port_set_pauseparam,
	.get_strings		= mlxsw_sp_port_get_strings,
	.set_phys_id		= mlxsw_sp_port_set_phys_id,
	.get_ethtool_stats	= mlxsw_sp_port_get_stats,
	.get_sset_count		= mlxsw_sp_port_get_sset_count,
	.get_link_ksettings	= mlxsw_sp_port_get_link_ksettings,
	.set_link_ksettings	= mlxsw_sp_port_set_link_ksettings,
	.flash_device		= mlxsw_sp_flash_device,
	.get_module_info	= mlxsw_sp_get_module_info,
	.get_module_eeprom	= mlxsw_sp_get_module_eeprom,
};
2706 
/* Enable all speeds up to the maximum the port's lane width allows
 * (base speed times number of lanes) by writing the PTYS admin mask.
 */
static int
mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_admin;

	eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed);
	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port,
				eth_proto_admin);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}
2720 
/* Configure an ETS scheduling element (QEEC register): link element
 * @index at hierarchy level @hr to @next_index in the parent level, and
 * set its DWRR mode and weight.
 */
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
			  enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
			  bool dwrr, u8 dwrr_weight)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_de_set(qeec_pl, true);
	mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
	mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}
2735 
/* Set the maximum shaper rate of an ETS element (QEEC register) at
 * hierarchy level @hr.
 */
int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  enum mlxsw_reg_qeec_hr hr, u8 index,
				  u8 next_index, u32 maxrate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mase_set(qeec_pl, true);
	mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}
2749 
/* Map a switch priority to a traffic class on this port (QTCT
 * register).
 */
int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
			      u8 switch_prio, u8 tclass)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtct_pl[MLXSW_REG_QTCT_LEN];

	mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
			    tclass);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
}
2760 
/* Initialize the port's ETS scheduling hierarchy to its default state:
 * group -> per-TC subgroups -> TCs, all shapers disabled and every
 * priority mapped to TC 0.
 */
static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, i;

	/* Setup the elements hierarchy, so that each TC is linked to
	 * one subgroup, which are all members of the same group.
	 */
	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
				    MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false,
				    0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
					    0, false, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_TC, i, i,
					    false, 0);
		if (err)
			return err;
	}

	/* Make sure the max shaper is disabled in all hierarchy levels
	 * that support it.
	 */
	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
					    MLXSW_REG_QEEC_MAS_DIS);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
						    i, 0,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_TC,
						    i, i,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}

	/* Map all priorities to traffic class 0. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
		if (err)
			return err;
	}

	return 0;
}
2822 
2823 static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
2824 				bool split, u8 module, u8 width, u8 lane)
2825 {
2826 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2827 	struct mlxsw_sp_port *mlxsw_sp_port;
2828 	struct net_device *dev;
2829 	int err;
2830 
2831 	err = mlxsw_core_port_init(mlxsw_sp->core, local_port);
2832 	if (err) {
2833 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
2834 			local_port);
2835 		return err;
2836 	}
2837 
2838 	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
2839 	if (!dev) {
2840 		err = -ENOMEM;
2841 		goto err_alloc_etherdev;
2842 	}
2843 	SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
2844 	mlxsw_sp_port = netdev_priv(dev);
2845 	mlxsw_sp_port->dev = dev;
2846 	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
2847 	mlxsw_sp_port->local_port = local_port;
2848 	mlxsw_sp_port->pvid = 1;
2849 	mlxsw_sp_port->split = split;
2850 	mlxsw_sp_port->mapping.module = module;
2851 	mlxsw_sp_port->mapping.width = width;
2852 	mlxsw_sp_port->mapping.lane = lane;
2853 	mlxsw_sp_port->link.autoneg = 1;
2854 	INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);
2855 	INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list);
2856 
2857 	mlxsw_sp_port->pcpu_stats =
2858 		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
2859 	if (!mlxsw_sp_port->pcpu_stats) {
2860 		err = -ENOMEM;
2861 		goto err_alloc_stats;
2862 	}
2863 
2864 	mlxsw_sp_port->sample = kzalloc(sizeof(*mlxsw_sp_port->sample),
2865 					GFP_KERNEL);
2866 	if (!mlxsw_sp_port->sample) {
2867 		err = -ENOMEM;
2868 		goto err_alloc_sample;
2869 	}
2870 
2871 	mlxsw_sp_port->hw_stats.cache =
2872 		kzalloc(sizeof(*mlxsw_sp_port->hw_stats.cache), GFP_KERNEL);
2873 
2874 	if (!mlxsw_sp_port->hw_stats.cache) {
2875 		err = -ENOMEM;
2876 		goto err_alloc_hw_stats;
2877 	}
2878 	INIT_DELAYED_WORK(&mlxsw_sp_port->hw_stats.update_dw,
2879 			  &update_stats_cache);
2880 
2881 	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
2882 	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
2883 
2884 	err = mlxsw_sp_port_module_map(mlxsw_sp_port, module, width, lane);
2885 	if (err) {
2886 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
2887 			mlxsw_sp_port->local_port);
2888 		goto err_port_module_map;
2889 	}
2890 
2891 	err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
2892 	if (err) {
2893 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
2894 			mlxsw_sp_port->local_port);
2895 		goto err_port_swid_set;
2896 	}
2897 
2898 	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
2899 	if (err) {
2900 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
2901 			mlxsw_sp_port->local_port);
2902 		goto err_dev_addr_init;
2903 	}
2904 
2905 	netif_carrier_off(dev);
2906 
2907 	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
2908 			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
2909 	dev->hw_features |= NETIF_F_HW_TC;
2910 
2911 	dev->min_mtu = 0;
2912 	dev->max_mtu = ETH_MAX_MTU;
2913 
2914 	/* Each packet needs to have a Tx header (metadata) on top all other
2915 	 * headers.
2916 	 */
2917 	dev->needed_headroom = MLXSW_TXHDR_LEN;
2918 
2919 	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
2920 	if (err) {
2921 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
2922 			mlxsw_sp_port->local_port);
2923 		goto err_port_system_port_mapping_set;
2924 	}
2925 
2926 	err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
2927 	if (err) {
2928 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
2929 			mlxsw_sp_port->local_port);
2930 		goto err_port_speed_by_width_set;
2931 	}
2932 
2933 	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
2934 	if (err) {
2935 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
2936 			mlxsw_sp_port->local_port);
2937 		goto err_port_mtu_set;
2938 	}
2939 
2940 	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
2941 	if (err)
2942 		goto err_port_admin_status_set;
2943 
2944 	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
2945 	if (err) {
2946 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
2947 			mlxsw_sp_port->local_port);
2948 		goto err_port_buffers_init;
2949 	}
2950 
2951 	err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
2952 	if (err) {
2953 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
2954 			mlxsw_sp_port->local_port);
2955 		goto err_port_ets_init;
2956 	}
2957 
2958 	/* ETS and buffers must be initialized before DCB. */
2959 	err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
2960 	if (err) {
2961 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
2962 			mlxsw_sp_port->local_port);
2963 		goto err_port_dcb_init;
2964 	}
2965 
2966 	err = mlxsw_sp_port_fids_init(mlxsw_sp_port);
2967 	if (err) {
2968 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n",
2969 			mlxsw_sp_port->local_port);
2970 		goto err_port_fids_init;
2971 	}
2972 
2973 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
2974 	if (IS_ERR(mlxsw_sp_port_vlan)) {
2975 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
2976 			mlxsw_sp_port->local_port);
2977 		goto err_port_vlan_get;
2978 	}
2979 
2980 	mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
2981 	mlxsw_sp->ports[local_port] = mlxsw_sp_port;
2982 	err = register_netdev(dev);
2983 	if (err) {
2984 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
2985 			mlxsw_sp_port->local_port);
2986 		goto err_register_netdev;
2987 	}
2988 
2989 	mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
2990 				mlxsw_sp_port, dev, mlxsw_sp_port->split,
2991 				module);
2992 	mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw, 0);
2993 	return 0;
2994 
2995 err_register_netdev:
2996 	mlxsw_sp->ports[local_port] = NULL;
2997 	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
2998 	mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
2999 err_port_vlan_get:
3000 	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
3001 err_port_fids_init:
3002 	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
3003 err_port_dcb_init:
3004 err_port_ets_init:
3005 err_port_buffers_init:
3006 err_port_admin_status_set:
3007 err_port_mtu_set:
3008 err_port_speed_by_width_set:
3009 err_port_system_port_mapping_set:
3010 err_dev_addr_init:
3011 	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
3012 err_port_swid_set:
3013 	mlxsw_sp_port_module_unmap(mlxsw_sp_port);
3014 err_port_module_map:
3015 	kfree(mlxsw_sp_port->hw_stats.cache);
3016 err_alloc_hw_stats:
3017 	kfree(mlxsw_sp_port->sample);
3018 err_alloc_sample:
3019 	free_percpu(mlxsw_sp_port->pcpu_stats);
3020 err_alloc_stats:
3021 	free_netdev(dev);
3022 err_alloc_etherdev:
3023 	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
3024 	return err;
3025 }
3026 
/* Tear down a port netdev previously created by mlxsw_sp_port_create().
 * Steps are performed in (roughly) reverse order of creation; the
 * ordering is significant and must stay in sync with the create path.
 */
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];

	/* Stop the periodic HW stats refresh before the netdev goes away. */
	cancel_delayed_work_sync(&mlxsw_sp_port->hw_stats.update_dw);
	mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port);
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp_port);
	kfree(mlxsw_sp_port->hw_stats.cache);
	kfree(mlxsw_sp_port->sample);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	/* All port VLANs should have been flushed above. */
	WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
	free_netdev(mlxsw_sp_port->dev);
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
}
3048 
3049 static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port)
3050 {
3051 	return mlxsw_sp->ports[local_port] != NULL;
3052 }
3053 
3054 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
3055 {
3056 	int i;
3057 
3058 	for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
3059 		if (mlxsw_sp_port_created(mlxsw_sp, i))
3060 			mlxsw_sp_port_remove(mlxsw_sp, i);
3061 	kfree(mlxsw_sp->port_to_module);
3062 	kfree(mlxsw_sp->ports);
3063 }
3064 
/* Allocate the port bookkeeping arrays and create a netdev for every
 * present port. On any failure the ports created so far are destroyed
 * and the arrays freed. Returns 0 or a negative errno.
 */
static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	u8 module, width, lane;
	size_t alloc_size;
	int i;
	int err;

	alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports;
	mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
	if (!mlxsw_sp->ports)
		return -ENOMEM;

	/* Remember each port's module so unsplit can re-create it later. */
	mlxsw_sp->port_to_module = kcalloc(max_ports, sizeof(u8), GFP_KERNEL);
	if (!mlxsw_sp->port_to_module) {
		err = -ENOMEM;
		goto err_port_to_module_alloc;
	}

	/* Usable local ports start at 1. */
	for (i = 1; i < max_ports; i++) {
		err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
						    &width, &lane);
		if (err)
			goto err_port_module_info_get;
		/* No module width — nothing to instantiate for this port. */
		if (!width)
			continue;
		mlxsw_sp->port_to_module[i] = module;
		err = mlxsw_sp_port_create(mlxsw_sp, i, false,
					   module, width, lane);
		if (err)
			goto err_port_create;
	}
	return 0;

err_port_create:
err_port_module_info_get:
	/* Unwind the ports created so far, then free both arrays. */
	for (i--; i >= 1; i--)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	kfree(mlxsw_sp->port_to_module);
err_port_to_module_alloc:
	kfree(mlxsw_sp->ports);
	return err;
}
3109 
3110 static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
3111 {
3112 	u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;
3113 
3114 	return local_port - offset;
3115 }
3116 
3117 static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
3118 				      u8 module, unsigned int count)
3119 {
3120 	u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
3121 	int err, i;
3122 
3123 	for (i = 0; i < count; i++) {
3124 		err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
3125 					   module, width, i * width);
3126 		if (err)
3127 			goto err_port_create;
3128 	}
3129 
3130 	return 0;
3131 
3132 err_port_create:
3133 	for (i--; i >= 0; i--)
3134 		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
3135 			mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
3136 	return err;
3137 }
3138 
/* Re-create the original full-width ports after a split is undone.
 * Creation errors are deliberately ignored; this is best-effort
 * restoration on the unsplit / split-failure paths.
 */
static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
					 u8 base_port, unsigned int count)
{
	u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
	int i;

	/* Split by four means we need to re-create two ports, otherwise
	 * only one.
	 */
	count = count / 2;

	for (i = 0; i < count; i++) {
		local_port = base_port + i * 2;
		/* Module saved at initial creation time by
		 * mlxsw_sp_ports_create().
		 */
		module = mlxsw_sp->port_to_module[local_port];

		mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
				     width, 0);
	}
}
3158 
/* Port-split handler (wired up via mlxsw_sp_driver.port_split): replace
 * @local_port with @count split ports sharing the module's lanes. Only
 * full-width ports may be split, and the neighboring local ports the
 * split will occupy must currently be unused.
 */
static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
			       unsigned int count)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 module, cur_width, base_port;
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		return -EINVAL;
	}

	module = mlxsw_sp_port->mapping.module;
	cur_width = mlxsw_sp_port->mapping.width;

	if (count != 2 && count != 4) {
		netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
		return -EINVAL;
	}

	if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
		netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
		return -EINVAL;
	}

	/* Make sure we have enough slave (even) ports for the split. */
	if (count == 2) {
		base_port = local_port;
		if (mlxsw_sp->ports[base_port + 1]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			return -EINVAL;
		}
	} else {
		base_port = mlxsw_sp_cluster_base_port_get(local_port);
		if (mlxsw_sp->ports[base_port + 1] ||
		    mlxsw_sp->ports[base_port + 3]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			return -EINVAL;
		}
	}

	/* Remove the ports the split ports will replace. */
	for (i = 0; i < count; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i);

	err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
		goto err_port_split_create;
	}

	return 0;

err_port_split_create:
	/* Best-effort restoration of the original unsplit ports. */
	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
	return err;
}
3220 
/* Port-unsplit handler (wired up via mlxsw_sp_driver.port_unsplit):
 * remove the split ports and re-create the original one(s).
 */
static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 cur_width, base_port;
	unsigned int count;
	int i;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		return -EINVAL;
	}

	if (!mlxsw_sp_port->split) {
		netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n");
		return -EINVAL;
	}

	/* A single-lane member implies a four-way split; otherwise the
	 * port was split in two.
	 */
	cur_width = mlxsw_sp_port->mapping.width;
	count = cur_width == 1 ? 4 : 2;

	base_port = mlxsw_sp_cluster_base_port_get(local_port);

	/* Determine which ports to remove. */
	if (count == 2 && local_port >= base_port + 2)
		base_port = base_port + 2;

	for (i = 0; i < count; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i);

	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);

	return 0;
}
3258 
3259 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
3260 				     char *pude_pl, void *priv)
3261 {
3262 	struct mlxsw_sp *mlxsw_sp = priv;
3263 	struct mlxsw_sp_port *mlxsw_sp_port;
3264 	enum mlxsw_reg_pude_oper_status status;
3265 	u8 local_port;
3266 
3267 	local_port = mlxsw_reg_pude_local_port_get(pude_pl);
3268 	mlxsw_sp_port = mlxsw_sp->ports[local_port];
3269 	if (!mlxsw_sp_port)
3270 		return;
3271 
3272 	status = mlxsw_reg_pude_oper_status_get(pude_pl);
3273 	if (status == MLXSW_PORT_OPER_STATUS_UP) {
3274 		netdev_info(mlxsw_sp_port->dev, "link up\n");
3275 		netif_carrier_on(mlxsw_sp_port->dev);
3276 	} else {
3277 		netdev_info(mlxsw_sp_port->dev, "link down\n");
3278 		netif_carrier_off(mlxsw_sp_port->dev);
3279 	}
3280 }
3281 
/* RX handler for trapped packets: account the packet in the port's
 * per-CPU stats and inject it into the network stack.
 */
static void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
					      u8 local_port, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
				     local_port);
		return;
	}

	skb->dev = mlxsw_sp_port->dev;

	/* 64-bit counters updated under the u64_stats syncp. */
	pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	/* netif_receive_skb() takes ownership of the skb. */
	skb->protocol = eth_type_trans(skb, skb->dev);
	netif_receive_skb(skb);
}
3306 
3307 static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port,
3308 					   void *priv)
3309 {
3310 	skb->offload_fwd_mark = 1;
3311 	return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
3312 }
3313 
/* RX handler for sampled packets: hand the packet to the psample
 * subsystem (possibly truncated) and always consume the skb.
 */
static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port,
					     void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct psample_group *psample_group;
	u32 size;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n",
				     local_port);
		goto out;
	}
	if (unlikely(!mlxsw_sp_port->sample)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received on unsupported port\n",
				     local_port);
		goto out;
	}

	/* Report only trunc_size bytes when truncation is configured. */
	size = mlxsw_sp_port->sample->truncate ?
		  mlxsw_sp_port->sample->trunc_size : skb->len;

	/* psample_group is RCU-protected; a NULL group means sampling was
	 * disabled concurrently, so just drop the sample.
	 */
	rcu_read_lock();
	psample_group = rcu_dereference(mlxsw_sp_port->sample->psample_group);
	if (!psample_group)
		goto out_unlock;
	psample_sample_packet(psample_group, skb, size,
			      mlxsw_sp_port->dev->ifindex, 0,
			      mlxsw_sp_port->sample->rate);
out_unlock:
	rcu_read_unlock();
out:
	consume_skb(skb);
}
3348 
/* Helpers for the listener table below. Both expand to MLXSW_RXL
 * entries in the SP_<trap_group> group; the _MARK variant uses the
 * handler that sets skb->offload_fwd_mark before delivery.
 */
#define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action,	\
		_is_ctrl, SP_##_trap_group, DISCARD)

/* Event listeners always belong to the SP_EVENT trap group. */
#define MLXSW_SP_EVENTL(_func, _trap_id)		\
	MLXSW_EVENTL(_func, _trap_id, SP_EVENT)
3359 
/* All trap and event listeners this driver registers; see
 * mlxsw_sp_traps_init() / mlxsw_sp_traps_fini(). Entries are grouped by
 * category for readability.
 */
static const struct mlxsw_listener mlxsw_sp_listener[] = {
	/* Events */
	MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
	/* L2 traps */
	MLXSW_SP_RXL_NO_MARK(STP, TRAP_TO_CPU, STP, true),
	MLXSW_SP_RXL_NO_MARK(LACP, TRAP_TO_CPU, LACP, true),
	MLXSW_SP_RXL_NO_MARK(LLDP, TRAP_TO_CPU, LLDP, true),
	MLXSW_SP_RXL_MARK(DHCP, MIRROR_TO_CPU, DHCP, false),
	MLXSW_SP_RXL_MARK(IGMP_QUERY, MIRROR_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false),
	MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false),
	MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, IP2ME, false),
	MLXSW_SP_RXL_MARK(IPV6_MLDV12_LISTENER_QUERY, MIRROR_TO_CPU, IPV6_MLD,
			  false),
	MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD,
			     false),
	MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_DONE, TRAP_TO_CPU, IPV6_MLD,
			     false),
	MLXSW_SP_RXL_NO_MARK(IPV6_MLDV2_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD,
			     false),
	/* L3 traps */
	MLXSW_SP_RXL_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(LBERROR, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false),
	MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_ALL_NODES_LINK, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_ALL_ROUTERS_LINK, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(IPV4_OSPF, TRAP_TO_CPU, OSPF, false),
	MLXSW_SP_RXL_MARK(IPV6_OSPF, TRAP_TO_CPU, OSPF, false),
	MLXSW_SP_RXL_MARK(IPV6_DHCP, TRAP_TO_CPU, DHCP, false),
	MLXSW_SP_RXL_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false),
	MLXSW_SP_RXL_MARK(IPV4_BGP, TRAP_TO_CPU, BGP, false),
	MLXSW_SP_RXL_MARK(IPV6_BGP, TRAP_TO_CPU, BGP, false),
	MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_SOLICITATION, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_SOLICITATION, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND,
			  false),
	MLXSW_SP_RXL_MARK(L3_IPV6_REDIRECTION, TRAP_TO_CPU, IPV6_ND, false),
	MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, HOST_MISS, false),
	MLXSW_SP_RXL_MARK(HOST_MISS_IPV6, TRAP_TO_CPU, HOST_MISS, false),
	MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPIP_DECAP_ERROR, TRAP_TO_CPU, ROUTER_EXP, false),
	/* PKT Sample trap */
	MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU,
		  false, SP_IP2ME, DISCARD),
	/* ACL trap */
	MLXSW_SP_RXL_NO_MARK(ACL0, TRAP_TO_CPU, IP2ME, false),
};
3424 
/* Program a rate policer (QPCR register) for each CPU trap group this
 * driver rate-limits. Groups not handled by the switch statement are
 * left unprogrammed. Returns 0 or a negative errno.
 */
static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
{
	char qpcr_pl[MLXSW_REG_QPCR_LEN];
	enum mlxsw_reg_qpcr_ir_units ir_units;
	int max_cpu_policers;
	bool is_bytes;
	u8 burst_size;
	u32 rate;
	int i, err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
		return -EIO;

	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	/* Policer index i doubles as the trap-group identifier; see
	 * mlxsw_sp_trap_groups_set() which binds policer_id = group.
	 */
	ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
	for (i = 0; i < max_cpu_policers; i++) {
		is_bytes = false;
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
			rate = 128;
			burst_size = 7;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD:
			rate = 16 * 1024;
			burst_size = 10;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND:
			rate = 1024;
			burst_size = 7;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
			/* IP2ME is policed in bytes rather than packets. */
			is_bytes = true;
			rate = 4 * 1024;
			burst_size = 4;
			break;
		default:
			continue;
		}

		mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
				    burst_size);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
		if (err)
			return err;
	}

	return 0;
}
3484 
/* Bind each trap group (HTGT register) to its policer, trap priority
 * and traffic class. Higher numbers mean higher priority/TC. Returns 0
 * or a negative errno.
 */
static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	enum mlxsw_reg_htgt_trap_group i;
	int max_cpu_policers;
	int max_trap_groups;
	u8 priority, tc;
	u16 policer_id;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
		return -EIO;

	max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	for (i = 0; i < max_trap_groups; i++) {
		/* By convention, policer id == trap-group id; see
		 * mlxsw_sp_cpu_policers_set().
		 */
		policer_id = i;
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
			priority = 5;
			tc = 5;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
			priority = 4;
			tc = 4;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD:
			priority = 3;
			tc = 3;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND:
			priority = 2;
			tc = 2;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
			priority = 1;
			tc = 1;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
			/* Events are not rate-limited. */
			priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
			tc = MLXSW_REG_HTGT_DEFAULT_TC;
			policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
			break;
		default:
			continue;
		}

		/* The group id must map to an existing policer (unless no
		 * policer is bound at all).
		 */
		if (max_cpu_policers <= policer_id &&
		    policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
			return -EIO;

		mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
		if (err)
			return err;
	}

	return 0;
}
3554 
3555 static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
3556 {
3557 	int i;
3558 	int err;
3559 
3560 	err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
3561 	if (err)
3562 		return err;
3563 
3564 	err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
3565 	if (err)
3566 		return err;
3567 
3568 	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) {
3569 		err = mlxsw_core_trap_register(mlxsw_sp->core,
3570 					       &mlxsw_sp_listener[i],
3571 					       mlxsw_sp);
3572 		if (err)
3573 			goto err_listener_register;
3574 
3575 	}
3576 	return 0;
3577 
3578 err_listener_register:
3579 	for (i--; i >= 0; i--) {
3580 		mlxsw_core_trap_unregister(mlxsw_sp->core,
3581 					   &mlxsw_sp_listener[i],
3582 					   mlxsw_sp);
3583 	}
3584 	return err;
3585 }
3586 
3587 static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
3588 {
3589 	int i;
3590 
3591 	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) {
3592 		mlxsw_core_trap_unregister(mlxsw_sp->core,
3593 					   &mlxsw_sp_listener[i],
3594 					   mlxsw_sp);
3595 	}
3596 }
3597 
/* Configure the LAG hash fields (SLCR register) and allocate the
 * per-LAG upper-device tracking array. Returns 0 or a negative errno.
 */
static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
	char slcr_pl[MLXSW_REG_SLCR_LEN];
	int err;

	/* Hash over L2, VLAN, L3 and L4 fields. */
	mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
				     MLXSW_REG_SLCR_LAG_HASH_DMAC |
				     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
				     MLXSW_REG_SLCR_LAG_HASH_VLANID |
				     MLXSW_REG_SLCR_LAG_HASH_SIP |
				     MLXSW_REG_SLCR_LAG_HASH_DIP |
				     MLXSW_REG_SLCR_LAG_HASH_SPORT |
				     MLXSW_REG_SLCR_LAG_HASH_DPORT |
				     MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
	if (err)
		return err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
		return -EIO;

	/* One mlxsw_sp_upper slot per possible LAG id. */
	mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG),
				 sizeof(struct mlxsw_sp_upper),
				 GFP_KERNEL);
	if (!mlxsw_sp->lags)
		return -ENOMEM;

	return 0;
}
3628 
/* Release the LAG tracking array allocated by mlxsw_sp_lag_init(). */
static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->lags);
}
3633 
/* Configure the EMAD trap group with default priority/TC and no
 * policer. Called early by core (see .basic_trap_groups_set), before
 * the full trap-group setup in mlxsw_sp_trap_groups_set().
 */
static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
			    MLXSW_REG_HTGT_INVALID_POLICER,
			    MLXSW_REG_HTGT_DEFAULT_PRIORITY,
			    MLXSW_REG_HTGT_DEFAULT_TC);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
}
3644 
/* Driver init entry point (mlxsw_sp_driver.init): validate firmware,
 * then bring up the subsystems in dependency order, finishing with port
 * creation. On any failure, already-initialized subsystems are torn
 * down in reverse order. Returns 0 or a negative errno.
 */
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;

	err = mlxsw_sp_fw_rev_validate(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Could not upgrade firmware\n");
		return err;
	}

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_fids_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
		return err;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
		goto err_traps_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	err = mlxsw_sp_router_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
		goto err_router_init;
	}

	err = mlxsw_sp_span_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
		goto err_span_init;
	}

	err = mlxsw_sp_acl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
		goto err_acl_init;
	}

	err = mlxsw_sp_counter_pool_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
		goto err_counter_pool_init;
	}

	err = mlxsw_sp_dpipe_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
		goto err_dpipe_init;
	}

	/* Ports come last; they depend on everything above. */
	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		goto err_ports_create;
	}

	return 0;

err_ports_create:
	mlxsw_sp_dpipe_fini(mlxsw_sp);
err_dpipe_init:
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
err_counter_pool_init:
	mlxsw_sp_acl_fini(mlxsw_sp);
err_acl_init:
	mlxsw_sp_span_fini(mlxsw_sp);
err_span_init:
	mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
	mlxsw_sp_lag_fini(mlxsw_sp);
err_lag_init:
	mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_traps_init:
	mlxsw_sp_fids_fini(mlxsw_sp);
	return err;
}
3756 
/* Driver teardown (mlxsw_sp_driver.fini): exact reverse order of
 * mlxsw_sp_init(); keep the two in sync.
 */
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp_ports_remove(mlxsw_sp);
	mlxsw_sp_dpipe_fini(mlxsw_sp);
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
	mlxsw_sp_acl_fini(mlxsw_sp);
	mlxsw_sp_span_fini(mlxsw_sp);
	mlxsw_sp_router_fini(mlxsw_sp);
	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_lag_fini(mlxsw_sp);
	mlxsw_sp_buffers_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_fids_fini(mlxsw_sp);
}
3773 
/* Device configuration profile handed to mlxsw core; each used_* flag
 * enables the field that follows it.
 */
static const struct mlxsw_config_profile mlxsw_sp_config_profile = {
	.used_max_vepa_channels		= 1,
	.max_vepa_channels		= 0,
	.used_max_mid			= 1,
	.max_mid			= MLXSW_SP_MID_MAX,
	.used_max_pgt			= 1,
	.max_pgt			= 0,
	.used_flood_tables		= 1,
	.used_flood_mode		= 1,
	.flood_mode			= 3,
	.max_fid_offset_flood_tables	= 3,
	.fid_offset_flood_table_size	= VLAN_N_VID - 1,
	.max_fid_flood_tables		= 3,
	.fid_flood_table_size		= MLXSW_SP_FID_8021D_MAX,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.used_kvd_split_data		= 1,
	.kvd_hash_granularity		= MLXSW_SP_KVD_GRANULARITY,
	.kvd_hash_single_parts		= 2,
	.kvd_hash_double_parts		= 1,
	.kvd_linear_size		= MLXSW_SP_KVD_LINEAR_SIZE,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
	.resource_query_enable		= 1,
};
3805 
/* mlxsw core driver callbacks and parameters for the Spectrum ASIC. */
static struct mlxsw_driver mlxsw_sp_driver = {
	.kind				= mlxsw_sp_driver_name,
	.priv_size			= sizeof(struct mlxsw_sp),
	.init				= mlxsw_sp_init,
	.fini				= mlxsw_sp_fini,
	.basic_trap_groups_set		= mlxsw_sp_basic_trap_groups_set,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp_config_profile,
};
3828 
3829 bool mlxsw_sp_port_dev_check(const struct net_device *dev)
3830 {
3831 	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
3832 }
3833 
/* Walk callback: when @lower_dev is a mlxsw port, report it through
 * @data and return non-zero to stop the walk.
 */
static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data)
{
	struct mlxsw_sp_port **p_mlxsw_sp_port = data;

	if (!mlxsw_sp_port_dev_check(lower_dev))
		return 0;

	*p_mlxsw_sp_port = netdev_priv(lower_dev);
	return 1;
}
3846 
3847 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
3848 {
3849 	struct mlxsw_sp_port *mlxsw_sp_port;
3850 
3851 	if (mlxsw_sp_port_dev_check(dev))
3852 		return netdev_priv(dev);
3853 
3854 	mlxsw_sp_port = NULL;
3855 	netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &mlxsw_sp_port);
3856 
3857 	return mlxsw_sp_port;
3858 }
3859 
3860 struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
3861 {
3862 	struct mlxsw_sp_port *mlxsw_sp_port;
3863 
3864 	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
3865 	return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
3866 }
3867 
3868 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
3869 {
3870 	struct mlxsw_sp_port *mlxsw_sp_port;
3871 
3872 	if (mlxsw_sp_port_dev_check(dev))
3873 		return netdev_priv(dev);
3874 
3875 	mlxsw_sp_port = NULL;
3876 	netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
3877 				      &mlxsw_sp_port);
3878 
3879 	return mlxsw_sp_port;
3880 }
3881 
3882 struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
3883 {
3884 	struct mlxsw_sp_port *mlxsw_sp_port;
3885 
3886 	rcu_read_lock();
3887 	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
3888 	if (mlxsw_sp_port)
3889 		dev_hold(mlxsw_sp_port->dev);
3890 	rcu_read_unlock();
3891 	return mlxsw_sp_port;
3892 }
3893 
/* Release the netdev reference taken by mlxsw_sp_port_lower_dev_hold(). */
void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
{
	dev_put(mlxsw_sp_port->dev);
}
3898 
3899 static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
3900 {
3901 	char sldr_pl[MLXSW_REG_SLDR_LEN];
3902 
3903 	mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
3904 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3905 }
3906 
3907 static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
3908 {
3909 	char sldr_pl[MLXSW_REG_SLDR_LEN];
3910 
3911 	mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
3912 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3913 }
3914 
3915 static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
3916 				     u16 lag_id, u8 port_index)
3917 {
3918 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3919 	char slcor_pl[MLXSW_REG_SLCOR_LEN];
3920 
3921 	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
3922 				      lag_id, port_index);
3923 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3924 }
3925 
3926 static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
3927 					u16 lag_id)
3928 {
3929 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3930 	char slcor_pl[MLXSW_REG_SLCOR_LEN];
3931 
3932 	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
3933 					 lag_id);
3934 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3935 }
3936 
3937 static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
3938 					u16 lag_id)
3939 {
3940 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3941 	char slcor_pl[MLXSW_REG_SLCOR_LEN];
3942 
3943 	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
3944 					lag_id);
3945 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3946 }
3947 
3948 static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
3949 					 u16 lag_id)
3950 {
3951 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3952 	char slcor_pl[MLXSW_REG_SLCOR_LEN];
3953 
3954 	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
3955 					 lag_id);
3956 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3957 }
3958 
3959 static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
3960 				  struct net_device *lag_dev,
3961 				  u16 *p_lag_id)
3962 {
3963 	struct mlxsw_sp_upper *lag;
3964 	int free_lag_id = -1;
3965 	u64 max_lag;
3966 	int i;
3967 
3968 	max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
3969 	for (i = 0; i < max_lag; i++) {
3970 		lag = mlxsw_sp_lag_get(mlxsw_sp, i);
3971 		if (lag->ref_count) {
3972 			if (lag->dev == lag_dev) {
3973 				*p_lag_id = i;
3974 				return 0;
3975 			}
3976 		} else if (free_lag_id < 0) {
3977 			free_lag_id = i;
3978 		}
3979 	}
3980 	if (free_lag_id < 0)
3981 		return -EBUSY;
3982 	*p_lag_id = free_lag_id;
3983 	return 0;
3984 }
3985 
3986 static bool
3987 mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
3988 			  struct net_device *lag_dev,
3989 			  struct netdev_lag_upper_info *lag_upper_info)
3990 {
3991 	u16 lag_id;
3992 
3993 	if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0)
3994 		return false;
3995 	if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
3996 		return false;
3997 	return true;
3998 }
3999 
4000 static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
4001 				       u16 lag_id, u8 *p_port_index)
4002 {
4003 	u64 max_lag_members;
4004 	int i;
4005 
4006 	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
4007 					     MAX_LAG_MEMBERS);
4008 	for (i = 0; i < max_lag_members; i++) {
4009 		if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
4010 			*p_port_index = i;
4011 			return 0;
4012 		}
4013 	}
4014 	return -EBUSY;
4015 }
4016 
4017 static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
4018 				  struct net_device *lag_dev)
4019 {
4020 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4021 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
4022 	struct mlxsw_sp_upper *lag;
4023 	u16 lag_id;
4024 	u8 port_index;
4025 	int err;
4026 
4027 	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
4028 	if (err)
4029 		return err;
4030 	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
4031 	if (!lag->ref_count) {
4032 		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
4033 		if (err)
4034 			return err;
4035 		lag->dev = lag_dev;
4036 	}
4037 
4038 	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
4039 	if (err)
4040 		return err;
4041 	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
4042 	if (err)
4043 		goto err_col_port_add;
4044 	err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
4045 	if (err)
4046 		goto err_col_port_enable;
4047 
4048 	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
4049 				   mlxsw_sp_port->local_port);
4050 	mlxsw_sp_port->lag_id = lag_id;
4051 	mlxsw_sp_port->lagged = 1;
4052 	lag->ref_count++;
4053 
4054 	/* Port is no longer usable as a router interface */
4055 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, 1);
4056 	if (mlxsw_sp_port_vlan->fid)
4057 		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
4058 
4059 	return 0;
4060 
4061 err_col_port_enable:
4062 	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
4063 err_col_port_add:
4064 	if (!lag->ref_count)
4065 		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
4066 	return err;
4067 }
4068 
/* Remove the port from its LAG, reversing mlxsw_sp_port_lag_join():
 * collection is disabled before the port leaves the collector, the
 * hardware LAG is destroyed when the last member leaves, and the port is
 * restored to stand-alone operation. No-op if the port is not lagged.
 */
static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 lag_id = mlxsw_sp_port->lag_id;
	struct mlxsw_sp_upper *lag;

	if (!mlxsw_sp_port->lagged)
		return;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);

	/* Any VLANs configured on the port are no longer valid */
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port);

	/* Last member leaving - destroy the hardware LAG. The ref_count
	 * decrement itself happens below.
	 */
	if (lag->ref_count == 1)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;

	/* Re-instantiate the default VLAN entry for stand-alone use */
	mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
	/* Make sure untagged frames are allowed to ingress */
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
}
4099 
4100 static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
4101 				      u16 lag_id)
4102 {
4103 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4104 	char sldr_pl[MLXSW_REG_SLDR_LEN];
4105 
4106 	mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
4107 					 mlxsw_sp_port->local_port);
4108 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
4109 }
4110 
4111 static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
4112 					 u16 lag_id)
4113 {
4114 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4115 	char sldr_pl[MLXSW_REG_SLDR_LEN];
4116 
4117 	mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
4118 					    mlxsw_sp_port->local_port);
4119 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
4120 }
4121 
4122 static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
4123 				       bool lag_tx_enabled)
4124 {
4125 	if (lag_tx_enabled)
4126 		return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
4127 						  mlxsw_sp_port->lag_id);
4128 	else
4129 		return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
4130 						     mlxsw_sp_port->lag_id);
4131 }
4132 
/* Reflect a LAG lower-state change into the device by syncing the
 * port's distributor membership with the new tx_enabled state.
 */
static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct netdev_lag_lower_state_info *info)
{
	return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
}
4138 
4139 static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
4140 				 bool enable)
4141 {
4142 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4143 	enum mlxsw_reg_spms_state spms_state;
4144 	char *spms_pl;
4145 	u16 vid;
4146 	int err;
4147 
4148 	spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING :
4149 			      MLXSW_REG_SPMS_STATE_DISCARDING;
4150 
4151 	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
4152 	if (!spms_pl)
4153 		return -ENOMEM;
4154 	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
4155 
4156 	for (vid = 0; vid < VLAN_N_VID; vid++)
4157 		mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
4158 
4159 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
4160 	kfree(spms_pl);
4161 	return err;
4162 }
4163 
/* Prepare the port for enslavement to an OVS master: enable virtual
 * port mode, force all STP states to forwarding, and configure VLANs
 * 2..4095 via mlxsw_sp_port_vlan_set() (flags true/false — presumably
 * member/untagged; confirm against that function's definition).
 * Failures are unwound in reverse order.
 */
static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err)
		return err;
	err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
	if (err)
		goto err_port_stp_set;
	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1,
				     true, false);
	if (err)
		goto err_port_vlan_set;
	return 0;

err_port_vlan_set:
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
err_port_stp_set:
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	return err;
}
4186 
/* Undo mlxsw_sp_port_ovs_join() in reverse order: drop the VLAN 2..4095
 * configuration, return STP states to discarding and leave virtual port
 * mode.
 */
static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1,
			       false, false);
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
}
4194 
/* Handle NETDEV_PRECHANGEUPPER / NETDEV_CHANGEUPPER for a port netdev.
 *
 * PRECHANGEUPPER vetoes (-EINVAL) topologies the device cannot offload
 * before the core commits them; CHANGEUPPER reflects the committed
 * change into the device (bridge / LAG / OVS join or leave).
 *
 * @lower_dev: device the notification was originally dispatched for
 *	(may differ from @dev when the port sits under a LAG);
 * @dev: the port netdev itself.
 */
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
					       struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		/* Only VLAN, LAG, bridge and OVS uppers are supported */
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(upper_dev) &&
		    !netif_is_bridge_master(upper_dev) &&
		    !netif_is_ovs_master(upper_dev))
			return -EINVAL;
		if (!info->linking)
			break;
		/* Refuse linking under an upper that itself has uppers */
		if (netdev_has_any_upper_dev(upper_dev))
			return -EINVAL;
		/* LAG must be mappable and use hash TX policy */
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info))
			return -EINVAL;
		/* A port with VLAN uppers cannot join a LAG */
		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev))
			return -EINVAL;
		/* VLAN uppers of a LAG port must be on the LAG itself */
		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev)))
			return -EINVAL;
		/* OVS enslavement and VLAN uppers are mutually exclusive */
		if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev))
			return -EINVAL;
		if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev))
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								lower_dev,
								upper_dev);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   lower_dev,
							   upper_dev);
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev);
			else
				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							upper_dev);
		} else if (netif_is_ovs_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
			else
				mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
		}
		break;
	}

	return err;
}
4264 
4265 static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
4266 					       unsigned long event, void *ptr)
4267 {
4268 	struct netdev_notifier_changelowerstate_info *info;
4269 	struct mlxsw_sp_port *mlxsw_sp_port;
4270 	int err;
4271 
4272 	mlxsw_sp_port = netdev_priv(dev);
4273 	info = ptr;
4274 
4275 	switch (event) {
4276 	case NETDEV_CHANGELOWERSTATE:
4277 		if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
4278 			err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
4279 							info->lower_state_info);
4280 			if (err)
4281 				netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
4282 		}
4283 		break;
4284 	}
4285 
4286 	return 0;
4287 }
4288 
4289 static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev,
4290 					 struct net_device *port_dev,
4291 					 unsigned long event, void *ptr)
4292 {
4293 	switch (event) {
4294 	case NETDEV_PRECHANGEUPPER:
4295 	case NETDEV_CHANGEUPPER:
4296 		return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev,
4297 							   event, ptr);
4298 	case NETDEV_CHANGELOWERSTATE:
4299 		return mlxsw_sp_netdevice_port_lower_event(port_dev, event,
4300 							   ptr);
4301 	}
4302 
4303 	return 0;
4304 }
4305 
4306 static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
4307 					unsigned long event, void *ptr)
4308 {
4309 	struct net_device *dev;
4310 	struct list_head *iter;
4311 	int ret;
4312 
4313 	netdev_for_each_lower_dev(lag_dev, dev, iter) {
4314 		if (mlxsw_sp_port_dev_check(dev)) {
4315 			ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event,
4316 							    ptr);
4317 			if (ret)
4318 				return ret;
4319 		}
4320 	}
4321 
4322 	return 0;
4323 }
4324 
/* Handle PRECHANGEUPPER/CHANGEUPPER for a VLAN device on top of a port.
 *
 * Only bridge uppers are supported: PRECHANGEUPPER vetoes anything else
 * (and bridges that already have uppers); CHANGEUPPER joins/leaves the
 * bridge on behalf of the VLAN device.
 *
 * @vlan_dev: the VLAN netdev; @dev: the underlying port netdev;
 * @vid: the VLAN id (currently unused here).
 */
static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
					      struct net_device *dev,
					      unsigned long event, void *ptr,
					      u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct net_device *upper_dev;
	int err = 0;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_bridge_master(upper_dev))
			return -EINVAL;
		if (!info->linking)
			break;
		/* Bridges with uppers of their own are not supported */
		if (netdev_has_any_upper_dev(upper_dev))
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								vlan_dev,
								upper_dev);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   vlan_dev,
							   upper_dev);
		} else {
			/* PRECHANGEUPPER should have vetoed this */
			err = -EINVAL;
			WARN_ON(1);
		}
		break;
	}

	return err;
}
4365 
4366 static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev,
4367 						  struct net_device *lag_dev,
4368 						  unsigned long event,
4369 						  void *ptr, u16 vid)
4370 {
4371 	struct net_device *dev;
4372 	struct list_head *iter;
4373 	int ret;
4374 
4375 	netdev_for_each_lower_dev(lag_dev, dev, iter) {
4376 		if (mlxsw_sp_port_dev_check(dev)) {
4377 			ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev,
4378 								 event, ptr,
4379 								 vid);
4380 			if (ret)
4381 				return ret;
4382 		}
4383 	}
4384 
4385 	return 0;
4386 }
4387 
4388 static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
4389 					 unsigned long event, void *ptr)
4390 {
4391 	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
4392 	u16 vid = vlan_dev_vlan_id(vlan_dev);
4393 
4394 	if (mlxsw_sp_port_dev_check(real_dev))
4395 		return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev,
4396 							  event, ptr, vid);
4397 	else if (netif_is_lag_master(real_dev))
4398 		return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev,
4399 							      real_dev, event,
4400 							      ptr, vid);
4401 
4402 	return 0;
4403 }
4404 
4405 static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
4406 {
4407 	struct netdev_notifier_changeupper_info *info = ptr;
4408 
4409 	if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
4410 		return false;
4411 	return netif_is_l3_master(info->upper_dev);
4412 }
4413 
/* Top-level netdevice notifier callback. Dispatch order is significant:
 * router port events and VRF enslavement are checked before the
 * device-type dispatch (port, then LAG, then VLAN device).
 */
static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	int err = 0;

	if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU)
		err = mlxsw_sp_netdevice_router_port_event(dev);
	else if (mlxsw_sp_is_vrf_event(event, ptr))
		err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
	else if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);

	return notifier_from_errno(err);
}
4433 
/* Notifier blocks registered at module init: netdevice events, IPv4/IPv6
 * address events and netevents (the latter handled by the router code).
 */
static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
	.notifier_call = mlxsw_sp_netdevice_event,
};

static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_event,
	.priority = 10,	/* Must be called before FIB notifier block */
};

static struct notifier_block mlxsw_sp_inet6addr_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inet6addr_event,
};

static struct notifier_block mlxsw_sp_router_netevent_nb __read_mostly = {
	.notifier_call = mlxsw_sp_router_netevent_event,
};
4450 
/* PCI binding: match the Mellanox Spectrum device id. */
static const struct pci_device_id mlxsw_sp_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
	{0, },
};

static struct pci_driver mlxsw_sp_pci_driver = {
	.name = mlxsw_sp_driver_name,
	.id_table = mlxsw_sp_pci_id_table,
};
4460 
4461 static int __init mlxsw_sp_module_init(void)
4462 {
4463 	int err;
4464 
4465 	register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
4466 	register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
4467 	register_inet6addr_notifier(&mlxsw_sp_inet6addr_nb);
4468 	register_netevent_notifier(&mlxsw_sp_router_netevent_nb);
4469 
4470 	err = mlxsw_core_driver_register(&mlxsw_sp_driver);
4471 	if (err)
4472 		goto err_core_driver_register;
4473 
4474 	err = mlxsw_pci_driver_register(&mlxsw_sp_pci_driver);
4475 	if (err)
4476 		goto err_pci_driver_register;
4477 
4478 	return 0;
4479 
4480 err_pci_driver_register:
4481 	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
4482 err_core_driver_register:
4483 	unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
4484 	unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb);
4485 	unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
4486 	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
4487 	return err;
4488 }
4489 
/* Module teardown: undo mlxsw_sp_module_init() in reverse order. */
static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_pci_driver_unregister(&mlxsw_sp_pci_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
	unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
	unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb);
	unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
}
4499 
/* Module entry points and metadata */
module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_DEVICE_TABLE(pci, mlxsw_sp_pci_id_table);
MODULE_FIRMWARE(MLXSW_SP_FW_FILENAME);
4508