1 // SPDX-License-Identifier: GPL-2.0
2 /* Huawei HiNIC PCI Express Linux driver
3  * Copyright(c) 2017 Huawei Technologies Co., Ltd
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms and conditions of the GNU General Public License,
7  * version 2, as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12  * for more details.
13  *
14  */
15 
16 #include <linux/kernel.h>
17 #include <linux/pci.h>
18 #include <linux/device.h>
19 #include <linux/module.h>
20 #include <linux/types.h>
21 #include <linux/errno.h>
22 #include <linux/interrupt.h>
23 #include <linux/etherdevice.h>
24 #include <linux/netdevice.h>
25 #include <linux/if_vlan.h>
26 #include <linux/ethtool.h>
27 #include <linux/vmalloc.h>
28 #include <linux/sfp.h>
29 
30 #include "hinic_hw_qp.h"
31 #include "hinic_hw_dev.h"
32 #include "hinic_port.h"
33 #include "hinic_tx.h"
34 #include "hinic_rx.h"
35 #include "hinic_dev.h"
36 
37 #define SET_LINK_STR_MAX_LEN	16
38 
39 #define GET_SUPPORTED_MODE	0
40 #define GET_ADVERTISED_MODE	1
41 
42 #define ETHTOOL_ADD_SUPPORTED_SPEED_LINK_MODE(ecmd, mode)	\
43 		((ecmd)->supported |=	\
44 		(1UL << hw_to_ethtool_link_mode_table[mode].link_mode_bit))
45 #define ETHTOOL_ADD_ADVERTISED_SPEED_LINK_MODE(ecmd, mode)	\
46 		((ecmd)->advertising |=	\
47 		(1UL << hw_to_ethtool_link_mode_table[mode].link_mode_bit))
48 #define ETHTOOL_ADD_SUPPORTED_LINK_MODE(ecmd, mode)	\
49 				((ecmd)->supported |= SUPPORTED_##mode)
50 #define ETHTOOL_ADD_ADVERTISED_LINK_MODE(ecmd, mode)	\
51 				((ecmd)->advertising |= ADVERTISED_##mode)
52 
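/* Interrupt coalescing values are programmed to hardware in device units:
 * the timer in multiples of COALESCE_TIMER_CFG_UNIT microseconds and the
 * pending (frame) limit in multiples of COALESCE_PENDING_LIMIT_UNIT, as
 * reflected by the conversions in __hinic_get_coalesce()/__hinic_set_coalesce().
 */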
53 #define COALESCE_PENDING_LIMIT_UNIT	8
54 #define	COALESCE_TIMER_CFG_UNIT		9
55 #define COALESCE_ALL_QUEUE		0xFFFF
56 #define COALESCE_MAX_PENDING_LIMIT	(255 * COALESCE_PENDING_LIMIT_UNIT)
57 #define COALESCE_MAX_TIMER_CFG		(255 * COALESCE_TIMER_CFG_UNIT)
58 
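/* Maps a hardware link mode to its ethtool link-mode bit and nominal speed;
 * used to translate the supported/advertised bitmaps reported by the device.
 */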
59 struct hw2ethtool_link_mode {
60 	enum ethtool_link_mode_bit_indices link_mode_bit;
61 	u32 speed;
62 	enum hinic_link_mode hw_link_mode;
63 };
64 
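/* Scratch accumulator for the supported/advertising bitmaps built by the
 * ETHTOOL_ADD_* macros; the result is copied into
 * struct ethtool_link_ksettings via linkmode_copy().
 */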
65 struct cmd_link_settings {
66 	u64	supported;
67 	u64	advertising;
68 
69 	u32	speed;
70 	u8	duplex;
71 	u8	port;
72 	u8	autoneg;
73 };
74 
75 static u32 hw_to_ethtool_speed[LINK_SPEED_LEVELS] = {
76 	SPEED_10, SPEED_100,
77 	SPEED_1000, SPEED_10000,
78 	SPEED_25000, SPEED_40000,
79 	SPEED_100000
80 };
81 
82 static struct hw2ethtool_link_mode
83 	hw_to_ethtool_link_mode_table[HINIC_LINK_MODE_NUMBERS] = {
84 	{
85 		.link_mode_bit = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
86 		.speed = SPEED_10000,
87 		.hw_link_mode = HINIC_10GE_BASE_KR,
88 	},
89 	{
90 		.link_mode_bit = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
91 		.speed = SPEED_40000,
92 		.hw_link_mode = HINIC_40GE_BASE_KR4,
93 	},
94 	{
95 		.link_mode_bit = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
96 		.speed = SPEED_40000,
97 		.hw_link_mode = HINIC_40GE_BASE_CR4,
98 	},
99 	{
100 		.link_mode_bit = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
101 		.speed = SPEED_100000,
102 		.hw_link_mode = HINIC_100GE_BASE_KR4,
103 	},
104 	{
105 		.link_mode_bit = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
106 		.speed = SPEED_100000,
107 		.hw_link_mode = HINIC_100GE_BASE_CR4,
108 	},
109 	{
110 		.link_mode_bit = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
111 		.speed = SPEED_25000,
112 		.hw_link_mode = HINIC_25GE_BASE_KR_S,
113 	},
114 	{
115 		.link_mode_bit = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
116 		.speed = SPEED_25000,
117 		.hw_link_mode = HINIC_25GE_BASE_CR_S,
118 	},
119 	{
120 		.link_mode_bit = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
121 		.speed = SPEED_25000,
122 		.hw_link_mode = HINIC_25GE_BASE_KR,
123 	},
124 	{
125 		.link_mode_bit = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
126 		.speed = SPEED_25000,
127 		.hw_link_mode = HINIC_25GE_BASE_CR,
128 	},
129 	{
130 		.link_mode_bit = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
131 		.speed = SPEED_1000,
132 		.hw_link_mode = HINIC_GE_BASE_KX,
133 	},
134 };
135 
136 #define LP_DEFAULT_TIME                 5 /* seconds */
137 #define LP_PKT_LEN                      1514
138 
139 #define PORT_DOWN_ERR_IDX		0
140 enum diag_test_index {
141 	INTERNAL_LP_TEST = 0,
142 	EXTERNAL_LP_TEST = 1,
143 	DIAG_TEST_MAX = 2,
144 };
145 
146 static void set_link_speed(struct ethtool_link_ksettings *link_ksettings,
147 			   enum hinic_speed speed)
148 {
149 	switch (speed) {
150 	case HINIC_SPEED_10MB_LINK:
151 		link_ksettings->base.speed = SPEED_10;
152 		break;
153 
154 	case HINIC_SPEED_100MB_LINK:
155 		link_ksettings->base.speed = SPEED_100;
156 		break;
157 
158 	case HINIC_SPEED_1000MB_LINK:
159 		link_ksettings->base.speed = SPEED_1000;
160 		break;
161 
162 	case HINIC_SPEED_10GB_LINK:
163 		link_ksettings->base.speed = SPEED_10000;
164 		break;
165 
166 	case HINIC_SPEED_25GB_LINK:
167 		link_ksettings->base.speed = SPEED_25000;
168 		break;
169 
170 	case HINIC_SPEED_40GB_LINK:
171 		link_ksettings->base.speed = SPEED_40000;
172 		break;
173 
174 	case HINIC_SPEED_100GB_LINK:
175 		link_ksettings->base.speed = SPEED_100000;
176 		break;
177 
178 	default:
179 		link_ksettings->base.speed = SPEED_UNKNOWN;
180 		break;
181 	}
182 }
183 
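/* Return the index of @link_mode in hw_to_ethtool_link_mode_table, or
 * HINIC_LINK_MODE_NUMBERS if the mode is not listed; callers must check the
 * return value against the table size.
 */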
184 static int hinic_get_link_mode_index(enum hinic_link_mode link_mode)
185 {
186 	int i = 0;
187 
188 	for (i = 0; i < HINIC_LINK_MODE_NUMBERS; i++) {
189 		if (link_mode == hw_to_ethtool_link_mode_table[i].hw_link_mode)
190 			break;
191 	}
192 
193 	return i;
194 }
195 
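/* Walk the hardware link-mode bitmap in @hw_link_mode and set the matching
 * ethtool speed bits in @link_settings; @name selects the supported or the
 * advertised bitmap (GET_SUPPORTED_MODE / GET_ADVERTISED_MODE).
 */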
196 static void hinic_add_ethtool_link_mode(struct cmd_link_settings *link_settings,
197 					enum hinic_link_mode hw_link_mode,
198 					u32 name)
199 {
200 	enum hinic_link_mode link_mode;
201 	int idx = 0;
202 
203 	for (link_mode = 0; link_mode < HINIC_LINK_MODE_NUMBERS; link_mode++) {
204 		if (hw_link_mode & ((u32)1 << link_mode)) {
205 			idx = hinic_get_link_mode_index(link_mode);
206 			if (idx >= HINIC_LINK_MODE_NUMBERS)
207 				continue;
208 
209 			if (name == GET_SUPPORTED_MODE)
210 				ETHTOOL_ADD_SUPPORTED_SPEED_LINK_MODE
211 					(link_settings, idx);
212 			else
213 				ETHTOOL_ADD_ADVERTISED_SPEED_LINK_MODE
214 					(link_settings, idx);
215 		}
216 	}
217 }
218 
219 static void hinic_link_port_type(struct cmd_link_settings *link_settings,
220 				 enum hinic_port_type port_type)
221 {
222 	switch (port_type) {
223 	case HINIC_PORT_ELEC:
224 	case HINIC_PORT_TP:
225 		ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, TP);
226 		ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, TP);
227 		link_settings->port = PORT_TP;
228 		break;
229 
230 	case HINIC_PORT_AOC:
231 	case HINIC_PORT_FIBRE:
232 		ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, FIBRE);
233 		ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, FIBRE);
234 		link_settings->port = PORT_FIBRE;
235 		break;
236 
237 	case HINIC_PORT_COPPER:
238 		ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, FIBRE);
239 		ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, FIBRE);
240 		link_settings->port = PORT_DA;
241 		break;
242 
243 	case HINIC_PORT_BACKPLANE:
244 		ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, Backplane);
245 		ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, Backplane);
246 		link_settings->port = PORT_NONE;
247 		break;
248 
249 	default:
250 		link_settings->port = PORT_OTHER;
251 		break;
252 	}
253 }
254 
255 static int hinic_get_link_ksettings(struct net_device *netdev,
256 				    struct ethtool_link_ksettings
257 				    *link_ksettings)
258 {
259 	struct hinic_dev *nic_dev = netdev_priv(netdev);
260 	struct hinic_link_mode_cmd link_mode = { 0 };
261 	struct hinic_pause_config pause_info = { 0 };
262 	struct cmd_link_settings settings = { 0 };
263 	enum hinic_port_link_state link_state;
264 	struct hinic_port_cap port_cap;
265 	int err;
266 
267 	ethtool_link_ksettings_zero_link_mode(link_ksettings, supported);
268 	ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
269 
270 	link_ksettings->base.speed = SPEED_UNKNOWN;
271 	link_ksettings->base.autoneg = AUTONEG_DISABLE;
272 	link_ksettings->base.duplex = DUPLEX_UNKNOWN;
273 
274 	err = hinic_port_get_cap(nic_dev, &port_cap);
275 	if (err)
276 		return err;
277 
278 	hinic_link_port_type(&settings, port_cap.port_type);
279 	link_ksettings->base.port = settings.port;
280 
281 	err = hinic_port_link_state(nic_dev, &link_state);
282 	if (err)
283 		return err;
284 
285 	if (link_state == HINIC_LINK_STATE_UP) {
286 		set_link_speed(link_ksettings, port_cap.speed);
287 		link_ksettings->base.duplex =
288 			(port_cap.duplex == HINIC_DUPLEX_FULL) ?
289 			DUPLEX_FULL : DUPLEX_HALF;
290 	}
291 
292 	if (!!(port_cap.autoneg_cap & HINIC_AUTONEG_SUPPORTED))
293 		ethtool_link_ksettings_add_link_mode(link_ksettings,
294 						     advertising, Autoneg);
295 
296 	if (port_cap.autoneg_state == HINIC_AUTONEG_ACTIVE)
297 		link_ksettings->base.autoneg = AUTONEG_ENABLE;
298 
299 	err = hinic_get_link_mode(nic_dev->hwdev, &link_mode);
300 	if (err || link_mode.supported == HINIC_SUPPORTED_UNKNOWN ||
301 	    link_mode.advertised == HINIC_SUPPORTED_UNKNOWN)
302 		return -EIO;
303 
304 	hinic_add_ethtool_link_mode(&settings, link_mode.supported,
305 				    GET_SUPPORTED_MODE);
306 	hinic_add_ethtool_link_mode(&settings, link_mode.advertised,
307 				    GET_ADVERTISED_MODE);
308 
309 	if (!HINIC_IS_VF(nic_dev->hwdev->hwif)) {
310 		err = hinic_get_hw_pause_info(nic_dev->hwdev, &pause_info);
311 		if (err)
312 			return err;
313 		ETHTOOL_ADD_SUPPORTED_LINK_MODE(&settings, Pause);
314 		if (pause_info.rx_pause && pause_info.tx_pause) {
315 			ETHTOOL_ADD_ADVERTISED_LINK_MODE(&settings, Pause);
316 		} else if (pause_info.tx_pause) {
317 			ETHTOOL_ADD_ADVERTISED_LINK_MODE(&settings, Asym_Pause);
318 		} else if (pause_info.rx_pause) {
319 			ETHTOOL_ADD_ADVERTISED_LINK_MODE(&settings, Pause);
320 			ETHTOOL_ADD_ADVERTISED_LINK_MODE(&settings, Asym_Pause);
321 		}
322 	}
323 
324 	linkmode_copy(link_ksettings->link_modes.supported,
325 		      (unsigned long *)&settings.supported);
326 	linkmode_copy(link_ksettings->link_modes.advertising,
327 		      (unsigned long *)&settings.advertising);
328 
329 	return 0;
330 }
331 
332 static int hinic_ethtool_to_hw_speed_level(u32 speed)
333 {
334 	int i;
335 
336 	for (i = 0; i < LINK_SPEED_LEVELS; i++) {
337 		if (hw_to_ethtool_speed[i] == speed)
338 			break;
339 	}
340 
341 	return i;
342 }
343 
344 static bool hinic_is_support_speed(enum hinic_link_mode supported_link,
345 				   u32 speed)
346 {
347 	enum hinic_link_mode link_mode;
348 	int idx;
349 
350 	for (link_mode = 0; link_mode < HINIC_LINK_MODE_NUMBERS; link_mode++) {
351 		if (!(supported_link & ((u32)1 << link_mode)))
352 			continue;
353 
354 		idx = hinic_get_link_mode_index(link_mode);
355 		if (idx >= HINIC_LINK_MODE_NUMBERS)
356 			continue;
357 
358 		if (hw_to_ethtool_link_mode_table[idx].speed == speed)
359 			return true;
360 	}
361 
362 	return false;
363 }
364 
365 static bool hinic_is_speed_legal(struct hinic_dev *nic_dev, u32 speed)
366 {
367 	struct hinic_link_mode_cmd link_mode = { 0 };
368 	struct net_device *netdev = nic_dev->netdev;
369 	enum nic_speed_level speed_level = 0;
370 	int err;
371 
372 	err = hinic_get_link_mode(nic_dev->hwdev, &link_mode);
373 	if (err)
374 		return false;
375 
376 	if (link_mode.supported == HINIC_SUPPORTED_UNKNOWN ||
377 	    link_mode.advertised == HINIC_SUPPORTED_UNKNOWN)
378 		return false;
379 
380 	speed_level = hinic_ethtool_to_hw_speed_level(speed);
381 	if (speed_level >= LINK_SPEED_LEVELS ||
382 	    !hinic_is_support_speed(link_mode.supported, speed)) {
383 		netif_err(nic_dev, drv, netdev,
384 			  "Unsupported speed: %d\n", speed);
385 		return false;
386 	}
387 
388 	return true;
389 }
390 
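/* Validate the requested autoneg/speed combination against the port
 * capabilities and build the HILINK_LINK_SET_* bitmap in @set_settings.
 */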
391 static int get_link_settings_type(struct hinic_dev *nic_dev,
392 				  u8 autoneg, u32 speed, u32 *set_settings)
393 {
394 	struct hinic_port_cap port_cap = { 0 };
395 	int err;
396 
397 	err = hinic_port_get_cap(nic_dev, &port_cap);
398 	if (err)
399 		return err;
400 
401 	/* always set autonegotiation */
402 	if (port_cap.autoneg_cap)
403 		*set_settings |= HILINK_LINK_SET_AUTONEG;
404 
405 	if (autoneg == AUTONEG_ENABLE) {
406 		if (!port_cap.autoneg_cap) {
407 			netif_err(nic_dev, drv, nic_dev->netdev, "Autoneg not supported\n");
408 			return -EOPNOTSUPP;
409 		}
410 	} else if (speed != (u32)SPEED_UNKNOWN) {
411 		/* set speed only when autoneg is disabled */
412 		if (!hinic_is_speed_legal(nic_dev, speed))
413 			return -EINVAL;
414 		*set_settings |= HILINK_LINK_SET_SPEED;
415 	} else {
416 		netif_err(nic_dev, drv, nic_dev->netdev, "Need to set speed when autoneg is off\n");
417 		return -EOPNOTSUPP;
418 	}
419 
420 	return 0;
421 }
422 
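/* Fallback path for firmware that does not implement the combined
 * link-settings command: apply autoneg and speed with separate commands.
 */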
423 static int set_link_settings_separate_cmd(struct hinic_dev *nic_dev,
424 					  u32 set_settings, u8 autoneg,
425 					  u32 speed)
426 {
427 	enum nic_speed_level speed_level = 0;
428 	int err = 0;
429 
430 	if (set_settings & HILINK_LINK_SET_AUTONEG) {
431 		err = hinic_set_autoneg(nic_dev->hwdev,
432 					(autoneg == AUTONEG_ENABLE));
433 		if (err)
434 			netif_err(nic_dev, drv, nic_dev->netdev, "%s autoneg failed\n",
435 				  (autoneg == AUTONEG_ENABLE) ?
436 				  "Enable" : "Disable");
437 		else
438 			netif_info(nic_dev, drv, nic_dev->netdev, "%s autoneg successfully\n",
439 				   (autoneg == AUTONEG_ENABLE) ?
440 				   "Enable" : "Disable");
441 	}
442 
443 	if (!err && (set_settings & HILINK_LINK_SET_SPEED)) {
444 		speed_level = hinic_ethtool_to_hw_speed_level(speed);
445 		err = hinic_set_speed(nic_dev->hwdev, speed_level);
446 		if (err)
447 			netif_err(nic_dev, drv, nic_dev->netdev, "Set speed %d failed\n",
448 				  speed);
449 		else
450 			netif_info(nic_dev, drv, nic_dev->netdev, "Set speed %d successfully\n",
451 				   speed);
452 	}
453 
454 	return err;
455 }
456 
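/* Try the combined link-settings management command first; if the firmware
 * reports it as unsupported, fall back to setting autoneg and speed with
 * individual commands via set_link_settings_separate_cmd().
 */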
457 static int hinic_set_settings_to_hw(struct hinic_dev *nic_dev,
458 				    u32 set_settings, u8 autoneg, u32 speed)
459 {
460 	struct hinic_link_ksettings_info settings = {0};
461 	char set_link_str[SET_LINK_STR_MAX_LEN] = {0};
462 	const char *autoneg_str;
463 	struct net_device *netdev = nic_dev->netdev;
464 	enum nic_speed_level speed_level = 0;
465 	int err;
466 
467 	autoneg_str = (set_settings & HILINK_LINK_SET_AUTONEG) ?
468 		      (autoneg ? "autoneg enable " : "autoneg disable ") : "";
469 
470 	if (set_settings & HILINK_LINK_SET_SPEED) {
471 		speed_level = hinic_ethtool_to_hw_speed_level(speed);
472 		err = snprintf(set_link_str, SET_LINK_STR_MAX_LEN,
473 			       "speed %d ", speed);
474 		if (err >= SET_LINK_STR_MAX_LEN) {
475 			netif_err(nic_dev, drv, netdev, "Failed to snprintf link speed, function return(%d) and dest_len(%d)\n",
476 				  err, SET_LINK_STR_MAX_LEN);
477 			return -EFAULT;
478 		}
479 	}
480 
481 	settings.func_id = HINIC_HWIF_FUNC_IDX(nic_dev->hwdev->hwif);
482 	settings.valid_bitmap = set_settings;
483 	settings.autoneg = autoneg;
484 	settings.speed = speed_level;
485 
486 	err = hinic_set_link_settings(nic_dev->hwdev, &settings);
487 	if (err != HINIC_MGMT_CMD_UNSUPPORTED) {
488 		if (err)
489 			netif_err(nic_dev, drv, netdev, "Set %s%sfailed\n",
490 				  autoneg_str, set_link_str);
491 		else
492 			netif_info(nic_dev, drv, netdev, "Set %s%ssuccessfully\n",
493 				   autoneg_str, set_link_str);
494 
495 		return err;
496 	}
497 
498 	return set_link_settings_separate_cmd(nic_dev, set_settings, autoneg,
499 					      speed);
500 }
501 
502 static int set_link_settings(struct net_device *netdev, u8 autoneg, u32 speed)
503 {
504 	struct hinic_dev *nic_dev = netdev_priv(netdev);
505 	u32 set_settings = 0;
506 	int err;
507 
508 	err = get_link_settings_type(nic_dev, autoneg, speed, &set_settings);
509 	if (err)
510 		return err;
511 
512 	if (set_settings)
513 		err = hinic_set_settings_to_hw(nic_dev, set_settings,
514 					       autoneg, speed);
515 	else
516 		netif_info(nic_dev, drv, netdev, "Nothing changed, exit without setting anything\n");
517 
518 	return err;
519 }
520 
521 static int hinic_set_link_ksettings(struct net_device *netdev, const struct
522 				    ethtool_link_ksettings *link_settings)
523 {
524 	/* only support to set autoneg and speed */
525 	return set_link_settings(netdev, link_settings->base.autoneg,
526 				 link_settings->base.speed);
527 }
528 
529 static void hinic_get_drvinfo(struct net_device *netdev,
530 			      struct ethtool_drvinfo *info)
531 {
532 	struct hinic_dev *nic_dev = netdev_priv(netdev);
533 	u8 mgmt_ver[HINIC_MGMT_VERSION_MAX_LEN] = {0};
534 	struct hinic_hwdev *hwdev = nic_dev->hwdev;
535 	struct hinic_hwif *hwif = hwdev->hwif;
536 	int err;
537 
538 	strscpy(info->driver, HINIC_DRV_NAME, sizeof(info->driver));
539 	strscpy(info->bus_info, pci_name(hwif->pdev), sizeof(info->bus_info));
540 
541 	err = hinic_get_mgmt_version(nic_dev, mgmt_ver);
542 	if (err)
543 		return;
544 
545 	snprintf(info->fw_version, sizeof(info->fw_version), "%s", mgmt_ver);
546 }
547 
548 static void hinic_get_ringparam(struct net_device *netdev,
549 				struct ethtool_ringparam *ring,
550 				struct kernel_ethtool_ringparam *kernel_ring,
551 				struct netlink_ext_ack *extack)
552 {
553 	struct hinic_dev *nic_dev = netdev_priv(netdev);
554 
555 	ring->rx_max_pending = HINIC_MAX_QUEUE_DEPTH;
556 	ring->tx_max_pending = HINIC_MAX_QUEUE_DEPTH;
557 	ring->rx_pending = nic_dev->rq_depth;
558 	ring->tx_pending = nic_dev->sq_depth;
559 }
560 
561 static int check_ringparam_valid(struct hinic_dev *nic_dev,
562 				 struct ethtool_ringparam *ring)
563 {
564 	if (ring->rx_jumbo_pending || ring->rx_mini_pending) {
565 		netif_err(nic_dev, drv, nic_dev->netdev,
566 			  "Unsupported rx_jumbo_pending/rx_mini_pending\n");
567 		return -EINVAL;
568 	}
569 
570 	if (ring->tx_pending > HINIC_MAX_QUEUE_DEPTH ||
571 	    ring->tx_pending < HINIC_MIN_QUEUE_DEPTH ||
572 	    ring->rx_pending > HINIC_MAX_QUEUE_DEPTH ||
573 	    ring->rx_pending < HINIC_MIN_QUEUE_DEPTH) {
574 		netif_err(nic_dev, drv, nic_dev->netdev,
575 			  "Queue depth out of range [%d-%d]\n",
576 			  HINIC_MIN_QUEUE_DEPTH, HINIC_MAX_QUEUE_DEPTH);
577 		return -EINVAL;
578 	}
579 
580 	return 0;
581 }
582 
583 static int hinic_set_ringparam(struct net_device *netdev,
584 			       struct ethtool_ringparam *ring,
585 			       struct kernel_ethtool_ringparam *kernel_ring,
586 			       struct netlink_ext_ack *extack)
587 {
588 	struct hinic_dev *nic_dev = netdev_priv(netdev);
589 	u16 new_sq_depth, new_rq_depth;
590 	int err;
591 
592 	err = check_ringparam_valid(nic_dev, ring);
593 	if (err)
594 		return err;
595 
596 	new_sq_depth = (u16)(1U << (u16)ilog2(ring->tx_pending));
597 	new_rq_depth = (u16)(1U << (u16)ilog2(ring->rx_pending));
598 
599 	if (new_sq_depth == nic_dev->sq_depth &&
600 	    new_rq_depth == nic_dev->rq_depth)
601 		return 0;
602 
603 	netif_info(nic_dev, drv, netdev,
604 		   "Change Tx/Rx ring depth from %d/%d to %d/%d\n",
605 		   nic_dev->sq_depth, nic_dev->rq_depth,
606 		   new_sq_depth, new_rq_depth);
607 
608 	nic_dev->sq_depth = new_sq_depth;
609 	nic_dev->rq_depth = new_rq_depth;
610 
611 	if (netif_running(netdev)) {
612 		netif_info(nic_dev, drv, netdev, "Restarting netdev\n");
613 		err = hinic_close(netdev);
614 		if (err) {
615 			netif_err(nic_dev, drv, netdev,
616 				  "Failed to close netdev\n");
617 			return -EFAULT;
618 		}
619 
620 		err = hinic_open(netdev);
621 		if (err) {
622 			netif_err(nic_dev, drv, netdev,
623 				  "Failed to open netdev\n");
624 			return -EFAULT;
625 		}
626 	}
627 
628 	return 0;
629 }
630 
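/* Report coalesce settings for one queue, or the queue-0 values as the
 * device-wide default when @queue is COALESCE_ALL_QUEUE; values are converted
 * from hardware units back to microseconds and frames.
 */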
631 static int __hinic_get_coalesce(struct net_device *netdev,
632 				struct ethtool_coalesce *coal, u16 queue)
633 {
634 	struct hinic_dev *nic_dev = netdev_priv(netdev);
635 	struct hinic_intr_coal_info *rx_intr_coal_info;
636 	struct hinic_intr_coal_info *tx_intr_coal_info;
637 
638 	if (queue == COALESCE_ALL_QUEUE) {
639 		/* get tx/rx irq0 as default parameters */
640 		rx_intr_coal_info = &nic_dev->rx_intr_coalesce[0];
641 		tx_intr_coal_info = &nic_dev->tx_intr_coalesce[0];
642 	} else {
643 		if (queue >= nic_dev->num_qps) {
644 			netif_err(nic_dev, drv, netdev,
645 				  "Invalid queue_id: %d\n", queue);
646 			return -EINVAL;
647 		}
648 		rx_intr_coal_info = &nic_dev->rx_intr_coalesce[queue];
649 		tx_intr_coal_info = &nic_dev->tx_intr_coalesce[queue];
650 	}
651 
652 	/* coalesce_timer is in unit of 9us */
653 	coal->rx_coalesce_usecs = rx_intr_coal_info->coalesce_timer_cfg *
654 			COALESCE_TIMER_CFG_UNIT;
655 	/* coalesced_frames is in unit of 8 */
656 	coal->rx_max_coalesced_frames = rx_intr_coal_info->pending_limt *
657 			COALESCE_PENDING_LIMIT_UNIT;
658 	coal->tx_coalesce_usecs = tx_intr_coal_info->coalesce_timer_cfg *
659 			COALESCE_TIMER_CFG_UNIT;
660 	coal->tx_max_coalesced_frames = tx_intr_coal_info->pending_limt *
661 			COALESCE_PENDING_LIMIT_UNIT;
662 
663 	return 0;
664 }
665 
666 static int is_coalesce_exceed_limit(const struct ethtool_coalesce *coal)
667 {
668 	if (coal->rx_coalesce_usecs > COALESCE_MAX_TIMER_CFG ||
669 	    coal->rx_max_coalesced_frames > COALESCE_MAX_PENDING_LIMIT ||
670 	    coal->tx_coalesce_usecs > COALESCE_MAX_TIMER_CFG ||
671 	    coal->tx_max_coalesced_frames > COALESCE_MAX_PENDING_LIMIT)
672 		return -ERANGE;
673 
674 	return 0;
675 }
676 
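/* Cache the new coalesce parameters for the queue and, if the interface is up
 * and the queue is in use, program them into the MSI-X entry of the
 * corresponding RQ or SQ.
 */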
677 static int set_queue_coalesce(struct hinic_dev *nic_dev, u16 q_id,
678 			      struct hinic_intr_coal_info *coal,
679 			      bool set_rx_coal)
680 {
681 	struct hinic_intr_coal_info *intr_coal = NULL;
682 	struct hinic_msix_config interrupt_info = {0};
683 	struct net_device *netdev = nic_dev->netdev;
684 	u16 msix_idx;
685 	int err;
686 
687 	intr_coal = set_rx_coal ? &nic_dev->rx_intr_coalesce[q_id] :
688 		    &nic_dev->tx_intr_coalesce[q_id];
689 
690 	intr_coal->coalesce_timer_cfg = coal->coalesce_timer_cfg;
691 	intr_coal->pending_limt = coal->pending_limt;
692 
693 	/* netdev not running or qp not in using,
694 	 * don't need to set coalesce to hw
695 	 */
696 	if (!(nic_dev->flags & HINIC_INTF_UP) ||
697 	    q_id >= nic_dev->num_qps)
698 		return 0;
699 
700 	msix_idx = set_rx_coal ? nic_dev->rxqs[q_id].rq->msix_entry :
701 		   nic_dev->txqs[q_id].sq->msix_entry;
702 	interrupt_info.msix_index = msix_idx;
703 	interrupt_info.coalesce_timer_cnt = intr_coal->coalesce_timer_cfg;
704 	interrupt_info.pending_cnt = intr_coal->pending_limt;
705 	interrupt_info.resend_timer_cnt = intr_coal->resend_timer_cfg;
706 
707 	err = hinic_set_interrupt_cfg(nic_dev->hwdev, &interrupt_info);
708 	if (err)
709 		netif_warn(nic_dev, drv, netdev,
710 			   "Failed to set %s queue%d coalesce",
711 			   set_rx_coal ? "rx" : "tx", q_id);
712 
713 	return err;
714 }
715 
716 static int __set_hw_coal_param(struct hinic_dev *nic_dev,
717 			       struct hinic_intr_coal_info *intr_coal,
718 			       u16 queue, bool set_rx_coal)
719 {
720 	int err;
721 	u16 i;
722 
723 	if (queue == COALESCE_ALL_QUEUE) {
724 		for (i = 0; i < nic_dev->max_qps; i++) {
725 			err = set_queue_coalesce(nic_dev, i, intr_coal,
726 						 set_rx_coal);
727 			if (err)
728 				return err;
729 		}
730 	} else {
731 		if (queue >= nic_dev->num_qps) {
732 			netif_err(nic_dev, drv, nic_dev->netdev,
733 				  "Invalid queue_id: %d\n", queue);
734 			return -EINVAL;
735 		}
736 		err = set_queue_coalesce(nic_dev, queue, intr_coal,
737 					 set_rx_coal);
738 		if (err)
739 			return err;
740 	}
741 
742 	return 0;
743 }
744 
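/* Convert the requested usecs/frames values to hardware units and apply them
 * to one queue or to all queues (COALESCE_ALL_QUEUE); a converted value of
 * zero disables coalescing, which is reported with a warning.
 */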
745 static int __hinic_set_coalesce(struct net_device *netdev,
746 				struct ethtool_coalesce *coal, u16 queue)
747 {
748 	struct hinic_dev *nic_dev = netdev_priv(netdev);
749 	struct hinic_intr_coal_info rx_intr_coal = {0};
750 	struct hinic_intr_coal_info tx_intr_coal = {0};
751 	bool set_rx_coal = false;
752 	bool set_tx_coal = false;
753 	int err;
754 
755 	err = is_coalesce_exceed_limit(coal);
756 	if (err)
757 		return err;
758 
759 	if (coal->rx_coalesce_usecs || coal->rx_max_coalesced_frames) {
760 		rx_intr_coal.coalesce_timer_cfg =
761 		(u8)(coal->rx_coalesce_usecs / COALESCE_TIMER_CFG_UNIT);
762 		rx_intr_coal.pending_limt = (u8)(coal->rx_max_coalesced_frames /
763 				COALESCE_PENDING_LIMIT_UNIT);
764 		set_rx_coal = true;
765 	}
766 
767 	if (coal->tx_coalesce_usecs || coal->tx_max_coalesced_frames) {
768 		tx_intr_coal.coalesce_timer_cfg =
769 		(u8)(coal->tx_coalesce_usecs / COALESCE_TIMER_CFG_UNIT);
770 		tx_intr_coal.pending_limt = (u8)(coal->tx_max_coalesced_frames /
771 		COALESCE_PENDING_LIMIT_UNIT);
772 		set_tx_coal = true;
773 	}
774 
775 	/* setting coalesce timer or pending limit to zero will disable
776 	 * coalesce
777 	 */
778 	if (set_rx_coal && (!rx_intr_coal.coalesce_timer_cfg ||
779 			    !rx_intr_coal.pending_limt))
780 		netif_warn(nic_dev, drv, netdev, "RX coalesce will be disabled\n");
781 	if (set_tx_coal && (!tx_intr_coal.coalesce_timer_cfg ||
782 			    !tx_intr_coal.pending_limt))
783 		netif_warn(nic_dev, drv, netdev, "TX coalesce will be disabled\n");
784 
785 	if (set_rx_coal) {
786 		err = __set_hw_coal_param(nic_dev, &rx_intr_coal, queue, true);
787 		if (err)
788 			return err;
789 	}
790 	if (set_tx_coal) {
791 		err = __set_hw_coal_param(nic_dev, &tx_intr_coal, queue, false);
792 		if (err)
793 			return err;
794 	}
795 	return 0;
796 }
797 
798 static int hinic_get_coalesce(struct net_device *netdev,
799 			      struct ethtool_coalesce *coal,
800 			      struct kernel_ethtool_coalesce *kernel_coal,
801 			      struct netlink_ext_ack *extack)
802 {
803 	return __hinic_get_coalesce(netdev, coal, COALESCE_ALL_QUEUE);
804 }
805 
806 static int hinic_set_coalesce(struct net_device *netdev,
807 			      struct ethtool_coalesce *coal,
808 			      struct kernel_ethtool_coalesce *kernel_coal,
809 			      struct netlink_ext_ack *extack)
810 {
811 	return __hinic_set_coalesce(netdev, coal, COALESCE_ALL_QUEUE);
812 }
813 
814 static int hinic_get_per_queue_coalesce(struct net_device *netdev, u32 queue,
815 					struct ethtool_coalesce *coal)
816 {
817 	return __hinic_get_coalesce(netdev, coal, queue);
818 }
819 
820 static int hinic_set_per_queue_coalesce(struct net_device *netdev, u32 queue,
821 					struct ethtool_coalesce *coal)
822 {
823 	return __hinic_set_coalesce(netdev, coal, queue);
824 }
825 
826 static void hinic_get_pauseparam(struct net_device *netdev,
827 				 struct ethtool_pauseparam *pause)
828 {
829 	struct hinic_dev *nic_dev = netdev_priv(netdev);
830 	struct hinic_pause_config pause_info = {0};
831 	struct hinic_nic_cfg *nic_cfg;
832 	int err;
833 
834 	nic_cfg = &nic_dev->hwdev->func_to_io.nic_cfg;
835 
836 	err = hinic_get_hw_pause_info(nic_dev->hwdev, &pause_info);
837 	if (!err) {
838 		pause->autoneg = pause_info.auto_neg;
839 		if (nic_cfg->pause_set || !pause_info.auto_neg) {
840 			pause->rx_pause = nic_cfg->rx_pause;
841 			pause->tx_pause = nic_cfg->tx_pause;
842 		} else {
843 			pause->rx_pause = pause_info.rx_pause;
844 			pause->tx_pause = pause_info.tx_pause;
845 		}
846 	}
847 }
848 
849 static int hinic_set_pauseparam(struct net_device *netdev,
850 				struct ethtool_pauseparam *pause)
851 {
852 	struct hinic_dev *nic_dev = netdev_priv(netdev);
853 	struct hinic_pause_config pause_info = {0};
854 	struct hinic_port_cap port_cap = {0};
855 	int err;
856 
857 	err = hinic_port_get_cap(nic_dev, &port_cap);
858 	if (err)
859 		return -EIO;
860 
861 	if (pause->autoneg != port_cap.autoneg_state)
862 		return -EOPNOTSUPP;
863 
864 	pause_info.auto_neg = pause->autoneg;
865 	pause_info.rx_pause = pause->rx_pause;
866 	pause_info.tx_pause = pause->tx_pause;
867 
868 	mutex_lock(&nic_dev->hwdev->func_to_io.nic_cfg.cfg_mutex);
869 	err = hinic_set_hw_pause_info(nic_dev->hwdev, &pause_info);
870 	if (err) {
871 		mutex_unlock(&nic_dev->hwdev->func_to_io.nic_cfg.cfg_mutex);
872 		return err;
873 	}
874 	nic_dev->hwdev->func_to_io.nic_cfg.pause_set = true;
875 	nic_dev->hwdev->func_to_io.nic_cfg.auto_neg = pause->autoneg;
876 	nic_dev->hwdev->func_to_io.nic_cfg.rx_pause = pause->rx_pause;
877 	nic_dev->hwdev->func_to_io.nic_cfg.tx_pause = pause->tx_pause;
878 	mutex_unlock(&nic_dev->hwdev->func_to_io.nic_cfg.cfg_mutex);
879 
880 	return 0;
881 }
882 
883 static void hinic_get_channels(struct net_device *netdev,
884 			       struct ethtool_channels *channels)
885 {
886 	struct hinic_dev *nic_dev = netdev_priv(netdev);
887 	struct hinic_hwdev *hwdev = nic_dev->hwdev;
888 
889 	channels->max_combined = nic_dev->max_qps;
890 	channels->combined_count = hinic_hwdev_num_qps(hwdev);
891 }
892 
893 static int hinic_set_channels(struct net_device *netdev,
894 			      struct ethtool_channels *channels)
895 {
896 	struct hinic_dev *nic_dev = netdev_priv(netdev);
897 	unsigned int count = channels->combined_count;
898 	int err;
899 
900 	netif_info(nic_dev, drv, netdev, "Set max combined queue number from %d to %d\n",
901 		   hinic_hwdev_num_qps(nic_dev->hwdev), count);
902 
903 	if (netif_running(netdev)) {
904 		netif_info(nic_dev, drv, netdev, "Restarting netdev\n");
905 		hinic_close(netdev);
906 
907 		nic_dev->hwdev->nic_cap.num_qps = count;
908 
909 		err = hinic_open(netdev);
910 		if (err) {
911 			netif_err(nic_dev, drv, netdev,
912 				  "Failed to open netdev\n");
913 			return -EFAULT;
914 		}
915 	} else {
916 		nic_dev->hwdev->nic_cap.num_qps = count;
917 	}
918 
919 	return 0;
920 }
921 
922 static int hinic_get_rss_hash_opts(struct hinic_dev *nic_dev,
923 				   struct ethtool_rxnfc *cmd)
924 {
925 	struct hinic_rss_type rss_type = { 0 };
926 	int err;
927 
928 	cmd->data = 0;
929 
930 	if (!(nic_dev->flags & HINIC_RSS_ENABLE))
931 		return 0;
932 
933 	err = hinic_get_rss_type(nic_dev, nic_dev->rss_tmpl_idx,
934 				 &rss_type);
935 	if (err)
936 		return err;
937 
938 	cmd->data = RXH_IP_SRC | RXH_IP_DST;
939 	switch (cmd->flow_type) {
940 	case TCP_V4_FLOW:
941 		if (rss_type.tcp_ipv4)
942 			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
943 		break;
944 	case TCP_V6_FLOW:
945 		if (rss_type.tcp_ipv6)
946 			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
947 		break;
948 	case UDP_V4_FLOW:
949 		if (rss_type.udp_ipv4)
950 			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
951 		break;
952 	case UDP_V6_FLOW:
953 		if (rss_type.udp_ipv6)
954 			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
955 		break;
956 	case IPV4_FLOW:
957 	case IPV6_FLOW:
958 		break;
959 	default:
960 		cmd->data = 0;
961 		return -EINVAL;
962 	}
963 
964 	return 0;
965 }
966 
967 static int set_l4_rss_hash_ops(struct ethtool_rxnfc *cmd,
968 			       struct hinic_rss_type *rss_type)
969 {
970 	u8 rss_l4_en = 0;
971 
972 	switch (cmd->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
973 	case 0:
974 		rss_l4_en = 0;
975 		break;
976 	case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
977 		rss_l4_en = 1;
978 		break;
979 	default:
980 		return -EINVAL;
981 	}
982 
983 	switch (cmd->flow_type) {
984 	case TCP_V4_FLOW:
985 		rss_type->tcp_ipv4 = rss_l4_en;
986 		break;
987 	case TCP_V6_FLOW:
988 		rss_type->tcp_ipv6 = rss_l4_en;
989 		break;
990 	case UDP_V4_FLOW:
991 		rss_type->udp_ipv4 = rss_l4_en;
992 		break;
993 	case UDP_V6_FLOW:
994 		rss_type->udp_ipv6 = rss_l4_en;
995 		break;
996 	default:
997 		return -EINVAL;
998 	}
999 
1000 	return 0;
1001 }
1002 
1003 static int hinic_set_rss_hash_opts(struct hinic_dev *nic_dev,
1004 				   struct ethtool_rxnfc *cmd)
1005 {
1006 	struct hinic_rss_type *rss_type = &nic_dev->rss_type;
1007 	int err;
1008 
1009 	if (!(nic_dev->flags & HINIC_RSS_ENABLE)) {
1010 		cmd->data = 0;
1011 		return -EOPNOTSUPP;
1012 	}
1013 
1014 	/* RSS does not support anything other than hashing
1015 	 * to queues on src and dst IPs and ports
1016 	 */
1017 	if (cmd->data & ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 |
1018 		RXH_L4_B_2_3))
1019 		return -EINVAL;
1020 
1021 	/* We need at least the IP SRC and DEST fields for hashing */
1022 	if (!(cmd->data & RXH_IP_SRC) || !(cmd->data & RXH_IP_DST))
1023 		return -EINVAL;
1024 
1025 	err = hinic_get_rss_type(nic_dev,
1026 				 nic_dev->rss_tmpl_idx, rss_type);
1027 	if (err)
1028 		return -EFAULT;
1029 
1030 	switch (cmd->flow_type) {
1031 	case TCP_V4_FLOW:
1032 	case TCP_V6_FLOW:
1033 	case UDP_V4_FLOW:
1034 	case UDP_V6_FLOW:
1035 		err = set_l4_rss_hash_ops(cmd, rss_type);
1036 		if (err)
1037 			return err;
1038 		break;
1039 	case IPV4_FLOW:
1040 		rss_type->ipv4 = 1;
1041 		break;
1042 	case IPV6_FLOW:
1043 		rss_type->ipv6 = 1;
1044 		break;
1045 	default:
1046 		return -EINVAL;
1047 	}
1048 
1049 	err = hinic_set_rss_type(nic_dev, nic_dev->rss_tmpl_idx,
1050 				 *rss_type);
1051 	if (err)
1052 		return -EFAULT;
1053 
1054 	return 0;
1055 }
1056 
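/* Apply a user-supplied RSS indirection table and/or hash key: keep a driver
 * copy (rss_indir_user/rss_hkey_user, presumably so the configuration can be
 * re-applied later) and program the RSS template in hardware.
 */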
1057 static int __set_rss_rxfh(struct net_device *netdev,
1058 			  const u32 *indir, const u8 *key)
1059 {
1060 	struct hinic_dev *nic_dev = netdev_priv(netdev);
1061 	int err;
1062 
1063 	if (indir) {
1064 		if (!nic_dev->rss_indir_user) {
1065 			nic_dev->rss_indir_user =
1066 				kzalloc(sizeof(u32) * HINIC_RSS_INDIR_SIZE,
1067 					GFP_KERNEL);
1068 			if (!nic_dev->rss_indir_user)
1069 				return -ENOMEM;
1070 		}
1071 
1072 		memcpy(nic_dev->rss_indir_user, indir,
1073 		       sizeof(u32) * HINIC_RSS_INDIR_SIZE);
1074 
1075 		err = hinic_rss_set_indir_tbl(nic_dev,
1076 					      nic_dev->rss_tmpl_idx, indir);
1077 		if (err)
1078 			return -EFAULT;
1079 	}
1080 
1081 	if (key) {
1082 		if (!nic_dev->rss_hkey_user) {
1083 			nic_dev->rss_hkey_user =
1084 				kzalloc(HINIC_RSS_KEY_SIZE * 2, GFP_KERNEL);
1085 
1086 			if (!nic_dev->rss_hkey_user)
1087 				return -ENOMEM;
1088 		}
1089 
1090 		memcpy(nic_dev->rss_hkey_user, key, HINIC_RSS_KEY_SIZE);
1091 
1092 		err = hinic_rss_set_template_tbl(nic_dev,
1093 						 nic_dev->rss_tmpl_idx, key);
1094 		if (err)
1095 			return -EFAULT;
1096 	}
1097 
1098 	return 0;
1099 }
1100 
1101 static int hinic_get_rxnfc(struct net_device *netdev,
1102 			   struct ethtool_rxnfc *cmd, u32 *rule_locs)
1103 {
1104 	struct hinic_dev *nic_dev = netdev_priv(netdev);
1105 	int err = 0;
1106 
1107 	switch (cmd->cmd) {
1108 	case ETHTOOL_GRXRINGS:
1109 		cmd->data = nic_dev->num_qps;
1110 		break;
1111 	case ETHTOOL_GRXFH:
1112 		err = hinic_get_rss_hash_opts(nic_dev, cmd);
1113 		break;
1114 	default:
1115 		err = -EOPNOTSUPP;
1116 		break;
1117 	}
1118 
1119 	return err;
1120 }
1121 
1122 static int hinic_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
1123 {
1124 	struct hinic_dev *nic_dev = netdev_priv(netdev);
1125 	int err = 0;
1126 
1127 	switch (cmd->cmd) {
1128 	case ETHTOOL_SRXFH:
1129 		err = hinic_set_rss_hash_opts(nic_dev, cmd);
1130 		break;
1131 	default:
1132 		err = -EOPNOTSUPP;
1133 		break;
1134 	}
1135 
1136 	return err;
1137 }
1138 
1139 static int hinic_get_rxfh(struct net_device *netdev,
1140 			  u32 *indir, u8 *key, u8 *hfunc)
1141 {
1142 	struct hinic_dev *nic_dev = netdev_priv(netdev);
1143 	u8 hash_engine_type = 0;
1144 	int err = 0;
1145 
1146 	if (!(nic_dev->flags & HINIC_RSS_ENABLE))
1147 		return -EOPNOTSUPP;
1148 
1149 	if (hfunc) {
1150 		err = hinic_rss_get_hash_engine(nic_dev,
1151 						nic_dev->rss_tmpl_idx,
1152 						&hash_engine_type);
1153 		if (err)
1154 			return -EFAULT;
1155 
1156 		*hfunc = hash_engine_type ? ETH_RSS_HASH_TOP : ETH_RSS_HASH_XOR;
1157 	}
1158 
1159 	if (indir) {
1160 		err = hinic_rss_get_indir_tbl(nic_dev,
1161 					      nic_dev->rss_tmpl_idx, indir);
1162 		if (err)
1163 			return -EFAULT;
1164 	}
1165 
1166 	if (key)
1167 		err = hinic_rss_get_template_tbl(nic_dev,
1168 						 nic_dev->rss_tmpl_idx, key);
1169 
1170 	return err;
1171 }
1172 
1173 static int hinic_set_rxfh(struct net_device *netdev, const u32 *indir,
1174 			  const u8 *key, const u8 hfunc)
1175 {
1176 	struct hinic_dev *nic_dev = netdev_priv(netdev);
1177 	int err = 0;
1178 
1179 	if (!(nic_dev->flags & HINIC_RSS_ENABLE))
1180 		return -EOPNOTSUPP;
1181 
1182 	if (hfunc != ETH_RSS_HASH_NO_CHANGE) {
1183 		if (hfunc != ETH_RSS_HASH_TOP && hfunc != ETH_RSS_HASH_XOR)
1184 			return -EOPNOTSUPP;
1185 
1186 		nic_dev->rss_hash_engine = (hfunc == ETH_RSS_HASH_XOR) ?
1187 			HINIC_RSS_HASH_ENGINE_TYPE_XOR :
1188 			HINIC_RSS_HASH_ENGINE_TYPE_TOEP;
1189 		err = hinic_rss_set_hash_engine
1190 			(nic_dev, nic_dev->rss_tmpl_idx,
1191 			nic_dev->rss_hash_engine);
1192 		if (err)
1193 			return -EFAULT;
1194 	}
1195 
1196 	err = __set_rss_rxfh(netdev, indir, key);
1197 
1198 	return err;
1199 }
1200 
1201 static u32 hinic_get_rxfh_key_size(struct net_device *netdev)
1202 {
1203 	return HINIC_RSS_KEY_SIZE;
1204 }
1205 
1206 static u32 hinic_get_rxfh_indir_size(struct net_device *netdev)
1207 {
1208 	return HINIC_RSS_INDIR_SIZE;
1209 }
1210 
1211 #define HINIC_FUNC_STAT(_stat_item) {	\
1212 	.name = #_stat_item, \
1213 	.size = sizeof_field(struct hinic_vport_stats, _stat_item), \
1214 	.offset = offsetof(struct hinic_vport_stats, _stat_item) \
1215 }
1216 
1217 static struct hinic_stats hinic_function_stats[] = {
1218 	HINIC_FUNC_STAT(tx_unicast_pkts_vport),
1219 	HINIC_FUNC_STAT(tx_unicast_bytes_vport),
1220 	HINIC_FUNC_STAT(tx_multicast_pkts_vport),
1221 	HINIC_FUNC_STAT(tx_multicast_bytes_vport),
1222 	HINIC_FUNC_STAT(tx_broadcast_pkts_vport),
1223 	HINIC_FUNC_STAT(tx_broadcast_bytes_vport),
1224 
1225 	HINIC_FUNC_STAT(rx_unicast_pkts_vport),
1226 	HINIC_FUNC_STAT(rx_unicast_bytes_vport),
1227 	HINIC_FUNC_STAT(rx_multicast_pkts_vport),
1228 	HINIC_FUNC_STAT(rx_multicast_bytes_vport),
1229 	HINIC_FUNC_STAT(rx_broadcast_pkts_vport),
1230 	HINIC_FUNC_STAT(rx_broadcast_bytes_vport),
1231 
1232 	HINIC_FUNC_STAT(tx_discard_vport),
1233 	HINIC_FUNC_STAT(rx_discard_vport),
1234 	HINIC_FUNC_STAT(tx_err_vport),
1235 	HINIC_FUNC_STAT(rx_err_vport),
1236 };
1237 
1238 static char hinic_test_strings[][ETH_GSTRING_LEN] = {
1239 	"Internal lb test  (on/offline)",
1240 	"External lb test (external_lb)",
1241 };
1242 
1243 #define HINIC_PORT_STAT(_stat_item) { \
1244 	.name = #_stat_item, \
1245 	.size = sizeof_field(struct hinic_phy_port_stats, _stat_item), \
1246 	.offset = offsetof(struct hinic_phy_port_stats, _stat_item) \
1247 }
1248 
1249 static struct hinic_stats hinic_port_stats[] = {
1250 	HINIC_PORT_STAT(mac_rx_total_pkt_num),
1251 	HINIC_PORT_STAT(mac_rx_total_oct_num),
1252 	HINIC_PORT_STAT(mac_rx_bad_pkt_num),
1253 	HINIC_PORT_STAT(mac_rx_bad_oct_num),
1254 	HINIC_PORT_STAT(mac_rx_good_pkt_num),
1255 	HINIC_PORT_STAT(mac_rx_good_oct_num),
1256 	HINIC_PORT_STAT(mac_rx_uni_pkt_num),
1257 	HINIC_PORT_STAT(mac_rx_multi_pkt_num),
1258 	HINIC_PORT_STAT(mac_rx_broad_pkt_num),
1259 	HINIC_PORT_STAT(mac_tx_total_pkt_num),
1260 	HINIC_PORT_STAT(mac_tx_total_oct_num),
1261 	HINIC_PORT_STAT(mac_tx_bad_pkt_num),
1262 	HINIC_PORT_STAT(mac_tx_bad_oct_num),
1263 	HINIC_PORT_STAT(mac_tx_good_pkt_num),
1264 	HINIC_PORT_STAT(mac_tx_good_oct_num),
1265 	HINIC_PORT_STAT(mac_tx_uni_pkt_num),
1266 	HINIC_PORT_STAT(mac_tx_multi_pkt_num),
1267 	HINIC_PORT_STAT(mac_tx_broad_pkt_num),
1268 	HINIC_PORT_STAT(mac_rx_fragment_pkt_num),
1269 	HINIC_PORT_STAT(mac_rx_undersize_pkt_num),
1270 	HINIC_PORT_STAT(mac_rx_undermin_pkt_num),
1271 	HINIC_PORT_STAT(mac_rx_64_oct_pkt_num),
1272 	HINIC_PORT_STAT(mac_rx_65_127_oct_pkt_num),
1273 	HINIC_PORT_STAT(mac_rx_128_255_oct_pkt_num),
1274 	HINIC_PORT_STAT(mac_rx_256_511_oct_pkt_num),
1275 	HINIC_PORT_STAT(mac_rx_512_1023_oct_pkt_num),
1276 	HINIC_PORT_STAT(mac_rx_1024_1518_oct_pkt_num),
1277 	HINIC_PORT_STAT(mac_rx_1519_2047_oct_pkt_num),
1278 	HINIC_PORT_STAT(mac_rx_2048_4095_oct_pkt_num),
1279 	HINIC_PORT_STAT(mac_rx_4096_8191_oct_pkt_num),
1280 	HINIC_PORT_STAT(mac_rx_8192_9216_oct_pkt_num),
1281 	HINIC_PORT_STAT(mac_rx_9217_12287_oct_pkt_num),
1282 	HINIC_PORT_STAT(mac_rx_12288_16383_oct_pkt_num),
1283 	HINIC_PORT_STAT(mac_rx_1519_max_good_pkt_num),
1284 	HINIC_PORT_STAT(mac_rx_1519_max_bad_pkt_num),
1285 	HINIC_PORT_STAT(mac_rx_oversize_pkt_num),
1286 	HINIC_PORT_STAT(mac_rx_jabber_pkt_num),
1287 	HINIC_PORT_STAT(mac_rx_pause_num),
1288 	HINIC_PORT_STAT(mac_rx_pfc_pkt_num),
1289 	HINIC_PORT_STAT(mac_rx_pfc_pri0_pkt_num),
1290 	HINIC_PORT_STAT(mac_rx_pfc_pri1_pkt_num),
1291 	HINIC_PORT_STAT(mac_rx_pfc_pri2_pkt_num),
1292 	HINIC_PORT_STAT(mac_rx_pfc_pri3_pkt_num),
1293 	HINIC_PORT_STAT(mac_rx_pfc_pri4_pkt_num),
1294 	HINIC_PORT_STAT(mac_rx_pfc_pri5_pkt_num),
1295 	HINIC_PORT_STAT(mac_rx_pfc_pri6_pkt_num),
1296 	HINIC_PORT_STAT(mac_rx_pfc_pri7_pkt_num),
1297 	HINIC_PORT_STAT(mac_rx_control_pkt_num),
1298 	HINIC_PORT_STAT(mac_rx_sym_err_pkt_num),
1299 	HINIC_PORT_STAT(mac_rx_fcs_err_pkt_num),
1300 	HINIC_PORT_STAT(mac_rx_send_app_good_pkt_num),
1301 	HINIC_PORT_STAT(mac_rx_send_app_bad_pkt_num),
1302 	HINIC_PORT_STAT(mac_tx_fragment_pkt_num),
1303 	HINIC_PORT_STAT(mac_tx_undersize_pkt_num),
1304 	HINIC_PORT_STAT(mac_tx_undermin_pkt_num),
1305 	HINIC_PORT_STAT(mac_tx_64_oct_pkt_num),
1306 	HINIC_PORT_STAT(mac_tx_65_127_oct_pkt_num),
1307 	HINIC_PORT_STAT(mac_tx_128_255_oct_pkt_num),
1308 	HINIC_PORT_STAT(mac_tx_256_511_oct_pkt_num),
1309 	HINIC_PORT_STAT(mac_tx_512_1023_oct_pkt_num),
1310 	HINIC_PORT_STAT(mac_tx_1024_1518_oct_pkt_num),
1311 	HINIC_PORT_STAT(mac_tx_1519_2047_oct_pkt_num),
1312 	HINIC_PORT_STAT(mac_tx_2048_4095_oct_pkt_num),
1313 	HINIC_PORT_STAT(mac_tx_4096_8191_oct_pkt_num),
1314 	HINIC_PORT_STAT(mac_tx_8192_9216_oct_pkt_num),
1315 	HINIC_PORT_STAT(mac_tx_9217_12287_oct_pkt_num),
1316 	HINIC_PORT_STAT(mac_tx_12288_16383_oct_pkt_num),
1317 	HINIC_PORT_STAT(mac_tx_1519_max_good_pkt_num),
1318 	HINIC_PORT_STAT(mac_tx_1519_max_bad_pkt_num),
1319 	HINIC_PORT_STAT(mac_tx_oversize_pkt_num),
1320 	HINIC_PORT_STAT(mac_tx_jabber_pkt_num),
1321 	HINIC_PORT_STAT(mac_tx_pause_num),
1322 	HINIC_PORT_STAT(mac_tx_pfc_pkt_num),
1323 	HINIC_PORT_STAT(mac_tx_pfc_pri0_pkt_num),
1324 	HINIC_PORT_STAT(mac_tx_pfc_pri1_pkt_num),
1325 	HINIC_PORT_STAT(mac_tx_pfc_pri2_pkt_num),
1326 	HINIC_PORT_STAT(mac_tx_pfc_pri3_pkt_num),
1327 	HINIC_PORT_STAT(mac_tx_pfc_pri4_pkt_num),
1328 	HINIC_PORT_STAT(mac_tx_pfc_pri5_pkt_num),
1329 	HINIC_PORT_STAT(mac_tx_pfc_pri6_pkt_num),
1330 	HINIC_PORT_STAT(mac_tx_pfc_pri7_pkt_num),
1331 	HINIC_PORT_STAT(mac_tx_control_pkt_num),
1332 	HINIC_PORT_STAT(mac_tx_err_all_pkt_num),
1333 	HINIC_PORT_STAT(mac_tx_from_app_good_pkt_num),
1334 	HINIC_PORT_STAT(mac_tx_from_app_bad_pkt_num),
1335 };
1336 
1337 #define HINIC_TXQ_STAT(_stat_item) { \
1338 	.name = "txq%d_"#_stat_item, \
1339 	.size = sizeof_field(struct hinic_txq_stats, _stat_item), \
1340 	.offset = offsetof(struct hinic_txq_stats, _stat_item) \
1341 }
1342 
1343 static struct hinic_stats hinic_tx_queue_stats[] = {
1344 	HINIC_TXQ_STAT(pkts),
1345 	HINIC_TXQ_STAT(bytes),
1346 	HINIC_TXQ_STAT(tx_busy),
1347 	HINIC_TXQ_STAT(tx_wake),
1348 	HINIC_TXQ_STAT(tx_dropped),
1349 	HINIC_TXQ_STAT(big_frags_pkts),
1350 };
1351 
1352 #define HINIC_RXQ_STAT(_stat_item) { \
1353 	.name = "rxq%d_"#_stat_item, \
1354 	.size = sizeof_field(struct hinic_rxq_stats, _stat_item), \
1355 	.offset = offsetof(struct hinic_rxq_stats, _stat_item) \
1356 }
1357 
1358 static struct hinic_stats hinic_rx_queue_stats[] = {
1359 	HINIC_RXQ_STAT(pkts),
1360 	HINIC_RXQ_STAT(bytes),
1361 	HINIC_RXQ_STAT(errors),
1362 	HINIC_RXQ_STAT(csum_errors),
1363 	HINIC_RXQ_STAT(other_errors),
1364 };
1365 
1366 static void get_drv_queue_stats(struct hinic_dev *nic_dev, u64 *data)
1367 {
1368 	struct hinic_txq_stats txq_stats;
1369 	struct hinic_rxq_stats rxq_stats;
1370 	u16 i = 0, j = 0, qid = 0;
1371 	char *p;
1372 
1373 	for (qid = 0; qid < nic_dev->num_qps; qid++) {
1374 		if (!nic_dev->txqs)
1375 			break;
1376 
1377 		hinic_txq_get_stats(&nic_dev->txqs[qid], &txq_stats);
1378 		for (j = 0; j < ARRAY_SIZE(hinic_tx_queue_stats); j++, i++) {
1379 			p = (char *)&txq_stats +
1380 				hinic_tx_queue_stats[j].offset;
1381 			data[i] = (hinic_tx_queue_stats[j].size ==
1382 					sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1383 		}
1384 	}
1385 
1386 	for (qid = 0; qid < nic_dev->num_qps; qid++) {
1387 		if (!nic_dev->rxqs)
1388 			break;
1389 
1390 		hinic_rxq_get_stats(&nic_dev->rxqs[qid], &rxq_stats);
1391 		for (j = 0; j < ARRAY_SIZE(hinic_rx_queue_stats); j++, i++) {
1392 			p = (char *)&rxq_stats +
1393 				hinic_rx_queue_stats[j].offset;
1394 			data[i] = (hinic_rx_queue_stats[j].size ==
1395 					sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1396 		}
1397 	}
1398 }
1399 
1400 static void hinic_get_ethtool_stats(struct net_device *netdev,
1401 				    struct ethtool_stats *stats, u64 *data)
1402 {
1403 	struct hinic_dev *nic_dev = netdev_priv(netdev);
1404 	struct hinic_vport_stats vport_stats = {0};
1405 	struct hinic_phy_port_stats *port_stats;
1406 	u16 i = 0, j = 0;
1407 	char *p;
1408 	int err;
1409 
1410 	err = hinic_get_vport_stats(nic_dev, &vport_stats);
1411 	if (err)
1412 		netif_err(nic_dev, drv, netdev,
1413 			  "Failed to get vport stats from firmware\n");
1414 
1415 	for (j = 0; j < ARRAY_SIZE(hinic_function_stats); j++, i++) {
1416 		p = (char *)&vport_stats + hinic_function_stats[j].offset;
1417 		data[i] = (hinic_function_stats[j].size ==
1418 				sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1419 	}
1420 
1421 	port_stats = kzalloc(sizeof(*port_stats), GFP_KERNEL);
1422 	if (!port_stats) {
1423 		memset(&data[i], 0,
1424 		       ARRAY_SIZE(hinic_port_stats) * sizeof(*data));
1425 		i += ARRAY_SIZE(hinic_port_stats);
1426 		goto get_drv_stats;
1427 	}
1428 
1429 	err = hinic_get_phy_port_stats(nic_dev, port_stats);
1430 	if (err)
1431 		netif_err(nic_dev, drv, netdev,
1432 			  "Failed to get port stats from firmware\n");
1433 
1434 	for (j = 0; j < ARRAY_SIZE(hinic_port_stats); j++, i++) {
1435 		p = (char *)port_stats + hinic_port_stats[j].offset;
1436 		data[i] = (hinic_port_stats[j].size ==
1437 				sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1438 	}
1439 
1440 	kfree(port_stats);
1441 
1442 get_drv_stats:
1443 	get_drv_queue_stats(nic_dev, data + i);
1444 }
1445 
1446 static int hinic_get_sset_count(struct net_device *netdev, int sset)
1447 {
1448 	struct hinic_dev *nic_dev = netdev_priv(netdev);
1449 	int count, q_num;
1450 
1451 	switch (sset) {
1452 	case ETH_SS_TEST:
1453 		return ARRAY_SIZE(hinic_test_strings);
1454 	case ETH_SS_STATS:
1455 		q_num = nic_dev->num_qps;
1456 		count = ARRAY_SIZE(hinic_function_stats) +
1457 			(ARRAY_SIZE(hinic_tx_queue_stats) +
1458 			ARRAY_SIZE(hinic_rx_queue_stats)) * q_num;
1459 
1460 		count += ARRAY_SIZE(hinic_port_stats);
1461 
1462 		return count;
1463 	default:
1464 		return -EOPNOTSUPP;
1465 	}
1466 }
1467 
1468 static void hinic_get_strings(struct net_device *netdev,
1469 			      u32 stringset, u8 *data)
1470 {
1471 	struct hinic_dev *nic_dev = netdev_priv(netdev);
1472 	char *p = (char *)data;
1473 	u16 i, j;
1474 
1475 	switch (stringset) {
1476 	case ETH_SS_TEST:
1477 		memcpy(data, *hinic_test_strings, sizeof(hinic_test_strings));
1478 		return;
1479 	case ETH_SS_STATS:
1480 		for (i = 0; i < ARRAY_SIZE(hinic_function_stats); i++) {
1481 			memcpy(p, hinic_function_stats[i].name,
1482 			       ETH_GSTRING_LEN);
1483 			p += ETH_GSTRING_LEN;
1484 		}
1485 
1486 		for (i = 0; i < ARRAY_SIZE(hinic_port_stats); i++) {
1487 			memcpy(p, hinic_port_stats[i].name,
1488 			       ETH_GSTRING_LEN);
1489 			p += ETH_GSTRING_LEN;
1490 		}
1491 
1492 		for (i = 0; i < nic_dev->num_qps; i++) {
1493 			for (j = 0; j < ARRAY_SIZE(hinic_tx_queue_stats); j++) {
1494 				sprintf(p, hinic_tx_queue_stats[j].name, i);
1495 				p += ETH_GSTRING_LEN;
1496 			}
1497 		}
1498 
1499 		for (i = 0; i < nic_dev->num_qps; i++) {
1500 			for (j = 0; j < ARRAY_SIZE(hinic_rx_queue_stats); j++) {
1501 				sprintf(p, hinic_rx_queue_stats[j].name, i);
1502 				p += ETH_GSTRING_LEN;
1503 			}
1504 		}
1505 
1506 		return;
1507 	default:
1508 		return;
1509 	}
1510 }
1511 
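/* Build an LP_PKT_LEN Ethernet test frame with a fixed header and an
 * incrementing payload, transmit LP_PKT_CNT copies per iteration through the
 * loopback xmit path and compare what lands in lb_test_rx_buf; roughly five
 * iterations are run per second of @test_time, with a 200 ms wait each.
 */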
1512 static int hinic_run_lp_test(struct hinic_dev *nic_dev, u32 test_time)
1513 {
1514 	u8 *lb_test_rx_buf = nic_dev->lb_test_rx_buf;
1515 	struct net_device *netdev = nic_dev->netdev;
1516 	struct sk_buff *skb_tmp = NULL;
1517 	struct sk_buff *skb = NULL;
1518 	u32 cnt = test_time * 5;
1519 	u8 *test_data = NULL;
1520 	u32 i;
1521 	u8 j;
1522 
1523 	skb_tmp = alloc_skb(LP_PKT_LEN, GFP_ATOMIC);
1524 	if (!skb_tmp)
1525 		return -ENOMEM;
1526 
1527 	test_data = __skb_put(skb_tmp, LP_PKT_LEN);
1528 
1529 	memset(test_data, 0xFF, 2 * ETH_ALEN);
1530 	test_data[ETH_ALEN] = 0xFE;
1531 	test_data[2 * ETH_ALEN] = 0x08;
1532 	test_data[2 * ETH_ALEN + 1] = 0x0;
1533 
1534 	for (i = ETH_HLEN; i < LP_PKT_LEN; i++)
1535 		test_data[i] = i & 0xFF;
1536 
1537 	skb_tmp->queue_mapping = 0;
1538 	skb_tmp->ip_summed = CHECKSUM_COMPLETE;
1539 	skb_tmp->dev = netdev;
1540 
1541 	for (i = 0; i < cnt; i++) {
1542 		nic_dev->lb_test_rx_idx = 0;
1543 		memset(lb_test_rx_buf, 0, LP_PKT_CNT * LP_PKT_LEN);
1544 
1545 		for (j = 0; j < LP_PKT_CNT; j++) {
1546 			skb = pskb_copy(skb_tmp, GFP_ATOMIC);
1547 			if (!skb) {
1548 				dev_kfree_skb_any(skb_tmp);
1549 				netif_err(nic_dev, drv, netdev,
1550 					  "Copy skb failed for loopback test\n");
1551 				return -ENOMEM;
1552 			}
1553 
1554 			/* mark index for every pkt */
1555 			skb->data[LP_PKT_LEN - 1] = j;
1556 
1557 			if (hinic_lb_xmit_frame(skb, netdev)) {
1558 				dev_kfree_skb_any(skb);
1559 				dev_kfree_skb_any(skb_tmp);
1560 				netif_err(nic_dev, drv, netdev,
1561 					  "Xmit pkt failed for loopback test\n");
1562 				return -EBUSY;
1563 			}
1564 		}
1565 
1566 		/* wait till all pkts received to RX buffer */
1567 		msleep(200);
1568 
1569 		for (j = 0; j < LP_PKT_CNT; j++) {
1570 			if (memcmp(lb_test_rx_buf + j * LP_PKT_LEN,
1571 				   skb_tmp->data, LP_PKT_LEN - 1) ||
1572 			    (*(lb_test_rx_buf + j * LP_PKT_LEN +
1573 			     LP_PKT_LEN - 1) != j)) {
1574 				dev_kfree_skb_any(skb_tmp);
1575 				netif_err(nic_dev, drv, netdev,
1576 					  "Compare pkt failed in loopback test(index=0x%02x, data[%d]=0x%02x)\n",
1577 					  j + i * LP_PKT_CNT,
1578 					  LP_PKT_LEN - 1,
1579 					  *(lb_test_rx_buf + j * LP_PKT_LEN +
1580 					    LP_PKT_LEN - 1));
1581 				return -EIO;
1582 			}
1583 		}
1584 	}
1585 
1586 	dev_kfree_skb_any(skb_tmp);
1587 	return 0;
1588 }
1589 
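/* Run the ethtool self-test: enable internal port loopback unless an external
 * loopback was requested, allocate the receive compare buffer, run
 * hinic_run_lp_test() and restore the loopback mode afterwards.
 */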
1590 static int do_lp_test(struct hinic_dev *nic_dev, u32 flags, u32 test_time,
1591 		      enum diag_test_index *test_index)
1592 {
1593 	struct net_device *netdev = nic_dev->netdev;
1594 	u8 *lb_test_rx_buf = NULL;
1595 	int err = 0;
1596 
1597 	if (!(flags & ETH_TEST_FL_EXTERNAL_LB)) {
1598 		*test_index = INTERNAL_LP_TEST;
1599 		if (hinic_set_loopback_mode(nic_dev->hwdev,
1600 					    HINIC_INTERNAL_LP_MODE, true)) {
1601 			netif_err(nic_dev, drv, netdev,
1602 				  "Failed to set port loopback mode before loopback test\n");
1603 			return -EIO;
1604 		}
1605 	} else {
1606 		*test_index = EXTERNAL_LP_TEST;
1607 	}
1608 
1609 	lb_test_rx_buf = vmalloc(LP_PKT_CNT * LP_PKT_LEN);
1610 	if (!lb_test_rx_buf) {
1611 		err = -ENOMEM;
1612 	} else {
1613 		nic_dev->lb_test_rx_buf = lb_test_rx_buf;
1614 		nic_dev->lb_pkt_len = LP_PKT_LEN;
1615 		nic_dev->flags |= HINIC_LP_TEST;
1616 		err = hinic_run_lp_test(nic_dev, test_time);
1617 		nic_dev->flags &= ~HINIC_LP_TEST;
1618 		msleep(100);
1619 		vfree(lb_test_rx_buf);
1620 		nic_dev->lb_test_rx_buf = NULL;
1621 	}
1622 
1623 	if (!(flags & ETH_TEST_FL_EXTERNAL_LB)) {
1624 		if (hinic_set_loopback_mode(nic_dev->hwdev,
1625 					    HINIC_INTERNAL_LP_MODE, false)) {
1626 			netif_err(nic_dev, drv, netdev,
1627 				  "Failed to cancel port loopback mode after loopback test\n");
1628 			err = -EIO;
1629 		}
1630 	}
1631 
1632 	return err;
1633 }
1634 
1635 static void hinic_diag_test(struct net_device *netdev,
1636 			    struct ethtool_test *eth_test, u64 *data)
1637 {
1638 	struct hinic_dev *nic_dev = netdev_priv(netdev);
1639 	enum hinic_port_link_state link_state;
1640 	enum diag_test_index test_index = 0;
1641 	int err = 0;
1642 
1643 	memset(data, 0, DIAG_TEST_MAX * sizeof(u64));
1644 
1645 	/* don't support loopback test when netdev is closed. */
1646 	if (!(nic_dev->flags & HINIC_INTF_UP)) {
1647 		netif_err(nic_dev, drv, netdev,
1648 			  "Do not support loopback test when netdev is closed\n");
1649 		eth_test->flags |= ETH_TEST_FL_FAILED;
1650 		data[PORT_DOWN_ERR_IDX] = 1;
1651 		return;
1652 	}
1653 
1654 	netif_carrier_off(netdev);
1655 	netif_tx_disable(netdev);
1656 
1657 	err = do_lp_test(nic_dev, eth_test->flags, LP_DEFAULT_TIME,
1658 			 &test_index);
1659 	if (err) {
1660 		eth_test->flags |= ETH_TEST_FL_FAILED;
1661 		data[test_index] = 1;
1662 	}
1663 
1664 	netif_tx_wake_all_queues(netdev);
1665 
1666 	err = hinic_port_link_state(nic_dev, &link_state);
1667 	if (!err && link_state == HINIC_LINK_STATE_UP)
1668 		netif_carrier_on(netdev);
1669 }
1670 
1671 static int hinic_set_phys_id(struct net_device *netdev,
1672 			     enum ethtool_phys_id_state state)
1673 {
1674 	struct hinic_dev *nic_dev = netdev_priv(netdev);
1675 	int err = 0;
1676 	u8 port;
1677 
1678 	port = nic_dev->hwdev->port_id;
1679 
1680 	switch (state) {
1681 	case ETHTOOL_ID_ACTIVE:
1682 		err = hinic_set_led_status(nic_dev->hwdev, port,
1683 					   HINIC_LED_TYPE_LINK,
1684 					   HINIC_LED_MODE_FORCE_2HZ);
1685 		if (err)
1686 			netif_err(nic_dev, drv, netdev,
1687 				  "Set LED blinking in 2HZ failed\n");
1688 		break;
1689 
1690 	case ETHTOOL_ID_INACTIVE:
1691 		err = hinic_reset_led_status(nic_dev->hwdev, port);
1692 		if (err)
1693 			netif_err(nic_dev, drv, netdev,
1694 				  "Reset LED to original status failed\n");
1695 		break;
1696 
1697 	default:
1698 		return -EOPNOTSUPP;
1699 	}
1700 
1701 	return err;
1702 }
1703 
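/* Derive the module EEPROM layout (SFF-8472/8436/8636) and readable length
 * from the SFF-8024 identifier byte returned by hinic_get_sfp_type().
 */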
1704 static int hinic_get_module_info(struct net_device *netdev,
1705 				 struct ethtool_modinfo *modinfo)
1706 {
1707 	struct hinic_dev *nic_dev = netdev_priv(netdev);
1708 	u8 sfp_type_ext;
1709 	u8 sfp_type;
1710 	int err;
1711 
1712 	err = hinic_get_sfp_type(nic_dev->hwdev, &sfp_type, &sfp_type_ext);
1713 	if (err)
1714 		return err;
1715 
1716 	switch (sfp_type) {
1717 	case SFF8024_ID_SFP:
1718 		modinfo->type = ETH_MODULE_SFF_8472;
1719 		modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
1720 		break;
1721 	case SFF8024_ID_QSFP_8438:
1722 		modinfo->type = ETH_MODULE_SFF_8436;
1723 		modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
1724 		break;
1725 	case SFF8024_ID_QSFP_8436_8636:
1726 		if (sfp_type_ext >= 0x3) {
1727 			modinfo->type = ETH_MODULE_SFF_8636;
1728 			modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN;
1729 
1730 		} else {
1731 			modinfo->type = ETH_MODULE_SFF_8436;
1732 			modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
1733 		}
1734 		break;
1735 	case SFF8024_ID_QSFP28_8636:
1736 		modinfo->type = ETH_MODULE_SFF_8636;
1737 		modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN;
1738 		break;
1739 	default:
1740 		netif_warn(nic_dev, drv, netdev,
1741 			   "Optical module unknown: 0x%x\n", sfp_type);
1742 		return -EINVAL;
1743 	}
1744 
1745 	return 0;
1746 }
1747 
1748 static int hinic_get_module_eeprom(struct net_device *netdev,
1749 				   struct ethtool_eeprom *ee, u8 *data)
1750 {
1751 	struct hinic_dev *nic_dev = netdev_priv(netdev);
1752 	u8 sfp_data[STD_SFP_INFO_MAX_SIZE];
1753 	u16 len;
1754 	int err;
1755 
1756 	if (!ee->len || ((ee->len + ee->offset) > STD_SFP_INFO_MAX_SIZE))
1757 		return -EINVAL;
1758 
1759 	memset(data, 0, ee->len);
1760 
1761 	err = hinic_get_sfp_eeprom(nic_dev->hwdev, sfp_data, &len);
1762 	if (err)
1763 		return err;
1764 
1765 	memcpy(data, sfp_data + ee->offset, ee->len);
1766 
1767 	return 0;
1768 }
1769 
1770 static int
1771 hinic_get_link_ext_state(struct net_device *netdev,
1772 			 struct ethtool_link_ext_state_info *link_ext_state_info)
1773 {
1774 	struct hinic_dev *nic_dev = netdev_priv(netdev);
1775 
1776 	if (netif_carrier_ok(netdev))
1777 		return -ENODATA;
1778 
1779 	if (nic_dev->cable_unplugged)
1780 		link_ext_state_info->link_ext_state =
1781 			ETHTOOL_LINK_EXT_STATE_NO_CABLE;
1782 	else if (nic_dev->module_unrecognized)
1783 		link_ext_state_info->link_ext_state =
1784 			ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH;
1785 
1786 	return 0;
1787 }
1788 
1789 static const struct ethtool_ops hinic_ethtool_ops = {
1790 	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
1791 				     ETHTOOL_COALESCE_RX_MAX_FRAMES |
1792 				     ETHTOOL_COALESCE_TX_USECS |
1793 				     ETHTOOL_COALESCE_TX_MAX_FRAMES,
1794 
1795 	.get_link_ksettings = hinic_get_link_ksettings,
1796 	.set_link_ksettings = hinic_set_link_ksettings,
1797 	.get_drvinfo = hinic_get_drvinfo,
1798 	.get_link = ethtool_op_get_link,
1799 	.get_link_ext_state = hinic_get_link_ext_state,
1800 	.get_ringparam = hinic_get_ringparam,
1801 	.set_ringparam = hinic_set_ringparam,
1802 	.get_coalesce = hinic_get_coalesce,
1803 	.set_coalesce = hinic_set_coalesce,
1804 	.get_per_queue_coalesce = hinic_get_per_queue_coalesce,
1805 	.set_per_queue_coalesce = hinic_set_per_queue_coalesce,
1806 	.get_pauseparam = hinic_get_pauseparam,
1807 	.set_pauseparam = hinic_set_pauseparam,
1808 	.get_channels = hinic_get_channels,
1809 	.set_channels = hinic_set_channels,
1810 	.get_rxnfc = hinic_get_rxnfc,
1811 	.set_rxnfc = hinic_set_rxnfc,
1812 	.get_rxfh_key_size = hinic_get_rxfh_key_size,
1813 	.get_rxfh_indir_size = hinic_get_rxfh_indir_size,
1814 	.get_rxfh = hinic_get_rxfh,
1815 	.set_rxfh = hinic_set_rxfh,
1816 	.get_sset_count = hinic_get_sset_count,
1817 	.get_ethtool_stats = hinic_get_ethtool_stats,
1818 	.get_strings = hinic_get_strings,
1819 	.self_test = hinic_diag_test,
1820 	.set_phys_id = hinic_set_phys_id,
1821 	.get_module_info = hinic_get_module_info,
1822 	.get_module_eeprom = hinic_get_module_eeprom,
1823 };
1824 
1825 static const struct ethtool_ops hinicvf_ethtool_ops = {
1826 	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
1827 				     ETHTOOL_COALESCE_RX_MAX_FRAMES |
1828 				     ETHTOOL_COALESCE_TX_USECS |
1829 				     ETHTOOL_COALESCE_TX_MAX_FRAMES,
1830 
1831 	.get_link_ksettings = hinic_get_link_ksettings,
1832 	.get_drvinfo = hinic_get_drvinfo,
1833 	.get_link = ethtool_op_get_link,
1834 	.get_ringparam = hinic_get_ringparam,
1835 	.set_ringparam = hinic_set_ringparam,
1836 	.get_coalesce = hinic_get_coalesce,
1837 	.set_coalesce = hinic_set_coalesce,
1838 	.get_per_queue_coalesce = hinic_get_per_queue_coalesce,
1839 	.set_per_queue_coalesce = hinic_set_per_queue_coalesce,
1840 	.get_channels = hinic_get_channels,
1841 	.set_channels = hinic_set_channels,
1842 	.get_rxnfc = hinic_get_rxnfc,
1843 	.set_rxnfc = hinic_set_rxnfc,
1844 	.get_rxfh_key_size = hinic_get_rxfh_key_size,
1845 	.get_rxfh_indir_size = hinic_get_rxfh_indir_size,
1846 	.get_rxfh = hinic_get_rxfh,
1847 	.set_rxfh = hinic_set_rxfh,
1848 	.get_sset_count = hinic_get_sset_count,
1849 	.get_ethtool_stats = hinic_get_ethtool_stats,
1850 	.get_strings = hinic_get_strings,
1851 };
1852 
1853 void hinic_set_ethtool_ops(struct net_device *netdev)
1854 {
1855 	struct hinic_dev *nic_dev = netdev_priv(netdev);
1856 
1857 	if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
1858 		netdev->ethtool_ops = &hinic_ethtool_ops;
1859 	else
1860 		netdev->ethtool_ops = &hinicvf_ethtool_ops;
1861 }
1862