1 // SPDX-License-Identifier: GPL-2.0
2 /* Huawei HiNIC PCI Express Linux driver
3  * Copyright(c) 2017 Huawei Technologies Co., Ltd
14  */
15 
16 #include <linux/kernel.h>
17 #include <linux/pci.h>
18 #include <linux/device.h>
19 #include <linux/module.h>
20 #include <linux/types.h>
21 #include <linux/errno.h>
22 #include <linux/interrupt.h>
23 #include <linux/etherdevice.h>
24 #include <linux/netdevice.h>
25 #include <linux/if_vlan.h>
26 #include <linux/ethtool.h>
27 #include <linux/vmalloc.h>
28 #include <linux/sfp.h>
29 
30 #include "hinic_hw_qp.h"
31 #include "hinic_hw_dev.h"
32 #include "hinic_port.h"
33 #include "hinic_tx.h"
34 #include "hinic_rx.h"
35 #include "hinic_dev.h"
36 
37 #define SET_LINK_STR_MAX_LEN	16
38 
39 #define GET_SUPPORTED_MODE	0
40 #define GET_ADVERTISED_MODE	1
41 
42 #define ETHTOOL_ADD_SUPPORTED_SPEED_LINK_MODE(ecmd, mode)	\
43 		((ecmd)->supported |=	\
44 		(1UL << hw_to_ethtool_link_mode_table[mode].link_mode_bit))
45 #define ETHTOOL_ADD_ADVERTISED_SPEED_LINK_MODE(ecmd, mode)	\
46 		((ecmd)->advertising |=	\
47 		(1UL << hw_to_ethtool_link_mode_table[mode].link_mode_bit))
48 #define ETHTOOL_ADD_SUPPORTED_LINK_MODE(ecmd, mode)	\
49 				((ecmd)->supported |= SUPPORTED_##mode)
50 #define ETHTOOL_ADD_ADVERTISED_LINK_MODE(ecmd, mode)	\
51 				((ecmd)->advertising |= ADVERTISED_##mode)
52 
53 #define COALESCE_PENDING_LIMIT_UNIT	8
54 #define	COALESCE_TIMER_CFG_UNIT		9
55 #define COALESCE_ALL_QUEUE		0xFFFF
56 #define COALESCE_MAX_PENDING_LIMIT	(255 * COALESCE_PENDING_LIMIT_UNIT)
57 #define COALESCE_MAX_TIMER_CFG		(255 * COALESCE_TIMER_CFG_UNIT)
58 #define OBJ_STR_MAX_LEN			32
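/* The hardware programs interrupt coalescing in units of
 * COALESCE_TIMER_CFG_UNIT microseconds and COALESCE_PENDING_LIMIT_UNIT
 * frames, so user values are divided (rounding down) before being written.
 * For example, rx-usecs = 36 becomes a timer config of 36 / 9 = 4 ticks and
 * rx-frames = 64 becomes a pending limit of 64 / 8 = 8.
 */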
59 
60 struct hw2ethtool_link_mode {
61 	enum ethtool_link_mode_bit_indices link_mode_bit;
62 	u32 speed;
63 	enum hinic_link_mode hw_link_mode;
64 };
65 
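/* Legacy-style u64 bitmaps built from SUPPORTED_xxx / ADVERTISED_xxx flags
 * and individual link mode bits; they are later copied into the
 * ethtool_link_ksettings bitmaps with bitmap_copy().  This works because the
 * legacy flag values share bit positions with the corresponding link mode
 * bits and every bit used here is below 64.
 */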
66 struct cmd_link_settings {
67 	u64	supported;
68 	u64	advertising;
69 
70 	u32	speed;
71 	u8	duplex;
72 	u8	port;
73 	u8	autoneg;
74 };
75 
76 static u32 hw_to_ethtool_speed[LINK_SPEED_LEVELS] = {
77 	SPEED_10, SPEED_100,
78 	SPEED_1000, SPEED_10000,
79 	SPEED_25000, SPEED_40000,
80 	SPEED_100000
81 };
82 
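/* Mapping from hinic hardware link modes to ethtool link mode bits and
 * speeds.  Note that the 25G "_S" and plain hardware variants map to the
 * same ethtool 25000baseKR/25000baseCR bits, so two table entries can share
 * one link mode bit.
 */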
83 static struct hw2ethtool_link_mode
84 	hw_to_ethtool_link_mode_table[HINIC_LINK_MODE_NUMBERS] = {
85 	{
86 		.link_mode_bit = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
87 		.speed = SPEED_10000,
88 		.hw_link_mode = HINIC_10GE_BASE_KR,
89 	},
90 	{
91 		.link_mode_bit = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
92 		.speed = SPEED_40000,
93 		.hw_link_mode = HINIC_40GE_BASE_KR4,
94 	},
95 	{
96 		.link_mode_bit = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
97 		.speed = SPEED_40000,
98 		.hw_link_mode = HINIC_40GE_BASE_CR4,
99 	},
100 	{
101 		.link_mode_bit = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
102 		.speed = SPEED_100000,
103 		.hw_link_mode = HINIC_100GE_BASE_KR4,
104 	},
105 	{
106 		.link_mode_bit = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
107 		.speed = SPEED_100000,
108 		.hw_link_mode = HINIC_100GE_BASE_CR4,
109 	},
110 	{
111 		.link_mode_bit = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
112 		.speed = SPEED_25000,
113 		.hw_link_mode = HINIC_25GE_BASE_KR_S,
114 	},
115 	{
116 		.link_mode_bit = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
117 		.speed = SPEED_25000,
118 		.hw_link_mode = HINIC_25GE_BASE_CR_S,
119 	},
120 	{
121 		.link_mode_bit = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
122 		.speed = SPEED_25000,
123 		.hw_link_mode = HINIC_25GE_BASE_KR,
124 	},
125 	{
126 		.link_mode_bit = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
127 		.speed = SPEED_25000,
128 		.hw_link_mode = HINIC_25GE_BASE_CR,
129 	},
130 	{
131 		.link_mode_bit = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
132 		.speed = SPEED_1000,
133 		.hw_link_mode = HINIC_GE_BASE_KX,
134 	},
135 };
136 
137 #define LP_DEFAULT_TIME                 5 /* seconds */
138 #define LP_PKT_LEN                      1514
139 
140 #define PORT_DOWN_ERR_IDX		0
141 enum diag_test_index {
142 	INTERNAL_LP_TEST = 0,
143 	EXTERNAL_LP_TEST = 1,
144 	DIAG_TEST_MAX = 2,
145 };
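/* The self-test result array has DIAG_TEST_MAX entries.  PORT_DOWN_ERR_IDX
 * reuses slot 0 (the internal loopback slot) to report that the test was
 * refused because the interface was down.
 */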
146 
147 static void set_link_speed(struct ethtool_link_ksettings *link_ksettings,
148 			   enum hinic_speed speed)
149 {
150 	switch (speed) {
151 	case HINIC_SPEED_10MB_LINK:
152 		link_ksettings->base.speed = SPEED_10;
153 		break;
154 
155 	case HINIC_SPEED_100MB_LINK:
156 		link_ksettings->base.speed = SPEED_100;
157 		break;
158 
159 	case HINIC_SPEED_1000MB_LINK:
160 		link_ksettings->base.speed = SPEED_1000;
161 		break;
162 
163 	case HINIC_SPEED_10GB_LINK:
164 		link_ksettings->base.speed = SPEED_10000;
165 		break;
166 
167 	case HINIC_SPEED_25GB_LINK:
168 		link_ksettings->base.speed = SPEED_25000;
169 		break;
170 
171 	case HINIC_SPEED_40GB_LINK:
172 		link_ksettings->base.speed = SPEED_40000;
173 		break;
174 
175 	case HINIC_SPEED_100GB_LINK:
176 		link_ksettings->base.speed = SPEED_100000;
177 		break;
178 
179 	default:
180 		link_ksettings->base.speed = SPEED_UNKNOWN;
181 		break;
182 	}
183 }
184 
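/* Return the index of the given hardware link mode in
 * hw_to_ethtool_link_mode_table, or HINIC_LINK_MODE_NUMBERS if the mode is
 * unknown; callers must check for the out-of-range value.
 */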
185 static int hinic_get_link_mode_index(enum hinic_link_mode link_mode)
186 {
187 	int i = 0;
188 
189 	for (i = 0; i < HINIC_LINK_MODE_NUMBERS; i++) {
190 		if (link_mode == hw_to_ethtool_link_mode_table[i].hw_link_mode)
191 			break;
192 	}
193 
194 	return i;
195 }
196 
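/* hw_link_mode is a bitmap of hinic link modes; every set bit is translated
 * into the corresponding ethtool supported or advertised link mode bit.
 */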
197 static void hinic_add_ethtool_link_mode(struct cmd_link_settings *link_settings,
198 					enum hinic_link_mode hw_link_mode,
199 					u32 name)
200 {
201 	enum hinic_link_mode link_mode;
202 	int idx = 0;
203 
204 	for (link_mode = 0; link_mode < HINIC_LINK_MODE_NUMBERS; link_mode++) {
205 		if (hw_link_mode & ((u32)1 << link_mode)) {
206 			idx = hinic_get_link_mode_index(link_mode);
207 			if (idx >= HINIC_LINK_MODE_NUMBERS)
208 				continue;
209 
210 			if (name == GET_SUPPORTED_MODE)
211 				ETHTOOL_ADD_SUPPORTED_SPEED_LINK_MODE
212 					(link_settings, idx);
213 			else
214 				ETHTOOL_ADD_ADVERTISED_SPEED_LINK_MODE
215 					(link_settings, idx);
216 		}
217 	}
218 }
219 
220 static void hinic_link_port_type(struct cmd_link_settings *link_settings,
221 				 enum hinic_port_type port_type)
222 {
223 	switch (port_type) {
224 	case HINIC_PORT_ELEC:
225 	case HINIC_PORT_TP:
226 		ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, TP);
227 		ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, TP);
228 		link_settings->port = PORT_TP;
229 		break;
230 
231 	case HINIC_PORT_AOC:
232 	case HINIC_PORT_FIBRE:
233 		ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, FIBRE);
234 		ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, FIBRE);
235 		link_settings->port = PORT_FIBRE;
236 		break;
237 
238 	case HINIC_PORT_COPPER:
239 		ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, FIBRE);
240 		ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, FIBRE);
241 		link_settings->port = PORT_DA;
242 		break;
243 
244 	case HINIC_PORT_BACKPLANE:
245 		ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, Backplane);
246 		ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, Backplane);
247 		link_settings->port = PORT_NONE;
248 		break;
249 
250 	default:
251 		link_settings->port = PORT_OTHER;
252 		break;
253 	}
254 }
255 
256 static int hinic_get_link_ksettings(struct net_device *netdev,
257 				    struct ethtool_link_ksettings
258 				    *link_ksettings)
259 {
260 	struct hinic_dev *nic_dev = netdev_priv(netdev);
261 	struct hinic_link_mode_cmd link_mode = { 0 };
262 	struct hinic_pause_config pause_info = { 0 };
263 	struct cmd_link_settings settings = { 0 };
264 	enum hinic_port_link_state link_state;
265 	struct hinic_port_cap port_cap;
266 	int err;
267 
268 	ethtool_link_ksettings_zero_link_mode(link_ksettings, supported);
269 	ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
270 
271 	link_ksettings->base.speed = SPEED_UNKNOWN;
272 	link_ksettings->base.autoneg = AUTONEG_DISABLE;
273 	link_ksettings->base.duplex = DUPLEX_UNKNOWN;
274 
275 	err = hinic_port_get_cap(nic_dev, &port_cap);
276 	if (err)
277 		return err;
278 
279 	hinic_link_port_type(&settings, port_cap.port_type);
280 	link_ksettings->base.port = settings.port;
281 
282 	err = hinic_port_link_state(nic_dev, &link_state);
283 	if (err)
284 		return err;
285 
286 	if (link_state == HINIC_LINK_STATE_UP) {
287 		set_link_speed(link_ksettings, port_cap.speed);
288 		link_ksettings->base.duplex =
289 			(port_cap.duplex == HINIC_DUPLEX_FULL) ?
290 			DUPLEX_FULL : DUPLEX_HALF;
291 	}
292 
	if (port_cap.autoneg_cap & HINIC_AUTONEG_SUPPORTED)
294 		ethtool_link_ksettings_add_link_mode(link_ksettings,
295 						     advertising, Autoneg);
296 
297 	if (port_cap.autoneg_state == HINIC_AUTONEG_ACTIVE)
298 		link_ksettings->base.autoneg = AUTONEG_ENABLE;
299 
300 	err = hinic_get_link_mode(nic_dev->hwdev, &link_mode);
301 	if (err || link_mode.supported == HINIC_SUPPORTED_UNKNOWN ||
302 	    link_mode.advertised == HINIC_SUPPORTED_UNKNOWN)
303 		return -EIO;
304 
305 	hinic_add_ethtool_link_mode(&settings, link_mode.supported,
306 				    GET_SUPPORTED_MODE);
307 	hinic_add_ethtool_link_mode(&settings, link_mode.advertised,
308 				    GET_ADVERTISED_MODE);
309 
310 	if (!HINIC_IS_VF(nic_dev->hwdev->hwif)) {
311 		err = hinic_get_hw_pause_info(nic_dev->hwdev, &pause_info);
312 		if (err)
313 			return err;
314 		ETHTOOL_ADD_SUPPORTED_LINK_MODE(&settings, Pause);
315 		if (pause_info.rx_pause && pause_info.tx_pause) {
316 			ETHTOOL_ADD_ADVERTISED_LINK_MODE(&settings, Pause);
317 		} else if (pause_info.tx_pause) {
318 			ETHTOOL_ADD_ADVERTISED_LINK_MODE(&settings, Asym_Pause);
319 		} else if (pause_info.rx_pause) {
320 			ETHTOOL_ADD_ADVERTISED_LINK_MODE(&settings, Pause);
321 			ETHTOOL_ADD_ADVERTISED_LINK_MODE(&settings, Asym_Pause);
322 		}
323 	}
324 
325 	bitmap_copy(link_ksettings->link_modes.supported,
326 		    (unsigned long *)&settings.supported,
327 		    __ETHTOOL_LINK_MODE_MASK_NBITS);
328 	bitmap_copy(link_ksettings->link_modes.advertising,
329 		    (unsigned long *)&settings.advertising,
330 		    __ETHTOOL_LINK_MODE_MASK_NBITS);
331 
332 	return 0;
333 }
334 
335 static int hinic_ethtool_to_hw_speed_level(u32 speed)
336 {
337 	int i;
338 
339 	for (i = 0; i < LINK_SPEED_LEVELS; i++) {
340 		if (hw_to_ethtool_speed[i] == speed)
341 			break;
342 	}
343 
344 	return i;
345 }
346 
347 static bool hinic_is_support_speed(enum hinic_link_mode supported_link,
348 				   u32 speed)
349 {
350 	enum hinic_link_mode link_mode;
351 	int idx;
352 
353 	for (link_mode = 0; link_mode < HINIC_LINK_MODE_NUMBERS; link_mode++) {
354 		if (!(supported_link & ((u32)1 << link_mode)))
355 			continue;
356 
357 		idx = hinic_get_link_mode_index(link_mode);
358 		if (idx >= HINIC_LINK_MODE_NUMBERS)
359 			continue;
360 
361 		if (hw_to_ethtool_link_mode_table[idx].speed == speed)
362 			return true;
363 	}
364 
365 	return false;
366 }
367 
368 static bool hinic_is_speed_legal(struct hinic_dev *nic_dev, u32 speed)
369 {
370 	struct hinic_link_mode_cmd link_mode = { 0 };
371 	struct net_device *netdev = nic_dev->netdev;
372 	enum nic_speed_level speed_level = 0;
373 	int err;
374 
375 	err = hinic_get_link_mode(nic_dev->hwdev, &link_mode);
376 	if (err)
377 		return false;
378 
379 	if (link_mode.supported == HINIC_SUPPORTED_UNKNOWN ||
380 	    link_mode.advertised == HINIC_SUPPORTED_UNKNOWN)
381 		return false;
382 
383 	speed_level = hinic_ethtool_to_hw_speed_level(speed);
384 	if (speed_level >= LINK_SPEED_LEVELS ||
385 	    !hinic_is_support_speed(link_mode.supported, speed)) {
386 		netif_err(nic_dev, drv, netdev,
387 			  "Unsupported speed: %d\n", speed);
388 		return false;
389 	}
390 
391 	return true;
392 }
393 
394 static int get_link_settings_type(struct hinic_dev *nic_dev,
395 				  u8 autoneg, u32 speed, u32 *set_settings)
396 {
397 	struct hinic_port_cap port_cap = { 0 };
398 	int err;
399 
400 	err = hinic_port_get_cap(nic_dev, &port_cap);
401 	if (err)
402 		return err;
403 
	/* always update the autoneg setting when the port supports it */
405 	if (port_cap.autoneg_cap)
406 		*set_settings |= HILINK_LINK_SET_AUTONEG;
407 
408 	if (autoneg == AUTONEG_ENABLE) {
409 		if (!port_cap.autoneg_cap) {
			netif_err(nic_dev, drv, nic_dev->netdev, "Autoneg is not supported\n");
411 			return -EOPNOTSUPP;
412 		}
413 	} else if (speed != (u32)SPEED_UNKNOWN) {
414 		/* set speed only when autoneg is disabled */
415 		if (!hinic_is_speed_legal(nic_dev, speed))
416 			return -EINVAL;
417 		*set_settings |= HILINK_LINK_SET_SPEED;
418 	} else {
419 		netif_err(nic_dev, drv, nic_dev->netdev, "Need to set speed when autoneg is off\n");
420 		return -EOPNOTSUPP;
421 	}
422 
423 	return 0;
424 }
425 
426 static int set_link_settings_separate_cmd(struct hinic_dev *nic_dev,
427 					  u32 set_settings, u8 autoneg,
428 					  u32 speed)
429 {
430 	enum nic_speed_level speed_level = 0;
431 	int err = 0;
432 
433 	if (set_settings & HILINK_LINK_SET_AUTONEG) {
434 		err = hinic_set_autoneg(nic_dev->hwdev,
435 					(autoneg == AUTONEG_ENABLE));
436 		if (err)
437 			netif_err(nic_dev, drv, nic_dev->netdev, "%s autoneg failed\n",
438 				  (autoneg == AUTONEG_ENABLE) ?
439 				  "Enable" : "Disable");
440 		else
441 			netif_info(nic_dev, drv, nic_dev->netdev, "%s autoneg successfully\n",
442 				   (autoneg == AUTONEG_ENABLE) ?
443 				   "Enable" : "Disable");
444 	}
445 
446 	if (!err && (set_settings & HILINK_LINK_SET_SPEED)) {
447 		speed_level = hinic_ethtool_to_hw_speed_level(speed);
448 		err = hinic_set_speed(nic_dev->hwdev, speed_level);
449 		if (err)
450 			netif_err(nic_dev, drv, nic_dev->netdev, "Set speed %d failed\n",
451 				  speed);
452 		else
453 			netif_info(nic_dev, drv, nic_dev->netdev, "Set speed %d successfully\n",
454 				   speed);
455 	}
456 
457 	return err;
458 }
459 
460 static int hinic_set_settings_to_hw(struct hinic_dev *nic_dev,
461 				    u32 set_settings, u8 autoneg, u32 speed)
462 {
463 	struct hinic_link_ksettings_info settings = {0};
464 	char set_link_str[SET_LINK_STR_MAX_LEN] = {0};
465 	const char *autoneg_str;
466 	struct net_device *netdev = nic_dev->netdev;
467 	enum nic_speed_level speed_level = 0;
468 	int err;
469 
470 	autoneg_str = (set_settings & HILINK_LINK_SET_AUTONEG) ?
		      (autoneg ? "autoneg enable " : "autoneg disable ") : "";
472 
473 	if (set_settings & HILINK_LINK_SET_SPEED) {
474 		speed_level = hinic_ethtool_to_hw_speed_level(speed);
475 		err = snprintf(set_link_str, SET_LINK_STR_MAX_LEN,
476 			       "speed %d ", speed);
477 		if (err >= SET_LINK_STR_MAX_LEN) {
			netif_err(nic_dev, drv, netdev, "Failed to format link speed string: snprintf returned %d, buffer length %d\n",
				  err, SET_LINK_STR_MAX_LEN);
480 			return -EFAULT;
481 		}
482 	}
483 
484 	settings.func_id = HINIC_HWIF_FUNC_IDX(nic_dev->hwdev->hwif);
485 	settings.valid_bitmap = set_settings;
486 	settings.autoneg = autoneg;
487 	settings.speed = speed_level;
488 
489 	err = hinic_set_link_settings(nic_dev->hwdev, &settings);
490 	if (err != HINIC_MGMT_CMD_UNSUPPORTED) {
491 		if (err)
492 			netif_err(nic_dev, drv, netdev, "Set %s%sfailed\n",
493 				  autoneg_str, set_link_str);
494 		else
495 			netif_info(nic_dev, drv, netdev, "Set %s%ssuccessfully\n",
496 				   autoneg_str, set_link_str);
497 
498 		return err;
499 	}
500 
501 	return set_link_settings_separate_cmd(nic_dev, set_settings, autoneg,
502 					      speed);
503 }
504 
505 static int set_link_settings(struct net_device *netdev, u8 autoneg, u32 speed)
506 {
507 	struct hinic_dev *nic_dev = netdev_priv(netdev);
508 	u32 set_settings = 0;
509 	int err;
510 
511 	err = get_link_settings_type(nic_dev, autoneg, speed, &set_settings);
512 	if (err)
513 		return err;
514 
515 	if (set_settings)
516 		err = hinic_set_settings_to_hw(nic_dev, set_settings,
517 					       autoneg, speed);
518 	else
519 		netif_info(nic_dev, drv, netdev, "Nothing changed, exit without setting anything\n");
520 
521 	return err;
522 }
523 
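/* Only autoneg and speed can be changed.  With the ethtool utility this is
 * reached by something like (interface name is illustrative):
 *   ethtool -s eth0 autoneg off speed 25000
 *   ethtool -s eth0 autoneg on
 */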
static int
hinic_set_link_ksettings(struct net_device *netdev,
			 const struct ethtool_link_ksettings *link_settings)
526 {
527 	/* only support to set autoneg and speed */
528 	return set_link_settings(netdev, link_settings->base.autoneg,
529 				 link_settings->base.speed);
530 }
531 
532 static void hinic_get_drvinfo(struct net_device *netdev,
533 			      struct ethtool_drvinfo *info)
534 {
535 	struct hinic_dev *nic_dev = netdev_priv(netdev);
536 	u8 mgmt_ver[HINIC_MGMT_VERSION_MAX_LEN] = {0};
537 	struct hinic_hwdev *hwdev = nic_dev->hwdev;
538 	struct hinic_hwif *hwif = hwdev->hwif;
539 	int err;
540 
541 	strscpy(info->driver, HINIC_DRV_NAME, sizeof(info->driver));
542 	strscpy(info->bus_info, pci_name(hwif->pdev), sizeof(info->bus_info));
543 
544 	err = hinic_get_mgmt_version(nic_dev, mgmt_ver);
545 	if (err)
546 		return;
547 
548 	snprintf(info->fw_version, sizeof(info->fw_version), "%s", mgmt_ver);
549 }
550 
551 static void hinic_get_ringparam(struct net_device *netdev,
552 				struct ethtool_ringparam *ring)
553 {
554 	struct hinic_dev *nic_dev = netdev_priv(netdev);
555 
556 	ring->rx_max_pending = HINIC_MAX_QUEUE_DEPTH;
557 	ring->tx_max_pending = HINIC_MAX_QUEUE_DEPTH;
558 	ring->rx_pending = nic_dev->rq_depth;
559 	ring->tx_pending = nic_dev->sq_depth;
560 }
561 
562 static int check_ringparam_valid(struct hinic_dev *nic_dev,
563 				 struct ethtool_ringparam *ring)
564 {
565 	if (ring->rx_jumbo_pending || ring->rx_mini_pending) {
566 		netif_err(nic_dev, drv, nic_dev->netdev,
567 			  "Unsupported rx_jumbo_pending/rx_mini_pending\n");
568 		return -EINVAL;
569 	}
570 
571 	if (ring->tx_pending > HINIC_MAX_QUEUE_DEPTH ||
572 	    ring->tx_pending < HINIC_MIN_QUEUE_DEPTH ||
573 	    ring->rx_pending > HINIC_MAX_QUEUE_DEPTH ||
574 	    ring->rx_pending < HINIC_MIN_QUEUE_DEPTH) {
575 		netif_err(nic_dev, drv, nic_dev->netdev,
576 			  "Queue depth out of range [%d-%d]\n",
577 			  HINIC_MIN_QUEUE_DEPTH, HINIC_MAX_QUEUE_DEPTH);
578 		return -EINVAL;
579 	}
580 
581 	return 0;
582 }
583 
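/* Ring depths are rounded down to a power of two (1 << ilog2()), and applying
 * a new depth restarts the interface if it is running.  For illustration,
 * with the ethtool utility (interface name is illustrative):
 *   ethtool -G eth0 rx 4096 tx 4096
 */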
584 static int hinic_set_ringparam(struct net_device *netdev,
585 			       struct ethtool_ringparam *ring)
586 {
587 	struct hinic_dev *nic_dev = netdev_priv(netdev);
588 	u16 new_sq_depth, new_rq_depth;
589 	int err;
590 
591 	err = check_ringparam_valid(nic_dev, ring);
592 	if (err)
593 		return err;
594 
595 	new_sq_depth = (u16)(1U << (u16)ilog2(ring->tx_pending));
596 	new_rq_depth = (u16)(1U << (u16)ilog2(ring->rx_pending));
597 
598 	if (new_sq_depth == nic_dev->sq_depth &&
599 	    new_rq_depth == nic_dev->rq_depth)
600 		return 0;
601 
602 	netif_info(nic_dev, drv, netdev,
603 		   "Change Tx/Rx ring depth from %d/%d to %d/%d\n",
604 		   nic_dev->sq_depth, nic_dev->rq_depth,
605 		   new_sq_depth, new_rq_depth);
606 
607 	nic_dev->sq_depth = new_sq_depth;
608 	nic_dev->rq_depth = new_rq_depth;
609 
610 	if (netif_running(netdev)) {
611 		netif_info(nic_dev, drv, netdev, "Restarting netdev\n");
612 		err = hinic_close(netdev);
613 		if (err) {
614 			netif_err(nic_dev, drv, netdev,
615 				  "Failed to close netdev\n");
616 			return -EFAULT;
617 		}
618 
619 		err = hinic_open(netdev);
620 		if (err) {
621 			netif_err(nic_dev, drv, netdev,
622 				  "Failed to open netdev\n");
623 			return -EFAULT;
624 		}
625 	}
626 
627 	return 0;
628 }
629 
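/* queue == COALESCE_ALL_QUEUE means "device-wide": the settings of tx/rx
 * queue 0 are reported as the representative values.  Hardware units are
 * converted back to microseconds and frames here.
 */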
630 static int __hinic_get_coalesce(struct net_device *netdev,
631 				struct ethtool_coalesce *coal, u16 queue)
632 {
633 	struct hinic_dev *nic_dev = netdev_priv(netdev);
634 	struct hinic_intr_coal_info *rx_intr_coal_info;
635 	struct hinic_intr_coal_info *tx_intr_coal_info;
636 
637 	if (queue == COALESCE_ALL_QUEUE) {
638 		/* get tx/rx irq0 as default parameters */
639 		rx_intr_coal_info = &nic_dev->rx_intr_coalesce[0];
640 		tx_intr_coal_info = &nic_dev->tx_intr_coalesce[0];
641 	} else {
642 		if (queue >= nic_dev->num_qps) {
643 			netif_err(nic_dev, drv, netdev,
644 				  "Invalid queue_id: %d\n", queue);
645 			return -EINVAL;
646 		}
647 		rx_intr_coal_info = &nic_dev->rx_intr_coalesce[queue];
648 		tx_intr_coal_info = &nic_dev->tx_intr_coalesce[queue];
649 	}
650 
	/* coalesce_timer_cfg is in units of 9 us */
652 	coal->rx_coalesce_usecs = rx_intr_coal_info->coalesce_timer_cfg *
653 			COALESCE_TIMER_CFG_UNIT;
	/* pending_limt (coalesced frames) is in units of 8 frames */
655 	coal->rx_max_coalesced_frames = rx_intr_coal_info->pending_limt *
656 			COALESCE_PENDING_LIMIT_UNIT;
657 	coal->tx_coalesce_usecs = tx_intr_coal_info->coalesce_timer_cfg *
658 			COALESCE_TIMER_CFG_UNIT;
659 	coal->tx_max_coalesced_frames = tx_intr_coal_info->pending_limt *
660 			COALESCE_PENDING_LIMIT_UNIT;
661 
662 	return 0;
663 }
664 
665 static int is_coalesce_exceed_limit(const struct ethtool_coalesce *coal)
666 {
667 	if (coal->rx_coalesce_usecs > COALESCE_MAX_TIMER_CFG ||
668 	    coal->rx_max_coalesced_frames > COALESCE_MAX_PENDING_LIMIT ||
669 	    coal->tx_coalesce_usecs > COALESCE_MAX_TIMER_CFG ||
670 	    coal->tx_max_coalesced_frames > COALESCE_MAX_PENDING_LIMIT)
671 		return -ERANGE;
672 
673 	return 0;
674 }
675 
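/* Cache the requested coalesce parameters in the per-queue arrays and, only
 * when the interface is up and the queue is active, also write them to the
 * hardware through the MSI-X interrupt configuration.
 */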
676 static int set_queue_coalesce(struct hinic_dev *nic_dev, u16 q_id,
677 			      struct hinic_intr_coal_info *coal,
678 			      bool set_rx_coal)
679 {
680 	struct hinic_intr_coal_info *intr_coal = NULL;
681 	struct hinic_msix_config interrupt_info = {0};
682 	struct net_device *netdev = nic_dev->netdev;
683 	u16 msix_idx;
684 	int err;
685 
686 	intr_coal = set_rx_coal ? &nic_dev->rx_intr_coalesce[q_id] :
687 		    &nic_dev->tx_intr_coalesce[q_id];
688 
689 	intr_coal->coalesce_timer_cfg = coal->coalesce_timer_cfg;
690 	intr_coal->pending_limt = coal->pending_limt;
691 
	/* If the netdev is not running or the qp is not in use,
	 * there is no need to write the coalesce settings to hardware.
	 */
695 	if (!(nic_dev->flags & HINIC_INTF_UP) ||
696 	    q_id >= nic_dev->num_qps)
697 		return 0;
698 
699 	msix_idx = set_rx_coal ? nic_dev->rxqs[q_id].rq->msix_entry :
700 		   nic_dev->txqs[q_id].sq->msix_entry;
701 	interrupt_info.msix_index = msix_idx;
702 	interrupt_info.coalesce_timer_cnt = intr_coal->coalesce_timer_cfg;
703 	interrupt_info.pending_cnt = intr_coal->pending_limt;
704 	interrupt_info.resend_timer_cnt = intr_coal->resend_timer_cfg;
705 
706 	err = hinic_set_interrupt_cfg(nic_dev->hwdev, &interrupt_info);
707 	if (err)
708 		netif_warn(nic_dev, drv, netdev,
			   "Failed to set %s queue%d coalesce\n",
710 			   set_rx_coal ? "rx" : "tx", q_id);
711 
712 	return err;
713 }
714 
715 static int __set_hw_coal_param(struct hinic_dev *nic_dev,
716 			       struct hinic_intr_coal_info *intr_coal,
717 			       u16 queue, bool set_rx_coal)
718 {
719 	int err;
720 	u16 i;
721 
722 	if (queue == COALESCE_ALL_QUEUE) {
723 		for (i = 0; i < nic_dev->max_qps; i++) {
724 			err = set_queue_coalesce(nic_dev, i, intr_coal,
725 						 set_rx_coal);
726 			if (err)
727 				return err;
728 		}
729 	} else {
730 		if (queue >= nic_dev->num_qps) {
731 			netif_err(nic_dev, drv, nic_dev->netdev,
732 				  "Invalid queue_id: %d\n", queue);
733 			return -EINVAL;
734 		}
735 		err = set_queue_coalesce(nic_dev, queue, intr_coal,
736 					 set_rx_coal);
737 		if (err)
738 			return err;
739 	}
740 
741 	return 0;
742 }
743 
744 static int __hinic_set_coalesce(struct net_device *netdev,
745 				struct ethtool_coalesce *coal, u16 queue)
746 {
747 	struct hinic_dev *nic_dev = netdev_priv(netdev);
748 	struct hinic_intr_coal_info rx_intr_coal = {0};
749 	struct hinic_intr_coal_info tx_intr_coal = {0};
750 	bool set_rx_coal = false;
751 	bool set_tx_coal = false;
752 	int err;
753 
754 	err = is_coalesce_exceed_limit(coal);
755 	if (err)
756 		return err;
757 
758 	if (coal->rx_coalesce_usecs || coal->rx_max_coalesced_frames) {
759 		rx_intr_coal.coalesce_timer_cfg =
760 		(u8)(coal->rx_coalesce_usecs / COALESCE_TIMER_CFG_UNIT);
761 		rx_intr_coal.pending_limt = (u8)(coal->rx_max_coalesced_frames /
762 				COALESCE_PENDING_LIMIT_UNIT);
763 		set_rx_coal = true;
764 	}
765 
766 	if (coal->tx_coalesce_usecs || coal->tx_max_coalesced_frames) {
767 		tx_intr_coal.coalesce_timer_cfg =
768 		(u8)(coal->tx_coalesce_usecs / COALESCE_TIMER_CFG_UNIT);
769 		tx_intr_coal.pending_limt = (u8)(coal->tx_max_coalesced_frames /
770 		COALESCE_PENDING_LIMIT_UNIT);
771 		set_tx_coal = true;
772 	}
773 
	/* Setting the coalesce timer or the pending limit to zero disables
	 * interrupt coalescing.
	 */
777 	if (set_rx_coal && (!rx_intr_coal.coalesce_timer_cfg ||
778 			    !rx_intr_coal.pending_limt))
779 		netif_warn(nic_dev, drv, netdev, "RX coalesce will be disabled\n");
780 	if (set_tx_coal && (!tx_intr_coal.coalesce_timer_cfg ||
781 			    !tx_intr_coal.pending_limt))
782 		netif_warn(nic_dev, drv, netdev, "TX coalesce will be disabled\n");
783 
784 	if (set_rx_coal) {
785 		err = __set_hw_coal_param(nic_dev, &rx_intr_coal, queue, true);
786 		if (err)
787 			return err;
788 	}
789 	if (set_tx_coal) {
790 		err = __set_hw_coal_param(nic_dev, &tx_intr_coal, queue, false);
791 		if (err)
792 			return err;
793 	}
794 	return 0;
795 }
796 
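/* ethtool coalesce entry points.  For illustration, with an ethtool
 * userspace that supports per-queue coalescing (interface name is
 * illustrative):
 *   ethtool -C eth0 rx-usecs 36 rx-frames 64
 *   ethtool --per-queue eth0 queue_mask 0x1 --coalesce rx-usecs 18
 */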
797 static int hinic_get_coalesce(struct net_device *netdev,
798 			      struct ethtool_coalesce *coal,
799 			      struct kernel_ethtool_coalesce *kernel_coal,
800 			      struct netlink_ext_ack *extack)
801 {
802 	return __hinic_get_coalesce(netdev, coal, COALESCE_ALL_QUEUE);
803 }
804 
805 static int hinic_set_coalesce(struct net_device *netdev,
806 			      struct ethtool_coalesce *coal,
807 			      struct kernel_ethtool_coalesce *kernel_coal,
808 			      struct netlink_ext_ack *extack)
809 {
810 	return __hinic_set_coalesce(netdev, coal, COALESCE_ALL_QUEUE);
811 }
812 
813 static int hinic_get_per_queue_coalesce(struct net_device *netdev, u32 queue,
814 					struct ethtool_coalesce *coal)
815 {
816 	return __hinic_get_coalesce(netdev, coal, queue);
817 }
818 
819 static int hinic_set_per_queue_coalesce(struct net_device *netdev, u32 queue,
820 					struct ethtool_coalesce *coal)
821 {
822 	return __hinic_set_coalesce(netdev, coal, queue);
823 }
824 
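/* Pause configuration is cached in nic_cfg under cfg_mutex so that
 * hinic_get_pauseparam() can report the user-requested values.  Setting
 * pause requires the requested autoneg state to match the port's autoneg
 * state.  For illustration (interface name is illustrative):
 *   ethtool -A eth0 rx on tx on
 */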
825 static void hinic_get_pauseparam(struct net_device *netdev,
826 				 struct ethtool_pauseparam *pause)
827 {
828 	struct hinic_dev *nic_dev = netdev_priv(netdev);
829 	struct hinic_pause_config pause_info = {0};
830 	struct hinic_nic_cfg *nic_cfg;
831 	int err;
832 
833 	nic_cfg = &nic_dev->hwdev->func_to_io.nic_cfg;
834 
835 	err = hinic_get_hw_pause_info(nic_dev->hwdev, &pause_info);
836 	if (!err) {
837 		pause->autoneg = pause_info.auto_neg;
838 		if (nic_cfg->pause_set || !pause_info.auto_neg) {
839 			pause->rx_pause = nic_cfg->rx_pause;
840 			pause->tx_pause = nic_cfg->tx_pause;
841 		} else {
842 			pause->rx_pause = pause_info.rx_pause;
843 			pause->tx_pause = pause_info.tx_pause;
844 		}
845 	}
846 }
847 
848 static int hinic_set_pauseparam(struct net_device *netdev,
849 				struct ethtool_pauseparam *pause)
850 {
851 	struct hinic_dev *nic_dev = netdev_priv(netdev);
852 	struct hinic_pause_config pause_info = {0};
853 	struct hinic_port_cap port_cap = {0};
854 	int err;
855 
856 	err = hinic_port_get_cap(nic_dev, &port_cap);
857 	if (err)
858 		return -EIO;
859 
860 	if (pause->autoneg != port_cap.autoneg_state)
861 		return -EOPNOTSUPP;
862 
863 	pause_info.auto_neg = pause->autoneg;
864 	pause_info.rx_pause = pause->rx_pause;
865 	pause_info.tx_pause = pause->tx_pause;
866 
867 	mutex_lock(&nic_dev->hwdev->func_to_io.nic_cfg.cfg_mutex);
868 	err = hinic_set_hw_pause_info(nic_dev->hwdev, &pause_info);
869 	if (err) {
870 		mutex_unlock(&nic_dev->hwdev->func_to_io.nic_cfg.cfg_mutex);
871 		return err;
872 	}
873 	nic_dev->hwdev->func_to_io.nic_cfg.pause_set = true;
874 	nic_dev->hwdev->func_to_io.nic_cfg.auto_neg = pause->autoneg;
875 	nic_dev->hwdev->func_to_io.nic_cfg.rx_pause = pause->rx_pause;
876 	nic_dev->hwdev->func_to_io.nic_cfg.tx_pause = pause->tx_pause;
877 	mutex_unlock(&nic_dev->hwdev->func_to_io.nic_cfg.cfg_mutex);
878 
879 	return 0;
880 }
881 
882 static void hinic_get_channels(struct net_device *netdev,
883 			       struct ethtool_channels *channels)
884 {
885 	struct hinic_dev *nic_dev = netdev_priv(netdev);
886 	struct hinic_hwdev *hwdev = nic_dev->hwdev;
887 
888 	channels->max_combined = nic_dev->max_qps;
889 	channels->combined_count = hinic_hwdev_num_qps(hwdev);
890 }
891 
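/* Changing the combined channel count updates num_qps and, if the interface
 * is running, restarts it.  The ethtool core limits the requested count to
 * the max_combined value reported by hinic_get_channels().  For illustration
 * (interface name is illustrative):
 *   ethtool -L eth0 combined 8
 */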
892 static int hinic_set_channels(struct net_device *netdev,
893 			      struct ethtool_channels *channels)
894 {
895 	struct hinic_dev *nic_dev = netdev_priv(netdev);
896 	unsigned int count = channels->combined_count;
897 	int err;
898 
899 	netif_info(nic_dev, drv, netdev, "Set max combined queue number from %d to %d\n",
900 		   hinic_hwdev_num_qps(nic_dev->hwdev), count);
901 
902 	if (netif_running(netdev)) {
903 		netif_info(nic_dev, drv, netdev, "Restarting netdev\n");
904 		hinic_close(netdev);
905 
906 		nic_dev->hwdev->nic_cap.num_qps = count;
907 
908 		err = hinic_open(netdev);
909 		if (err) {
910 			netif_err(nic_dev, drv, netdev,
911 				  "Failed to open netdev\n");
912 			return -EFAULT;
913 		}
914 	} else {
915 		nic_dev->hwdev->nic_cap.num_qps = count;
916 	}
917 
918 	return 0;
919 }
920 
921 static int hinic_get_rss_hash_opts(struct hinic_dev *nic_dev,
922 				   struct ethtool_rxnfc *cmd)
923 {
924 	struct hinic_rss_type rss_type = { 0 };
925 	int err;
926 
927 	cmd->data = 0;
928 
929 	if (!(nic_dev->flags & HINIC_RSS_ENABLE))
930 		return 0;
931 
932 	err = hinic_get_rss_type(nic_dev, nic_dev->rss_tmpl_idx,
933 				 &rss_type);
934 	if (err)
935 		return err;
936 
937 	cmd->data = RXH_IP_SRC | RXH_IP_DST;
938 	switch (cmd->flow_type) {
939 	case TCP_V4_FLOW:
940 		if (rss_type.tcp_ipv4)
941 			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
942 		break;
943 	case TCP_V6_FLOW:
944 		if (rss_type.tcp_ipv6)
945 			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
946 		break;
947 	case UDP_V4_FLOW:
948 		if (rss_type.udp_ipv4)
949 			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
950 		break;
951 	case UDP_V6_FLOW:
952 		if (rss_type.udp_ipv6)
953 			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
954 		break;
955 	case IPV4_FLOW:
956 	case IPV6_FLOW:
957 		break;
958 	default:
959 		cmd->data = 0;
960 		return -EINVAL;
961 	}
962 
963 	return 0;
964 }
965 
966 static int set_l4_rss_hash_ops(struct ethtool_rxnfc *cmd,
967 			       struct hinic_rss_type *rss_type)
968 {
969 	u8 rss_l4_en = 0;
970 
971 	switch (cmd->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
972 	case 0:
973 		rss_l4_en = 0;
974 		break;
975 	case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
976 		rss_l4_en = 1;
977 		break;
978 	default:
979 		return -EINVAL;
980 	}
981 
982 	switch (cmd->flow_type) {
983 	case TCP_V4_FLOW:
984 		rss_type->tcp_ipv4 = rss_l4_en;
985 		break;
986 	case TCP_V6_FLOW:
987 		rss_type->tcp_ipv6 = rss_l4_en;
988 		break;
989 	case UDP_V4_FLOW:
990 		rss_type->udp_ipv4 = rss_l4_en;
991 		break;
992 	case UDP_V6_FLOW:
993 		rss_type->udp_ipv6 = rss_l4_en;
994 		break;
995 	default:
996 		return -EINVAL;
997 	}
998 
999 	return 0;
1000 }
1001 
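/* Translate an ETHTOOL_SRXFH request into the hinic RSS type bits.  For
 * illustration, hashing TCP/IPv4 flows on addresses and ports corresponds to
 * (interface name is illustrative):
 *   ethtool -N eth0 rx-flow-hash tcp4 sdfn
 */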
1002 static int hinic_set_rss_hash_opts(struct hinic_dev *nic_dev,
1003 				   struct ethtool_rxnfc *cmd)
1004 {
1005 	struct hinic_rss_type *rss_type = &nic_dev->rss_type;
1006 	int err;
1007 
1008 	if (!(nic_dev->flags & HINIC_RSS_ENABLE)) {
1009 		cmd->data = 0;
1010 		return -EOPNOTSUPP;
1011 	}
1012 
1013 	/* RSS does not support anything other than hashing
1014 	 * to queues on src and dst IPs and ports
1015 	 */
1016 	if (cmd->data & ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 |
1017 		RXH_L4_B_2_3))
1018 		return -EINVAL;
1019 
1020 	/* We need at least the IP SRC and DEST fields for hashing */
1021 	if (!(cmd->data & RXH_IP_SRC) || !(cmd->data & RXH_IP_DST))
1022 		return -EINVAL;
1023 
1024 	err = hinic_get_rss_type(nic_dev,
1025 				 nic_dev->rss_tmpl_idx, rss_type);
1026 	if (err)
1027 		return -EFAULT;
1028 
1029 	switch (cmd->flow_type) {
1030 	case TCP_V4_FLOW:
1031 	case TCP_V6_FLOW:
1032 	case UDP_V4_FLOW:
1033 	case UDP_V6_FLOW:
1034 		err = set_l4_rss_hash_ops(cmd, rss_type);
1035 		if (err)
1036 			return err;
1037 		break;
1038 	case IPV4_FLOW:
1039 		rss_type->ipv4 = 1;
1040 		break;
1041 	case IPV6_FLOW:
1042 		rss_type->ipv6 = 1;
1043 		break;
1044 	default:
1045 		return -EINVAL;
1046 	}
1047 
1048 	err = hinic_set_rss_type(nic_dev, nic_dev->rss_tmpl_idx,
1049 				 *rss_type);
1050 	if (err)
1051 		return -EFAULT;
1052 
1053 	return 0;
1054 }
1055 
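/* Cache the user-supplied RSS indirection table and hash key in the driver
 * and program them into the hardware RSS template.
 */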
1056 static int __set_rss_rxfh(struct net_device *netdev,
1057 			  const u32 *indir, const u8 *key)
1058 {
1059 	struct hinic_dev *nic_dev = netdev_priv(netdev);
1060 	int err;
1061 
1062 	if (indir) {
1063 		if (!nic_dev->rss_indir_user) {
1064 			nic_dev->rss_indir_user =
1065 				kzalloc(sizeof(u32) * HINIC_RSS_INDIR_SIZE,
1066 					GFP_KERNEL);
1067 			if (!nic_dev->rss_indir_user)
1068 				return -ENOMEM;
1069 		}
1070 
1071 		memcpy(nic_dev->rss_indir_user, indir,
1072 		       sizeof(u32) * HINIC_RSS_INDIR_SIZE);
1073 
1074 		err = hinic_rss_set_indir_tbl(nic_dev,
1075 					      nic_dev->rss_tmpl_idx, indir);
1076 		if (err)
1077 			return -EFAULT;
1078 	}
1079 
1080 	if (key) {
1081 		if (!nic_dev->rss_hkey_user) {
1082 			nic_dev->rss_hkey_user =
1083 				kzalloc(HINIC_RSS_KEY_SIZE * 2, GFP_KERNEL);
1084 
1085 			if (!nic_dev->rss_hkey_user)
1086 				return -ENOMEM;
1087 		}
1088 
1089 		memcpy(nic_dev->rss_hkey_user, key, HINIC_RSS_KEY_SIZE);
1090 
1091 		err = hinic_rss_set_template_tbl(nic_dev,
1092 						 nic_dev->rss_tmpl_idx, key);
1093 		if (err)
1094 			return -EFAULT;
1095 	}
1096 
1097 	return 0;
1098 }
1099 
1100 static int hinic_get_rxnfc(struct net_device *netdev,
1101 			   struct ethtool_rxnfc *cmd, u32 *rule_locs)
1102 {
1103 	struct hinic_dev *nic_dev = netdev_priv(netdev);
1104 	int err = 0;
1105 
1106 	switch (cmd->cmd) {
1107 	case ETHTOOL_GRXRINGS:
1108 		cmd->data = nic_dev->num_qps;
1109 		break;
1110 	case ETHTOOL_GRXFH:
1111 		err = hinic_get_rss_hash_opts(nic_dev, cmd);
1112 		break;
1113 	default:
1114 		err = -EOPNOTSUPP;
1115 		break;
1116 	}
1117 
1118 	return err;
1119 }
1120 
1121 static int hinic_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
1122 {
1123 	struct hinic_dev *nic_dev = netdev_priv(netdev);
1124 	int err = 0;
1125 
1126 	switch (cmd->cmd) {
1127 	case ETHTOOL_SRXFH:
1128 		err = hinic_set_rss_hash_opts(nic_dev, cmd);
1129 		break;
1130 	default:
1131 		err = -EOPNOTSUPP;
1132 		break;
1133 	}
1134 
1135 	return err;
1136 }
1137 
1138 static int hinic_get_rxfh(struct net_device *netdev,
1139 			  u32 *indir, u8 *key, u8 *hfunc)
1140 {
1141 	struct hinic_dev *nic_dev = netdev_priv(netdev);
1142 	u8 hash_engine_type = 0;
1143 	int err = 0;
1144 
1145 	if (!(nic_dev->flags & HINIC_RSS_ENABLE))
1146 		return -EOPNOTSUPP;
1147 
1148 	if (hfunc) {
1149 		err = hinic_rss_get_hash_engine(nic_dev,
1150 						nic_dev->rss_tmpl_idx,
1151 						&hash_engine_type);
1152 		if (err)
1153 			return -EFAULT;
1154 
1155 		*hfunc = hash_engine_type ? ETH_RSS_HASH_TOP : ETH_RSS_HASH_XOR;
1156 	}
1157 
1158 	if (indir) {
1159 		err = hinic_rss_get_indir_tbl(nic_dev,
1160 					      nic_dev->rss_tmpl_idx, indir);
1161 		if (err)
1162 			return -EFAULT;
1163 	}
1164 
1165 	if (key)
1166 		err = hinic_rss_get_template_tbl(nic_dev,
1167 						 nic_dev->rss_tmpl_idx, key);
1168 
1169 	return err;
1170 }
1171 
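/* ETH_RSS_HASH_TOP selects the Toeplitz hash engine and ETH_RSS_HASH_XOR the
 * XOR engine; the indirection table and key are applied afterwards.  For
 * illustration (interface name is illustrative):
 *   ethtool -X eth0 hfunc xor
 */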
1172 static int hinic_set_rxfh(struct net_device *netdev, const u32 *indir,
1173 			  const u8 *key, const u8 hfunc)
1174 {
1175 	struct hinic_dev *nic_dev = netdev_priv(netdev);
1176 	int err = 0;
1177 
1178 	if (!(nic_dev->flags & HINIC_RSS_ENABLE))
1179 		return -EOPNOTSUPP;
1180 
1181 	if (hfunc != ETH_RSS_HASH_NO_CHANGE) {
1182 		if (hfunc != ETH_RSS_HASH_TOP && hfunc != ETH_RSS_HASH_XOR)
1183 			return -EOPNOTSUPP;
1184 
1185 		nic_dev->rss_hash_engine = (hfunc == ETH_RSS_HASH_XOR) ?
1186 			HINIC_RSS_HASH_ENGINE_TYPE_XOR :
1187 			HINIC_RSS_HASH_ENGINE_TYPE_TOEP;
1188 		err = hinic_rss_set_hash_engine
1189 			(nic_dev, nic_dev->rss_tmpl_idx,
1190 			nic_dev->rss_hash_engine);
1191 		if (err)
1192 			return -EFAULT;
1193 	}
1194 
1195 	err = __set_rss_rxfh(netdev, indir, key);
1196 
1197 	return err;
1198 }
1199 
1200 static u32 hinic_get_rxfh_key_size(struct net_device *netdev)
1201 {
1202 	return HINIC_RSS_KEY_SIZE;
1203 }
1204 
1205 static u32 hinic_get_rxfh_indir_size(struct net_device *netdev)
1206 {
1207 	return HINIC_RSS_INDIR_SIZE;
1208 }
1209 
1210 #define ARRAY_LEN(arr) ((int)((int)sizeof(arr) / (int)sizeof(arr[0])))
1211 
1212 #define HINIC_FUNC_STAT(_stat_item) {	\
1213 	.name = #_stat_item, \
1214 	.size = sizeof_field(struct hinic_vport_stats, _stat_item), \
1215 	.offset = offsetof(struct hinic_vport_stats, _stat_item) \
1216 }
1217 
1218 static struct hinic_stats hinic_function_stats[] = {
1219 	HINIC_FUNC_STAT(tx_unicast_pkts_vport),
1220 	HINIC_FUNC_STAT(tx_unicast_bytes_vport),
1221 	HINIC_FUNC_STAT(tx_multicast_pkts_vport),
1222 	HINIC_FUNC_STAT(tx_multicast_bytes_vport),
1223 	HINIC_FUNC_STAT(tx_broadcast_pkts_vport),
1224 	HINIC_FUNC_STAT(tx_broadcast_bytes_vport),
1225 
1226 	HINIC_FUNC_STAT(rx_unicast_pkts_vport),
1227 	HINIC_FUNC_STAT(rx_unicast_bytes_vport),
1228 	HINIC_FUNC_STAT(rx_multicast_pkts_vport),
1229 	HINIC_FUNC_STAT(rx_multicast_bytes_vport),
1230 	HINIC_FUNC_STAT(rx_broadcast_pkts_vport),
1231 	HINIC_FUNC_STAT(rx_broadcast_bytes_vport),
1232 
1233 	HINIC_FUNC_STAT(tx_discard_vport),
1234 	HINIC_FUNC_STAT(rx_discard_vport),
1235 	HINIC_FUNC_STAT(tx_err_vport),
1236 	HINIC_FUNC_STAT(rx_err_vport),
1237 };
1238 
1239 static char hinic_test_strings[][ETH_GSTRING_LEN] = {
	"Internal lb test (on/offline)",
1241 	"External lb test (external_lb)",
1242 };
1243 
1244 #define HINIC_PORT_STAT(_stat_item) { \
1245 	.name = #_stat_item, \
1246 	.size = sizeof_field(struct hinic_phy_port_stats, _stat_item), \
1247 	.offset = offsetof(struct hinic_phy_port_stats, _stat_item) \
1248 }
1249 
1250 static struct hinic_stats hinic_port_stats[] = {
1251 	HINIC_PORT_STAT(mac_rx_total_pkt_num),
1252 	HINIC_PORT_STAT(mac_rx_total_oct_num),
1253 	HINIC_PORT_STAT(mac_rx_bad_pkt_num),
1254 	HINIC_PORT_STAT(mac_rx_bad_oct_num),
1255 	HINIC_PORT_STAT(mac_rx_good_pkt_num),
1256 	HINIC_PORT_STAT(mac_rx_good_oct_num),
1257 	HINIC_PORT_STAT(mac_rx_uni_pkt_num),
1258 	HINIC_PORT_STAT(mac_rx_multi_pkt_num),
1259 	HINIC_PORT_STAT(mac_rx_broad_pkt_num),
1260 	HINIC_PORT_STAT(mac_tx_total_pkt_num),
1261 	HINIC_PORT_STAT(mac_tx_total_oct_num),
1262 	HINIC_PORT_STAT(mac_tx_bad_pkt_num),
1263 	HINIC_PORT_STAT(mac_tx_bad_oct_num),
1264 	HINIC_PORT_STAT(mac_tx_good_pkt_num),
1265 	HINIC_PORT_STAT(mac_tx_good_oct_num),
1266 	HINIC_PORT_STAT(mac_tx_uni_pkt_num),
1267 	HINIC_PORT_STAT(mac_tx_multi_pkt_num),
1268 	HINIC_PORT_STAT(mac_tx_broad_pkt_num),
1269 	HINIC_PORT_STAT(mac_rx_fragment_pkt_num),
1270 	HINIC_PORT_STAT(mac_rx_undersize_pkt_num),
1271 	HINIC_PORT_STAT(mac_rx_undermin_pkt_num),
1272 	HINIC_PORT_STAT(mac_rx_64_oct_pkt_num),
1273 	HINIC_PORT_STAT(mac_rx_65_127_oct_pkt_num),
1274 	HINIC_PORT_STAT(mac_rx_128_255_oct_pkt_num),
1275 	HINIC_PORT_STAT(mac_rx_256_511_oct_pkt_num),
1276 	HINIC_PORT_STAT(mac_rx_512_1023_oct_pkt_num),
1277 	HINIC_PORT_STAT(mac_rx_1024_1518_oct_pkt_num),
1278 	HINIC_PORT_STAT(mac_rx_1519_2047_oct_pkt_num),
1279 	HINIC_PORT_STAT(mac_rx_2048_4095_oct_pkt_num),
1280 	HINIC_PORT_STAT(mac_rx_4096_8191_oct_pkt_num),
1281 	HINIC_PORT_STAT(mac_rx_8192_9216_oct_pkt_num),
1282 	HINIC_PORT_STAT(mac_rx_9217_12287_oct_pkt_num),
1283 	HINIC_PORT_STAT(mac_rx_12288_16383_oct_pkt_num),
1284 	HINIC_PORT_STAT(mac_rx_1519_max_good_pkt_num),
1285 	HINIC_PORT_STAT(mac_rx_1519_max_bad_pkt_num),
1286 	HINIC_PORT_STAT(mac_rx_oversize_pkt_num),
1287 	HINIC_PORT_STAT(mac_rx_jabber_pkt_num),
1288 	HINIC_PORT_STAT(mac_rx_pause_num),
1289 	HINIC_PORT_STAT(mac_rx_pfc_pkt_num),
1290 	HINIC_PORT_STAT(mac_rx_pfc_pri0_pkt_num),
1291 	HINIC_PORT_STAT(mac_rx_pfc_pri1_pkt_num),
1292 	HINIC_PORT_STAT(mac_rx_pfc_pri2_pkt_num),
1293 	HINIC_PORT_STAT(mac_rx_pfc_pri3_pkt_num),
1294 	HINIC_PORT_STAT(mac_rx_pfc_pri4_pkt_num),
1295 	HINIC_PORT_STAT(mac_rx_pfc_pri5_pkt_num),
1296 	HINIC_PORT_STAT(mac_rx_pfc_pri6_pkt_num),
1297 	HINIC_PORT_STAT(mac_rx_pfc_pri7_pkt_num),
1298 	HINIC_PORT_STAT(mac_rx_control_pkt_num),
1299 	HINIC_PORT_STAT(mac_rx_sym_err_pkt_num),
1300 	HINIC_PORT_STAT(mac_rx_fcs_err_pkt_num),
1301 	HINIC_PORT_STAT(mac_rx_send_app_good_pkt_num),
1302 	HINIC_PORT_STAT(mac_rx_send_app_bad_pkt_num),
1303 	HINIC_PORT_STAT(mac_tx_fragment_pkt_num),
1304 	HINIC_PORT_STAT(mac_tx_undersize_pkt_num),
1305 	HINIC_PORT_STAT(mac_tx_undermin_pkt_num),
1306 	HINIC_PORT_STAT(mac_tx_64_oct_pkt_num),
1307 	HINIC_PORT_STAT(mac_tx_65_127_oct_pkt_num),
1308 	HINIC_PORT_STAT(mac_tx_128_255_oct_pkt_num),
1309 	HINIC_PORT_STAT(mac_tx_256_511_oct_pkt_num),
1310 	HINIC_PORT_STAT(mac_tx_512_1023_oct_pkt_num),
1311 	HINIC_PORT_STAT(mac_tx_1024_1518_oct_pkt_num),
1312 	HINIC_PORT_STAT(mac_tx_1519_2047_oct_pkt_num),
1313 	HINIC_PORT_STAT(mac_tx_2048_4095_oct_pkt_num),
1314 	HINIC_PORT_STAT(mac_tx_4096_8191_oct_pkt_num),
1315 	HINIC_PORT_STAT(mac_tx_8192_9216_oct_pkt_num),
1316 	HINIC_PORT_STAT(mac_tx_9217_12287_oct_pkt_num),
1317 	HINIC_PORT_STAT(mac_tx_12288_16383_oct_pkt_num),
1318 	HINIC_PORT_STAT(mac_tx_1519_max_good_pkt_num),
1319 	HINIC_PORT_STAT(mac_tx_1519_max_bad_pkt_num),
1320 	HINIC_PORT_STAT(mac_tx_oversize_pkt_num),
1321 	HINIC_PORT_STAT(mac_tx_jabber_pkt_num),
1322 	HINIC_PORT_STAT(mac_tx_pause_num),
1323 	HINIC_PORT_STAT(mac_tx_pfc_pkt_num),
1324 	HINIC_PORT_STAT(mac_tx_pfc_pri0_pkt_num),
1325 	HINIC_PORT_STAT(mac_tx_pfc_pri1_pkt_num),
1326 	HINIC_PORT_STAT(mac_tx_pfc_pri2_pkt_num),
1327 	HINIC_PORT_STAT(mac_tx_pfc_pri3_pkt_num),
1328 	HINIC_PORT_STAT(mac_tx_pfc_pri4_pkt_num),
1329 	HINIC_PORT_STAT(mac_tx_pfc_pri5_pkt_num),
1330 	HINIC_PORT_STAT(mac_tx_pfc_pri6_pkt_num),
1331 	HINIC_PORT_STAT(mac_tx_pfc_pri7_pkt_num),
1332 	HINIC_PORT_STAT(mac_tx_control_pkt_num),
1333 	HINIC_PORT_STAT(mac_tx_err_all_pkt_num),
1334 	HINIC_PORT_STAT(mac_tx_from_app_good_pkt_num),
1335 	HINIC_PORT_STAT(mac_tx_from_app_bad_pkt_num),
1336 };
1337 
1338 #define HINIC_TXQ_STAT(_stat_item) { \
1339 	.name = "txq%d_"#_stat_item, \
1340 	.size = sizeof_field(struct hinic_txq_stats, _stat_item), \
1341 	.offset = offsetof(struct hinic_txq_stats, _stat_item) \
1342 }
1343 
1344 static struct hinic_stats hinic_tx_queue_stats[] = {
1345 	HINIC_TXQ_STAT(pkts),
1346 	HINIC_TXQ_STAT(bytes),
1347 	HINIC_TXQ_STAT(tx_busy),
1348 	HINIC_TXQ_STAT(tx_wake),
1349 	HINIC_TXQ_STAT(tx_dropped),
1350 	HINIC_TXQ_STAT(big_frags_pkts),
1351 };
1352 
1353 #define HINIC_RXQ_STAT(_stat_item) { \
1354 	.name = "rxq%d_"#_stat_item, \
1355 	.size = sizeof_field(struct hinic_rxq_stats, _stat_item), \
1356 	.offset = offsetof(struct hinic_rxq_stats, _stat_item) \
1357 }
1358 
1359 static struct hinic_stats hinic_rx_queue_stats[] = {
1360 	HINIC_RXQ_STAT(pkts),
1361 	HINIC_RXQ_STAT(bytes),
1362 	HINIC_RXQ_STAT(errors),
1363 	HINIC_RXQ_STAT(csum_errors),
1364 	HINIC_RXQ_STAT(other_errors),
1365 };
1366 
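/* Fill the per-queue part of the ethtool stats buffer.  The ordering here
 * (all txq stats first, then all rxq stats) must match hinic_get_strings()
 * and hinic_get_sset_count().
 */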
1367 static void get_drv_queue_stats(struct hinic_dev *nic_dev, u64 *data)
1368 {
1369 	struct hinic_txq_stats txq_stats;
1370 	struct hinic_rxq_stats rxq_stats;
1371 	u16 i = 0, j = 0, qid = 0;
1372 	char *p;
1373 
1374 	for (qid = 0; qid < nic_dev->num_qps; qid++) {
1375 		if (!nic_dev->txqs)
1376 			break;
1377 
1378 		hinic_txq_get_stats(&nic_dev->txqs[qid], &txq_stats);
1379 		for (j = 0; j < ARRAY_LEN(hinic_tx_queue_stats); j++, i++) {
1380 			p = (char *)&txq_stats +
1381 				hinic_tx_queue_stats[j].offset;
1382 			data[i] = (hinic_tx_queue_stats[j].size ==
1383 					sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1384 		}
1385 	}
1386 
1387 	for (qid = 0; qid < nic_dev->num_qps; qid++) {
1388 		if (!nic_dev->rxqs)
1389 			break;
1390 
1391 		hinic_rxq_get_stats(&nic_dev->rxqs[qid], &rxq_stats);
1392 		for (j = 0; j < ARRAY_LEN(hinic_rx_queue_stats); j++, i++) {
1393 			p = (char *)&rxq_stats +
1394 				hinic_rx_queue_stats[j].offset;
1395 			data[i] = (hinic_rx_queue_stats[j].size ==
1396 					sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1397 		}
1398 	}
1399 }
1400 
1401 static void hinic_get_ethtool_stats(struct net_device *netdev,
1402 				    struct ethtool_stats *stats, u64 *data)
1403 {
1404 	struct hinic_dev *nic_dev = netdev_priv(netdev);
1405 	struct hinic_vport_stats vport_stats = {0};
1406 	struct hinic_phy_port_stats *port_stats;
1407 	u16 i = 0, j = 0;
1408 	char *p;
1409 	int err;
1410 
1411 	err = hinic_get_vport_stats(nic_dev, &vport_stats);
1412 	if (err)
1413 		netif_err(nic_dev, drv, netdev,
1414 			  "Failed to get vport stats from firmware\n");
1415 
1416 	for (j = 0; j < ARRAY_LEN(hinic_function_stats); j++, i++) {
1417 		p = (char *)&vport_stats + hinic_function_stats[j].offset;
1418 		data[i] = (hinic_function_stats[j].size ==
1419 				sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1420 	}
1421 
1422 	port_stats = kzalloc(sizeof(*port_stats), GFP_KERNEL);
1423 	if (!port_stats) {
1424 		memset(&data[i], 0,
1425 		       ARRAY_LEN(hinic_port_stats) * sizeof(*data));
1426 		i += ARRAY_LEN(hinic_port_stats);
1427 		goto get_drv_stats;
1428 	}
1429 
1430 	err = hinic_get_phy_port_stats(nic_dev, port_stats);
1431 	if (err)
1432 		netif_err(nic_dev, drv, netdev,
1433 			  "Failed to get port stats from firmware\n");
1434 
1435 	for (j = 0; j < ARRAY_LEN(hinic_port_stats); j++, i++) {
1436 		p = (char *)port_stats + hinic_port_stats[j].offset;
1437 		data[i] = (hinic_port_stats[j].size ==
1438 				sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1439 	}
1440 
1441 	kfree(port_stats);
1442 
1443 get_drv_stats:
1444 	get_drv_queue_stats(nic_dev, data + i);
1445 }
1446 
1447 static int hinic_get_sset_count(struct net_device *netdev, int sset)
1448 {
1449 	struct hinic_dev *nic_dev = netdev_priv(netdev);
1450 	int count, q_num;
1451 
1452 	switch (sset) {
1453 	case ETH_SS_TEST:
1454 		return ARRAY_LEN(hinic_test_strings);
1455 	case ETH_SS_STATS:
1456 		q_num = nic_dev->num_qps;
1457 		count = ARRAY_LEN(hinic_function_stats) +
1458 			(ARRAY_LEN(hinic_tx_queue_stats) +
1459 			ARRAY_LEN(hinic_rx_queue_stats)) * q_num;
1460 
1461 		count += ARRAY_LEN(hinic_port_stats);
1462 
1463 		return count;
1464 	default:
1465 		return -EOPNOTSUPP;
1466 	}
1467 }
1468 
1469 static void hinic_get_strings(struct net_device *netdev,
1470 			      u32 stringset, u8 *data)
1471 {
1472 	struct hinic_dev *nic_dev = netdev_priv(netdev);
1473 	char *p = (char *)data;
1474 	u16 i, j;
1475 
1476 	switch (stringset) {
1477 	case ETH_SS_TEST:
1478 		memcpy(data, *hinic_test_strings, sizeof(hinic_test_strings));
1479 		return;
1480 	case ETH_SS_STATS:
1481 		for (i = 0; i < ARRAY_LEN(hinic_function_stats); i++) {
1482 			memcpy(p, hinic_function_stats[i].name,
1483 			       ETH_GSTRING_LEN);
1484 			p += ETH_GSTRING_LEN;
1485 		}
1486 
1487 		for (i = 0; i < ARRAY_LEN(hinic_port_stats); i++) {
1488 			memcpy(p, hinic_port_stats[i].name,
1489 			       ETH_GSTRING_LEN);
1490 			p += ETH_GSTRING_LEN;
1491 		}
1492 
1493 		for (i = 0; i < nic_dev->num_qps; i++) {
1494 			for (j = 0; j < ARRAY_LEN(hinic_tx_queue_stats); j++) {
1495 				sprintf(p, hinic_tx_queue_stats[j].name, i);
1496 				p += ETH_GSTRING_LEN;
1497 			}
1498 		}
1499 
1500 		for (i = 0; i < nic_dev->num_qps; i++) {
1501 			for (j = 0; j < ARRAY_LEN(hinic_rx_queue_stats); j++) {
1502 				sprintf(p, hinic_rx_queue_stats[j].name, i);
1503 				p += ETH_GSTRING_LEN;
1504 			}
1505 		}
1506 
1507 		return;
1508 	default:
1509 		return;
1510 	}
1511 }
1512 
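/* Build one LP_PKT_LEN loopback frame (broadcast-style MAC addresses with
 * the first source byte forced to 0xFE, EtherType 0x0800, payload byte i set
 * to i & 0xFF) and transmit LP_PKT_CNT copies per iteration, marking the
 * last payload byte with the packet index.  The frames collected in
 * lb_test_rx_buf are then compared against the original after a 200 ms wait.
 */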
1513 static int hinic_run_lp_test(struct hinic_dev *nic_dev, u32 test_time)
1514 {
1515 	u8 *lb_test_rx_buf = nic_dev->lb_test_rx_buf;
1516 	struct net_device *netdev = nic_dev->netdev;
1517 	struct sk_buff *skb_tmp = NULL;
1518 	struct sk_buff *skb = NULL;
1519 	u32 cnt = test_time * 5;
1520 	u8 *test_data = NULL;
1521 	u32 i;
1522 	u8 j;
1523 
1524 	skb_tmp = alloc_skb(LP_PKT_LEN, GFP_ATOMIC);
1525 	if (!skb_tmp)
1526 		return -ENOMEM;
1527 
1528 	test_data = __skb_put(skb_tmp, LP_PKT_LEN);
1529 
1530 	memset(test_data, 0xFF, 2 * ETH_ALEN);
1531 	test_data[ETH_ALEN] = 0xFE;
1532 	test_data[2 * ETH_ALEN] = 0x08;
1533 	test_data[2 * ETH_ALEN + 1] = 0x0;
1534 
1535 	for (i = ETH_HLEN; i < LP_PKT_LEN; i++)
1536 		test_data[i] = i & 0xFF;
1537 
1538 	skb_tmp->queue_mapping = 0;
1539 	skb_tmp->ip_summed = CHECKSUM_COMPLETE;
1540 	skb_tmp->dev = netdev;
1541 
1542 	for (i = 0; i < cnt; i++) {
1543 		nic_dev->lb_test_rx_idx = 0;
1544 		memset(lb_test_rx_buf, 0, LP_PKT_CNT * LP_PKT_LEN);
1545 
1546 		for (j = 0; j < LP_PKT_CNT; j++) {
1547 			skb = pskb_copy(skb_tmp, GFP_ATOMIC);
1548 			if (!skb) {
1549 				dev_kfree_skb_any(skb_tmp);
1550 				netif_err(nic_dev, drv, netdev,
1551 					  "Copy skb failed for loopback test\n");
1552 				return -ENOMEM;
1553 			}
1554 
1555 			/* mark index for every pkt */
1556 			skb->data[LP_PKT_LEN - 1] = j;
1557 
1558 			if (hinic_lb_xmit_frame(skb, netdev)) {
1559 				dev_kfree_skb_any(skb);
1560 				dev_kfree_skb_any(skb_tmp);
1561 				netif_err(nic_dev, drv, netdev,
1562 					  "Xmit pkt failed for loopback test\n");
1563 				return -EBUSY;
1564 			}
1565 		}
1566 
1567 		/* wait till all pkts received to RX buffer */
1568 		msleep(200);
1569 
1570 		for (j = 0; j < LP_PKT_CNT; j++) {
1571 			if (memcmp(lb_test_rx_buf + j * LP_PKT_LEN,
1572 				   skb_tmp->data, LP_PKT_LEN - 1) ||
1573 			    (*(lb_test_rx_buf + j * LP_PKT_LEN +
1574 			     LP_PKT_LEN - 1) != j)) {
1575 				dev_kfree_skb_any(skb_tmp);
1576 				netif_err(nic_dev, drv, netdev,
1577 					  "Compare pkt failed in loopback test(index=0x%02x, data[%d]=0x%02x)\n",
1578 					  j + i * LP_PKT_CNT,
1579 					  LP_PKT_LEN - 1,
1580 					  *(lb_test_rx_buf + j * LP_PKT_LEN +
1581 					    LP_PKT_LEN - 1));
1582 				return -EIO;
1583 			}
1584 		}
1585 	}
1586 
1587 	dev_kfree_skb_any(skb_tmp);
1588 	return 0;
1589 }
1590 
1591 static int do_lp_test(struct hinic_dev *nic_dev, u32 flags, u32 test_time,
1592 		      enum diag_test_index *test_index)
1593 {
1594 	struct net_device *netdev = nic_dev->netdev;
1595 	u8 *lb_test_rx_buf = NULL;
1596 	int err = 0;
1597 
1598 	if (!(flags & ETH_TEST_FL_EXTERNAL_LB)) {
1599 		*test_index = INTERNAL_LP_TEST;
1600 		if (hinic_set_loopback_mode(nic_dev->hwdev,
1601 					    HINIC_INTERNAL_LP_MODE, true)) {
1602 			netif_err(nic_dev, drv, netdev,
1603 				  "Failed to set port loopback mode before loopback test\n");
1604 			return -EIO;
1605 		}
1606 	} else {
1607 		*test_index = EXTERNAL_LP_TEST;
1608 	}
1609 
1610 	lb_test_rx_buf = vmalloc(LP_PKT_CNT * LP_PKT_LEN);
1611 	if (!lb_test_rx_buf) {
1612 		err = -ENOMEM;
1613 	} else {
1614 		nic_dev->lb_test_rx_buf = lb_test_rx_buf;
1615 		nic_dev->lb_pkt_len = LP_PKT_LEN;
1616 		nic_dev->flags |= HINIC_LP_TEST;
1617 		err = hinic_run_lp_test(nic_dev, test_time);
1618 		nic_dev->flags &= ~HINIC_LP_TEST;
1619 		msleep(100);
1620 		vfree(lb_test_rx_buf);
1621 		nic_dev->lb_test_rx_buf = NULL;
1622 	}
1623 
1624 	if (!(flags & ETH_TEST_FL_EXTERNAL_LB)) {
1625 		if (hinic_set_loopback_mode(nic_dev->hwdev,
1626 					    HINIC_INTERNAL_LP_MODE, false)) {
1627 			netif_err(nic_dev, drv, netdev,
1628 				  "Failed to cancel port loopback mode after loopback test\n");
1629 			err = -EIO;
1630 		}
1631 	}
1632 
1633 	return err;
1634 }
1635 
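/* ethtool self-test entry point: the internal loopback test is run unless
 * ETH_TEST_FL_EXTERNAL_LB is requested.  For illustration, with a recent
 * ethtool userspace (interface name is illustrative):
 *   ethtool -t eth0 offline
 *   ethtool -t eth0 external_lb
 */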
1636 static void hinic_diag_test(struct net_device *netdev,
1637 			    struct ethtool_test *eth_test, u64 *data)
1638 {
1639 	struct hinic_dev *nic_dev = netdev_priv(netdev);
1640 	enum hinic_port_link_state link_state;
1641 	enum diag_test_index test_index = 0;
1642 	int err = 0;
1643 
1644 	memset(data, 0, DIAG_TEST_MAX * sizeof(u64));
1645 
	/* The loopback test is not supported while the netdev is down. */
	if (!(nic_dev->flags & HINIC_INTF_UP)) {
		netif_err(nic_dev, drv, netdev,
			  "Loopback test is not supported when the netdev is down\n");
1650 		eth_test->flags |= ETH_TEST_FL_FAILED;
1651 		data[PORT_DOWN_ERR_IDX] = 1;
1652 		return;
1653 	}
1654 
1655 	netif_carrier_off(netdev);
1656 	netif_tx_disable(netdev);
1657 
1658 	err = do_lp_test(nic_dev, eth_test->flags, LP_DEFAULT_TIME,
1659 			 &test_index);
1660 	if (err) {
1661 		eth_test->flags |= ETH_TEST_FL_FAILED;
1662 		data[test_index] = 1;
1663 	}
1664 
1665 	netif_tx_wake_all_queues(netdev);
1666 
1667 	err = hinic_port_link_state(nic_dev, &link_state);
1668 	if (!err && link_state == HINIC_LINK_STATE_UP)
1669 		netif_carrier_on(netdev);
1670 }
1671 
1672 static int hinic_set_phys_id(struct net_device *netdev,
1673 			     enum ethtool_phys_id_state state)
1674 {
1675 	struct hinic_dev *nic_dev = netdev_priv(netdev);
1676 	int err = 0;
1677 	u8 port;
1678 
1679 	port = nic_dev->hwdev->port_id;
1680 
1681 	switch (state) {
1682 	case ETHTOOL_ID_ACTIVE:
1683 		err = hinic_set_led_status(nic_dev->hwdev, port,
1684 					   HINIC_LED_TYPE_LINK,
1685 					   HINIC_LED_MODE_FORCE_2HZ);
1686 		if (err)
1687 			netif_err(nic_dev, drv, netdev,
				  "Failed to set LED blinking at 2 Hz\n");
1689 		break;
1690 
1691 	case ETHTOOL_ID_INACTIVE:
1692 		err = hinic_reset_led_status(nic_dev->hwdev, port);
1693 		if (err)
1694 			netif_err(nic_dev, drv, netdev,
				  "Failed to reset LED to its original state\n");
1696 		break;
1697 
1698 	default:
1699 		return -EOPNOTSUPP;
1700 	}
1701 
1702 	return err;
1703 }
1704 
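/* The SFF-8024 identifier byte reported by firmware selects the EEPROM
 * layout (SFF-8472 for SFP, SFF-8436/SFF-8636 for QSFP variants) exposed
 * through the ethtool module EEPROM interface ("ethtool -m").
 */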
1705 static int hinic_get_module_info(struct net_device *netdev,
1706 				 struct ethtool_modinfo *modinfo)
1707 {
1708 	struct hinic_dev *nic_dev = netdev_priv(netdev);
1709 	u8 sfp_type_ext;
1710 	u8 sfp_type;
1711 	int err;
1712 
1713 	err = hinic_get_sfp_type(nic_dev->hwdev, &sfp_type, &sfp_type_ext);
1714 	if (err)
1715 		return err;
1716 
1717 	switch (sfp_type) {
1718 	case SFF8024_ID_SFP:
1719 		modinfo->type = ETH_MODULE_SFF_8472;
1720 		modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
1721 		break;
1722 	case SFF8024_ID_QSFP_8438:
1723 		modinfo->type = ETH_MODULE_SFF_8436;
1724 		modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
1725 		break;
1726 	case SFF8024_ID_QSFP_8436_8636:
1727 		if (sfp_type_ext >= 0x3) {
1728 			modinfo->type = ETH_MODULE_SFF_8636;
1729 			modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN;
1730 
1731 		} else {
1732 			modinfo->type = ETH_MODULE_SFF_8436;
1733 			modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
1734 		}
1735 		break;
1736 	case SFF8024_ID_QSFP28_8636:
1737 		modinfo->type = ETH_MODULE_SFF_8636;
1738 		modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN;
1739 		break;
1740 	default:
1741 		netif_warn(nic_dev, drv, netdev,
1742 			   "Optical module unknown: 0x%x\n", sfp_type);
1743 		return -EINVAL;
1744 	}
1745 
1746 	return 0;
1747 }
1748 
1749 static int hinic_get_module_eeprom(struct net_device *netdev,
1750 				   struct ethtool_eeprom *ee, u8 *data)
1751 {
1752 	struct hinic_dev *nic_dev = netdev_priv(netdev);
1753 	u8 sfp_data[STD_SFP_INFO_MAX_SIZE];
1754 	u16 len;
1755 	int err;
1756 
1757 	if (!ee->len || ((ee->len + ee->offset) > STD_SFP_INFO_MAX_SIZE))
1758 		return -EINVAL;
1759 
1760 	memset(data, 0, ee->len);
1761 
1762 	err = hinic_get_sfp_eeprom(nic_dev->hwdev, sfp_data, &len);
1763 	if (err)
1764 		return err;
1765 
1766 	memcpy(data, sfp_data + ee->offset, ee->len);
1767 
1768 	return 0;
1769 }
1770 
1771 static int
1772 hinic_get_link_ext_state(struct net_device *netdev,
1773 			 struct ethtool_link_ext_state_info *link_ext_state_info)
1774 {
1775 	struct hinic_dev *nic_dev = netdev_priv(netdev);
1776 
1777 	if (netif_carrier_ok(netdev))
1778 		return -ENODATA;
1779 
1780 	if (nic_dev->cable_unplugged)
1781 		link_ext_state_info->link_ext_state =
1782 			ETHTOOL_LINK_EXT_STATE_NO_CABLE;
1783 	else if (nic_dev->module_unrecognized)
1784 		link_ext_state_info->link_ext_state =
1785 			ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH;
1786 
1787 	return 0;
1788 }
1789 
1790 static const struct ethtool_ops hinic_ethtool_ops = {
1791 	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
1792 				     ETHTOOL_COALESCE_RX_MAX_FRAMES |
1793 				     ETHTOOL_COALESCE_TX_USECS |
1794 				     ETHTOOL_COALESCE_TX_MAX_FRAMES,
1795 
1796 	.get_link_ksettings = hinic_get_link_ksettings,
1797 	.set_link_ksettings = hinic_set_link_ksettings,
1798 	.get_drvinfo = hinic_get_drvinfo,
1799 	.get_link = ethtool_op_get_link,
1800 	.get_link_ext_state = hinic_get_link_ext_state,
1801 	.get_ringparam = hinic_get_ringparam,
1802 	.set_ringparam = hinic_set_ringparam,
1803 	.get_coalesce = hinic_get_coalesce,
1804 	.set_coalesce = hinic_set_coalesce,
1805 	.get_per_queue_coalesce = hinic_get_per_queue_coalesce,
1806 	.set_per_queue_coalesce = hinic_set_per_queue_coalesce,
1807 	.get_pauseparam = hinic_get_pauseparam,
1808 	.set_pauseparam = hinic_set_pauseparam,
1809 	.get_channels = hinic_get_channels,
1810 	.set_channels = hinic_set_channels,
1811 	.get_rxnfc = hinic_get_rxnfc,
1812 	.set_rxnfc = hinic_set_rxnfc,
1813 	.get_rxfh_key_size = hinic_get_rxfh_key_size,
1814 	.get_rxfh_indir_size = hinic_get_rxfh_indir_size,
1815 	.get_rxfh = hinic_get_rxfh,
1816 	.set_rxfh = hinic_set_rxfh,
1817 	.get_sset_count = hinic_get_sset_count,
1818 	.get_ethtool_stats = hinic_get_ethtool_stats,
1819 	.get_strings = hinic_get_strings,
1820 	.self_test = hinic_diag_test,
1821 	.set_phys_id = hinic_set_phys_id,
1822 	.get_module_info = hinic_get_module_info,
1823 	.get_module_eeprom = hinic_get_module_eeprom,
1824 };
1825 
1826 static const struct ethtool_ops hinicvf_ethtool_ops = {
1827 	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
1828 				     ETHTOOL_COALESCE_RX_MAX_FRAMES |
1829 				     ETHTOOL_COALESCE_TX_USECS |
1830 				     ETHTOOL_COALESCE_TX_MAX_FRAMES,
1831 
1832 	.get_link_ksettings = hinic_get_link_ksettings,
1833 	.get_drvinfo = hinic_get_drvinfo,
1834 	.get_link = ethtool_op_get_link,
1835 	.get_ringparam = hinic_get_ringparam,
1836 	.set_ringparam = hinic_set_ringparam,
1837 	.get_coalesce = hinic_get_coalesce,
1838 	.set_coalesce = hinic_set_coalesce,
1839 	.get_per_queue_coalesce = hinic_get_per_queue_coalesce,
1840 	.set_per_queue_coalesce = hinic_set_per_queue_coalesce,
1841 	.get_channels = hinic_get_channels,
1842 	.set_channels = hinic_set_channels,
1843 	.get_rxnfc = hinic_get_rxnfc,
1844 	.set_rxnfc = hinic_set_rxnfc,
1845 	.get_rxfh_key_size = hinic_get_rxfh_key_size,
1846 	.get_rxfh_indir_size = hinic_get_rxfh_indir_size,
1847 	.get_rxfh = hinic_get_rxfh,
1848 	.set_rxfh = hinic_set_rxfh,
1849 	.get_sset_count = hinic_get_sset_count,
1850 	.get_ethtool_stats = hinic_get_ethtool_stats,
1851 	.get_strings = hinic_get_strings,
1852 };
1853 
1854 void hinic_set_ethtool_ops(struct net_device *netdev)
1855 {
1856 	struct hinic_dev *nic_dev = netdev_priv(netdev);
1857 
1858 	if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
1859 		netdev->ethtool_ops = &hinic_ethtool_ops;
1860 	else
1861 		netdev->ethtool_ops = &hinicvf_ethtool_ops;
1862 }
1863