// SPDX-License-Identifier: GPL-2.0
/* Huawei HiNIC PCI Express Linux driver
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/ethtool.h>
#include <linux/vmalloc.h>
#include <linux/sfp.h>

#include "hinic_hw_qp.h"
#include "hinic_hw_dev.h"
#include "hinic_port.h"
#include "hinic_tx.h"
#include "hinic_rx.h"
#include "hinic_dev.h"

#define SET_LINK_STR_MAX_LEN	16

#define GET_SUPPORTED_MODE	0
#define GET_ADVERTISED_MODE	1

#define ETHTOOL_ADD_SUPPORTED_SPEED_LINK_MODE(ecmd, mode)	\
	((ecmd)->supported |=	\
	(1UL << hw_to_ethtool_link_mode_table[mode].link_mode_bit))
#define ETHTOOL_ADD_ADVERTISED_SPEED_LINK_MODE(ecmd, mode)	\
	((ecmd)->advertising |=	\
	(1UL << hw_to_ethtool_link_mode_table[mode].link_mode_bit))
#define ETHTOOL_ADD_SUPPORTED_LINK_MODE(ecmd, mode)	\
	((ecmd)->supported |= SUPPORTED_##mode)
#define ETHTOOL_ADD_ADVERTISED_LINK_MODE(ecmd, mode)	\
	((ecmd)->advertising |= ADVERTISED_##mode)

#define COALESCE_PENDING_LIMIT_UNIT	8
#define COALESCE_TIMER_CFG_UNIT		9
#define COALESCE_ALL_QUEUE		0xFFFF
#define COALESCE_MAX_PENDING_LIMIT	(255 * COALESCE_PENDING_LIMIT_UNIT)
#define COALESCE_MAX_TIMER_CFG		(255 * COALESCE_TIMER_CFG_UNIT)
#define OBJ_STR_MAX_LEN			32

struct hw2ethtool_link_mode {
	enum ethtool_link_mode_bit_indices link_mode_bit;
	u32 speed;
	enum hinic_link_mode hw_link_mode;
};

struct cmd_link_settings {
	u64	supported;
	u64	advertising;

	u32	speed;
	u8	duplex;
	u8	port;
	u8	autoneg;
};

static u32 hw_to_ethtool_speed[LINK_SPEED_LEVELS] = {
	SPEED_10, SPEED_100,
	SPEED_1000, SPEED_10000,
	SPEED_25000, SPEED_40000,
	SPEED_100000
};

static struct hw2ethtool_link_mode
	hw_to_ethtool_link_mode_table[HINIC_LINK_MODE_NUMBERS] = {
	{
		.link_mode_bit = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
		.speed = SPEED_10000,
		.hw_link_mode = HINIC_10GE_BASE_KR,
	},
	{
		.link_mode_bit = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
		.speed = SPEED_40000,
		.hw_link_mode = HINIC_40GE_BASE_KR4,
	},
	{
		.link_mode_bit = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
		.speed = SPEED_40000,
		.hw_link_mode = HINIC_40GE_BASE_CR4,
	},
	{
		.link_mode_bit = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
		.speed = SPEED_100000,
		.hw_link_mode = HINIC_100GE_BASE_KR4,
	},
	{
		.link_mode_bit = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
		.speed = SPEED_100000,
		.hw_link_mode = HINIC_100GE_BASE_CR4,
	},
	{
		.link_mode_bit = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
		.speed = SPEED_25000,
		.hw_link_mode = HINIC_25GE_BASE_KR_S,
	},
	{
		.link_mode_bit = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
		.speed = SPEED_25000,
		.hw_link_mode = HINIC_25GE_BASE_CR_S,
	},
	{
		.link_mode_bit = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
		.speed = SPEED_25000,
		.hw_link_mode = HINIC_25GE_BASE_KR,
	},
	{
		.link_mode_bit = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
		.speed = SPEED_25000,
		.hw_link_mode = HINIC_25GE_BASE_CR,
	},
	{
		.link_mode_bit = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
		.speed = SPEED_1000,
		.hw_link_mode = HINIC_GE_BASE_KX,
	},
};

#define LP_DEFAULT_TIME		5 /* seconds */
#define LP_PKT_LEN		1514

#define PORT_DOWN_ERR_IDX	0
enum diag_test_index {
	INTERNAL_LP_TEST = 0,
	EXTERNAL_LP_TEST = 1,
	DIAG_TEST_MAX = 2,
};

static void set_link_speed(struct ethtool_link_ksettings *link_ksettings,
			   enum hinic_speed speed)
{
	switch (speed) {
	case HINIC_SPEED_10MB_LINK:
		link_ksettings->base.speed = SPEED_10;
		break;

	case HINIC_SPEED_100MB_LINK:
		link_ksettings->base.speed = SPEED_100;
		break;

	case HINIC_SPEED_1000MB_LINK:
		link_ksettings->base.speed = SPEED_1000;
		break;

	case HINIC_SPEED_10GB_LINK:
		link_ksettings->base.speed = SPEED_10000;
		break;

	case HINIC_SPEED_25GB_LINK:
		link_ksettings->base.speed = SPEED_25000;
		break;

	case HINIC_SPEED_40GB_LINK:
		link_ksettings->base.speed = SPEED_40000;
		break;

	case HINIC_SPEED_100GB_LINK:
		link_ksettings->base.speed = SPEED_100000;
		break;

	default:
		link_ksettings->base.speed = SPEED_UNKNOWN;
		break;
	}
}

static int hinic_get_link_mode_index(enum hinic_link_mode link_mode)
{
	int i = 0;

	for (i = 0; i < HINIC_LINK_MODE_NUMBERS; i++) {
		if (link_mode == hw_to_ethtool_link_mode_table[i].hw_link_mode)
			break;
	}

	return i;
}

static void hinic_add_ethtool_link_mode(struct cmd_link_settings *link_settings,
					enum hinic_link_mode hw_link_mode,
					u32 name)
{
	enum hinic_link_mode link_mode;
	int idx = 0;

	for (link_mode = 0; link_mode < HINIC_LINK_MODE_NUMBERS; link_mode++) {
		if (hw_link_mode & ((u32)1 << link_mode)) {
			idx = hinic_get_link_mode_index(link_mode);
			if (idx >= HINIC_LINK_MODE_NUMBERS)
				continue;

			if (name == GET_SUPPORTED_MODE)
				ETHTOOL_ADD_SUPPORTED_SPEED_LINK_MODE
					(link_settings, idx);
			else
				ETHTOOL_ADD_ADVERTISED_SPEED_LINK_MODE
					(link_settings, idx);
		}
	}
}

static void hinic_link_port_type(struct cmd_link_settings *link_settings,
				 enum hinic_port_type port_type)
{
	switch (port_type) {
	case HINIC_PORT_ELEC:
	case HINIC_PORT_TP:
		ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, TP);
		ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, TP);
		link_settings->port = PORT_TP;
		break;

	case HINIC_PORT_AOC:
	case HINIC_PORT_FIBRE:
		ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, FIBRE);
		ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, FIBRE);
		link_settings->port = PORT_FIBRE;
		break;

	case HINIC_PORT_COPPER:
		ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, FIBRE);
		ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, FIBRE);
		link_settings->port = PORT_DA;
		break;

	case HINIC_PORT_BACKPLANE:
		ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, Backplane);
		ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, Backplane);
		link_settings->port = PORT_NONE;
		break;

	default:
		link_settings->port = PORT_OTHER;
		break;
	}
}

static int
hinic_get_link_ksettings(struct net_device *netdev,
			 struct ethtool_link_ksettings *link_ksettings)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	struct hinic_link_mode_cmd link_mode = { 0 };
	struct hinic_pause_config pause_info = { 0 };
	struct cmd_link_settings settings = { 0 };
	enum hinic_port_link_state link_state;
	struct hinic_port_cap port_cap;
	int err;

	ethtool_link_ksettings_zero_link_mode(link_ksettings, supported);
	ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);

	link_ksettings->base.speed = SPEED_UNKNOWN;
	link_ksettings->base.autoneg = AUTONEG_DISABLE;
	link_ksettings->base.duplex = DUPLEX_UNKNOWN;

	err = hinic_port_get_cap(nic_dev, &port_cap);
	if (err)
		return err;

	hinic_link_port_type(&settings, port_cap.port_type);
	link_ksettings->base.port = settings.port;

	err = hinic_port_link_state(nic_dev, &link_state);
	if (err)
		return err;

	if (link_state == HINIC_LINK_STATE_UP) {
		set_link_speed(link_ksettings, port_cap.speed);
		link_ksettings->base.duplex =
			(port_cap.duplex == HINIC_DUPLEX_FULL) ?
			DUPLEX_FULL : DUPLEX_HALF;
	}

	if (!!(port_cap.autoneg_cap & HINIC_AUTONEG_SUPPORTED))
		ethtool_link_ksettings_add_link_mode(link_ksettings,
						     advertising, Autoneg);

	if (port_cap.autoneg_state == HINIC_AUTONEG_ACTIVE)
		link_ksettings->base.autoneg = AUTONEG_ENABLE;

	err = hinic_get_link_mode(nic_dev->hwdev, &link_mode);
	if (err || link_mode.supported == HINIC_SUPPORTED_UNKNOWN ||
	    link_mode.advertised == HINIC_SUPPORTED_UNKNOWN)
		return -EIO;

	hinic_add_ethtool_link_mode(&settings, link_mode.supported,
				    GET_SUPPORTED_MODE);
	hinic_add_ethtool_link_mode(&settings, link_mode.advertised,
				    GET_ADVERTISED_MODE);

	if (!HINIC_IS_VF(nic_dev->hwdev->hwif)) {
		err = hinic_get_hw_pause_info(nic_dev->hwdev, &pause_info);
		if (err)
			return err;
		ETHTOOL_ADD_SUPPORTED_LINK_MODE(&settings, Pause);
		if (pause_info.rx_pause && pause_info.tx_pause) {
			ETHTOOL_ADD_ADVERTISED_LINK_MODE(&settings, Pause);
		} else if (pause_info.tx_pause) {
			ETHTOOL_ADD_ADVERTISED_LINK_MODE(&settings, Asym_Pause);
		} else if (pause_info.rx_pause) {
			ETHTOOL_ADD_ADVERTISED_LINK_MODE(&settings, Pause);
			ETHTOOL_ADD_ADVERTISED_LINK_MODE(&settings, Asym_Pause);
		}
	}

	bitmap_copy(link_ksettings->link_modes.supported,
		    (unsigned long *)&settings.supported,
		    __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_copy(link_ksettings->link_modes.advertising,
		    (unsigned long *)&settings.advertising,
		    __ETHTOOL_LINK_MODE_MASK_NBITS);

	return 0;
}

static int hinic_ethtool_to_hw_speed_level(u32 speed)
{
	int i;

	for (i = 0; i < LINK_SPEED_LEVELS; i++) {
		if (hw_to_ethtool_speed[i] == speed)
			break;
	}

	return i;
}

static bool hinic_is_support_speed(enum hinic_link_mode supported_link,
				   u32 speed)
{
	enum hinic_link_mode link_mode;
	int idx;

	for (link_mode = 0; link_mode < HINIC_LINK_MODE_NUMBERS; link_mode++) {
		if (!(supported_link & ((u32)1 << link_mode)))
			continue;

		idx = hinic_get_link_mode_index(link_mode);
		if (idx >= HINIC_LINK_MODE_NUMBERS)
			continue;

		if (hw_to_ethtool_link_mode_table[idx].speed == speed)
			return true;
	}

	return false;
}

static bool hinic_is_speed_legal(struct hinic_dev *nic_dev, u32 speed)
{
	struct hinic_link_mode_cmd link_mode = { 0 };
	struct net_device *netdev = nic_dev->netdev;
	enum nic_speed_level speed_level = 0;
	int err;

	err = hinic_get_link_mode(nic_dev->hwdev, &link_mode);
	if (err)
		return false;

	if (link_mode.supported == HINIC_SUPPORTED_UNKNOWN ||
	    link_mode.advertised == HINIC_SUPPORTED_UNKNOWN)
		return false;

	speed_level = hinic_ethtool_to_hw_speed_level(speed);
	if (speed_level >= LINK_SPEED_LEVELS ||
	    !hinic_is_support_speed(link_mode.supported, speed)) {
		netif_err(nic_dev, drv, netdev,
			  "Unsupported speed: %d\n", speed);
		return false;
	}

	return true;
}

static int get_link_settings_type(struct hinic_dev *nic_dev,
				  u8 autoneg, u32 speed, u32 *set_settings)
{
	struct hinic_port_cap port_cap = { 0 };
	int err;

	err = hinic_port_get_cap(nic_dev, &port_cap);
	if (err)
		return err;

	/* always set autonegotiation */
	if (port_cap.autoneg_cap)
		*set_settings |= HILINK_LINK_SET_AUTONEG;

	if (autoneg == AUTONEG_ENABLE) {
		if (!port_cap.autoneg_cap) {
			netif_err(nic_dev, drv, nic_dev->netdev, "Not support autoneg\n");
			return -EOPNOTSUPP;
		}
	} else if (speed != (u32)SPEED_UNKNOWN) {
		/* set speed only when autoneg is disabled */
		if (!hinic_is_speed_legal(nic_dev, speed))
			return -EINVAL;
		*set_settings |= HILINK_LINK_SET_SPEED;
	} else {
		netif_err(nic_dev, drv, nic_dev->netdev, "Need to set speed when autoneg is off\n");
		return -EOPNOTSUPP;
	}

	return 0;
}

static int set_link_settings_separate_cmd(struct hinic_dev *nic_dev,
					  u32 set_settings, u8 autoneg,
					  u32 speed)
{
	enum nic_speed_level speed_level = 0;
	int err = 0;

	if (set_settings & HILINK_LINK_SET_AUTONEG) {
		err = hinic_set_autoneg(nic_dev->hwdev,
					(autoneg == AUTONEG_ENABLE));
		if (err)
			netif_err(nic_dev, drv, nic_dev->netdev, "%s autoneg failed\n",
				  (autoneg == AUTONEG_ENABLE) ?
				  "Enable" : "Disable");
		else
			netif_info(nic_dev, drv, nic_dev->netdev, "%s autoneg successfully\n",
				   (autoneg == AUTONEG_ENABLE) ?
				   "Enable" : "Disable");
	}

	if (!err && (set_settings & HILINK_LINK_SET_SPEED)) {
		speed_level = hinic_ethtool_to_hw_speed_level(speed);
		err = hinic_set_speed(nic_dev->hwdev, speed_level);
		if (err)
			netif_err(nic_dev, drv, nic_dev->netdev, "Set speed %d failed\n",
				  speed);
		else
			netif_info(nic_dev, drv, nic_dev->netdev, "Set speed %d successfully\n",
				   speed);
	}

	return err;
}

static int hinic_set_settings_to_hw(struct hinic_dev *nic_dev,
				    u32 set_settings, u8 autoneg, u32 speed)
{
	struct hinic_link_ksettings_info settings = {0};
	char set_link_str[SET_LINK_STR_MAX_LEN] = {0};
	const char *autoneg_str;
	struct net_device *netdev = nic_dev->netdev;
	enum nic_speed_level speed_level = 0;
	int err;

	autoneg_str = (set_settings & HILINK_LINK_SET_AUTONEG) ?
		      (autoneg ?
"autong enable " : "autong disable ") : ""; 472 473 if (set_settings & HILINK_LINK_SET_SPEED) { 474 speed_level = hinic_ethtool_to_hw_speed_level(speed); 475 err = snprintf(set_link_str, SET_LINK_STR_MAX_LEN, 476 "speed %d ", speed); 477 if (err >= SET_LINK_STR_MAX_LEN) { 478 netif_err(nic_dev, drv, netdev, "Failed to snprintf link speed, function return(%d) and dest_len(%d)\n", 479 err, SET_LINK_STR_MAX_LEN); 480 return -EFAULT; 481 } 482 } 483 484 settings.func_id = HINIC_HWIF_FUNC_IDX(nic_dev->hwdev->hwif); 485 settings.valid_bitmap = set_settings; 486 settings.autoneg = autoneg; 487 settings.speed = speed_level; 488 489 err = hinic_set_link_settings(nic_dev->hwdev, &settings); 490 if (err != HINIC_MGMT_CMD_UNSUPPORTED) { 491 if (err) 492 netif_err(nic_dev, drv, netdev, "Set %s%sfailed\n", 493 autoneg_str, set_link_str); 494 else 495 netif_info(nic_dev, drv, netdev, "Set %s%ssuccessfully\n", 496 autoneg_str, set_link_str); 497 498 return err; 499 } 500 501 return set_link_settings_separate_cmd(nic_dev, set_settings, autoneg, 502 speed); 503 } 504 505 static int set_link_settings(struct net_device *netdev, u8 autoneg, u32 speed) 506 { 507 struct hinic_dev *nic_dev = netdev_priv(netdev); 508 u32 set_settings = 0; 509 int err; 510 511 err = get_link_settings_type(nic_dev, autoneg, speed, &set_settings); 512 if (err) 513 return err; 514 515 if (set_settings) 516 err = hinic_set_settings_to_hw(nic_dev, set_settings, 517 autoneg, speed); 518 else 519 netif_info(nic_dev, drv, netdev, "Nothing changed, exit without setting anything\n"); 520 521 return err; 522 } 523 524 static int hinic_set_link_ksettings(struct net_device *netdev, const struct 525 ethtool_link_ksettings *link_settings) 526 { 527 /* only support to set autoneg and speed */ 528 return set_link_settings(netdev, link_settings->base.autoneg, 529 link_settings->base.speed); 530 } 531 532 static void hinic_get_drvinfo(struct net_device *netdev, 533 struct ethtool_drvinfo *info) 534 { 535 struct hinic_dev *nic_dev = netdev_priv(netdev); 536 u8 mgmt_ver[HINIC_MGMT_VERSION_MAX_LEN] = {0}; 537 struct hinic_hwdev *hwdev = nic_dev->hwdev; 538 struct hinic_hwif *hwif = hwdev->hwif; 539 int err; 540 541 strscpy(info->driver, HINIC_DRV_NAME, sizeof(info->driver)); 542 strscpy(info->bus_info, pci_name(hwif->pdev), sizeof(info->bus_info)); 543 544 err = hinic_get_mgmt_version(nic_dev, mgmt_ver); 545 if (err) 546 return; 547 548 snprintf(info->fw_version, sizeof(info->fw_version), "%s", mgmt_ver); 549 } 550 551 static void hinic_get_ringparam(struct net_device *netdev, 552 struct ethtool_ringparam *ring) 553 { 554 struct hinic_dev *nic_dev = netdev_priv(netdev); 555 556 ring->rx_max_pending = HINIC_MAX_QUEUE_DEPTH; 557 ring->tx_max_pending = HINIC_MAX_QUEUE_DEPTH; 558 ring->rx_pending = nic_dev->rq_depth; 559 ring->tx_pending = nic_dev->sq_depth; 560 } 561 562 static int check_ringparam_valid(struct hinic_dev *nic_dev, 563 struct ethtool_ringparam *ring) 564 { 565 if (ring->rx_jumbo_pending || ring->rx_mini_pending) { 566 netif_err(nic_dev, drv, nic_dev->netdev, 567 "Unsupported rx_jumbo_pending/rx_mini_pending\n"); 568 return -EINVAL; 569 } 570 571 if (ring->tx_pending > HINIC_MAX_QUEUE_DEPTH || 572 ring->tx_pending < HINIC_MIN_QUEUE_DEPTH || 573 ring->rx_pending > HINIC_MAX_QUEUE_DEPTH || 574 ring->rx_pending < HINIC_MIN_QUEUE_DEPTH) { 575 netif_err(nic_dev, drv, nic_dev->netdev, 576 "Queue depth out of range [%d-%d]\n", 577 HINIC_MIN_QUEUE_DEPTH, HINIC_MAX_QUEUE_DEPTH); 578 return -EINVAL; 579 } 580 581 return 0; 582 } 583 584 static int 
hinic_set_ringparam(struct net_device *netdev,
		    struct ethtool_ringparam *ring)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	u16 new_sq_depth, new_rq_depth;
	int err;

	err = check_ringparam_valid(nic_dev, ring);
	if (err)
		return err;

	new_sq_depth = (u16)(1U << (u16)ilog2(ring->tx_pending));
	new_rq_depth = (u16)(1U << (u16)ilog2(ring->rx_pending));

	if (new_sq_depth == nic_dev->sq_depth &&
	    new_rq_depth == nic_dev->rq_depth)
		return 0;

	netif_info(nic_dev, drv, netdev,
		   "Change Tx/Rx ring depth from %d/%d to %d/%d\n",
		   nic_dev->sq_depth, nic_dev->rq_depth,
		   new_sq_depth, new_rq_depth);

	nic_dev->sq_depth = new_sq_depth;
	nic_dev->rq_depth = new_rq_depth;

	if (netif_running(netdev)) {
		netif_info(nic_dev, drv, netdev, "Restarting netdev\n");
		err = hinic_close(netdev);
		if (err) {
			netif_err(nic_dev, drv, netdev,
				  "Failed to close netdev\n");
			return -EFAULT;
		}

		err = hinic_open(netdev);
		if (err) {
			netif_err(nic_dev, drv, netdev,
				  "Failed to open netdev\n");
			return -EFAULT;
		}
	}

	return 0;
}

static int __hinic_get_coalesce(struct net_device *netdev,
				struct ethtool_coalesce *coal, u16 queue)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	struct hinic_intr_coal_info *rx_intr_coal_info;
	struct hinic_intr_coal_info *tx_intr_coal_info;

	if (queue == COALESCE_ALL_QUEUE) {
		/* get tx/rx irq0 as default parameters */
		rx_intr_coal_info = &nic_dev->rx_intr_coalesce[0];
		tx_intr_coal_info = &nic_dev->tx_intr_coalesce[0];
	} else {
		if (queue >= nic_dev->num_qps) {
			netif_err(nic_dev, drv, netdev,
				  "Invalid queue_id: %d\n", queue);
			return -EINVAL;
		}
		rx_intr_coal_info = &nic_dev->rx_intr_coalesce[queue];
		tx_intr_coal_info = &nic_dev->tx_intr_coalesce[queue];
	}

	/* coalesce_timer is in unit of 9us */
	coal->rx_coalesce_usecs = rx_intr_coal_info->coalesce_timer_cfg *
			COALESCE_TIMER_CFG_UNIT;
	/* coalesced_frames is in unit of 8 */
	coal->rx_max_coalesced_frames = rx_intr_coal_info->pending_limt *
			COALESCE_PENDING_LIMIT_UNIT;
	coal->tx_coalesce_usecs = tx_intr_coal_info->coalesce_timer_cfg *
			COALESCE_TIMER_CFG_UNIT;
	coal->tx_max_coalesced_frames = tx_intr_coal_info->pending_limt *
			COALESCE_PENDING_LIMIT_UNIT;

	return 0;
}

static int is_coalesce_exceed_limit(const struct ethtool_coalesce *coal)
{
	if (coal->rx_coalesce_usecs > COALESCE_MAX_TIMER_CFG ||
	    coal->rx_max_coalesced_frames > COALESCE_MAX_PENDING_LIMIT ||
	    coal->tx_coalesce_usecs > COALESCE_MAX_TIMER_CFG ||
	    coal->tx_max_coalesced_frames > COALESCE_MAX_PENDING_LIMIT)
		return -ERANGE;

	return 0;
}

static int set_queue_coalesce(struct hinic_dev *nic_dev, u16 q_id,
			      struct hinic_intr_coal_info *coal,
			      bool set_rx_coal)
{
	struct hinic_intr_coal_info *intr_coal = NULL;
	struct hinic_msix_config interrupt_info = {0};
	struct net_device *netdev = nic_dev->netdev;
	u16 msix_idx;
	int err;

	intr_coal = set_rx_coal ? &nic_dev->rx_intr_coalesce[q_id] :
		    &nic_dev->tx_intr_coalesce[q_id];

	intr_coal->coalesce_timer_cfg = coal->coalesce_timer_cfg;
	intr_coal->pending_limt = coal->pending_limt;

	/* netdev not running or qp not in using,
	 * don't need to set coalesce to hw
	 */
	if (!(nic_dev->flags & HINIC_INTF_UP) ||
	    q_id >= nic_dev->num_qps)
		return 0;

	msix_idx = set_rx_coal ? nic_dev->rxqs[q_id].rq->msix_entry :
		   nic_dev->txqs[q_id].sq->msix_entry;
	interrupt_info.msix_index = msix_idx;
	interrupt_info.coalesce_timer_cnt = intr_coal->coalesce_timer_cfg;
	interrupt_info.pending_cnt = intr_coal->pending_limt;
	interrupt_info.resend_timer_cnt = intr_coal->resend_timer_cfg;

	err = hinic_set_interrupt_cfg(nic_dev->hwdev, &interrupt_info);
	if (err)
		netif_warn(nic_dev, drv, netdev,
			   "Failed to set %s queue%d coalesce",
			   set_rx_coal ? "rx" : "tx", q_id);

	return err;
}

static int __set_hw_coal_param(struct hinic_dev *nic_dev,
			       struct hinic_intr_coal_info *intr_coal,
			       u16 queue, bool set_rx_coal)
{
	int err;
	u16 i;

	if (queue == COALESCE_ALL_QUEUE) {
		for (i = 0; i < nic_dev->max_qps; i++) {
			err = set_queue_coalesce(nic_dev, i, intr_coal,
						 set_rx_coal);
			if (err)
				return err;
		}
	} else {
		if (queue >= nic_dev->num_qps) {
			netif_err(nic_dev, drv, nic_dev->netdev,
				  "Invalid queue_id: %d\n", queue);
			return -EINVAL;
		}
		err = set_queue_coalesce(nic_dev, queue, intr_coal,
					 set_rx_coal);
		if (err)
			return err;
	}

	return 0;
}

static int __hinic_set_coalesce(struct net_device *netdev,
				struct ethtool_coalesce *coal, u16 queue)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	struct hinic_intr_coal_info rx_intr_coal = {0};
	struct hinic_intr_coal_info tx_intr_coal = {0};
	bool set_rx_coal = false;
	bool set_tx_coal = false;
	int err;

	err = is_coalesce_exceed_limit(coal);
	if (err)
		return err;

	if (coal->rx_coalesce_usecs || coal->rx_max_coalesced_frames) {
		rx_intr_coal.coalesce_timer_cfg =
			(u8)(coal->rx_coalesce_usecs / COALESCE_TIMER_CFG_UNIT);
		rx_intr_coal.pending_limt = (u8)(coal->rx_max_coalesced_frames /
						 COALESCE_PENDING_LIMIT_UNIT);
		set_rx_coal = true;
	}

	if (coal->tx_coalesce_usecs || coal->tx_max_coalesced_frames) {
		tx_intr_coal.coalesce_timer_cfg =
			(u8)(coal->tx_coalesce_usecs / COALESCE_TIMER_CFG_UNIT);
		tx_intr_coal.pending_limt = (u8)(coal->tx_max_coalesced_frames /
						 COALESCE_PENDING_LIMIT_UNIT);
		set_tx_coal = true;
	}

	/* setting coalesce timer or pending limit to zero will disable
	 * coalesce
	 */
	if (set_rx_coal && (!rx_intr_coal.coalesce_timer_cfg ||
			    !rx_intr_coal.pending_limt))
		netif_warn(nic_dev, drv, netdev, "RX coalesce will be disabled\n");
	if (set_tx_coal && (!tx_intr_coal.coalesce_timer_cfg ||
			    !tx_intr_coal.pending_limt))
		netif_warn(nic_dev, drv, netdev, "TX coalesce will be disabled\n");

	if (set_rx_coal) {
		err = __set_hw_coal_param(nic_dev, &rx_intr_coal, queue, true);
		if (err)
			return err;
	}
	if (set_tx_coal) {
		err = __set_hw_coal_param(nic_dev, &tx_intr_coal, queue, false);
		if (err)
			return err;
	}
	return 0;
}

static int hinic_get_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *coal)
{
	return __hinic_get_coalesce(netdev, coal, COALESCE_ALL_QUEUE);
}

static int
hinic_set_coalesce(struct net_device *netdev,
		   struct ethtool_coalesce *coal)
{
	return __hinic_set_coalesce(netdev, coal, COALESCE_ALL_QUEUE);
}

static int hinic_get_per_queue_coalesce(struct net_device *netdev, u32 queue,
					struct ethtool_coalesce *coal)
{
	return __hinic_get_coalesce(netdev, coal, queue);
}

static int hinic_set_per_queue_coalesce(struct net_device *netdev, u32 queue,
					struct ethtool_coalesce *coal)
{
	return __hinic_set_coalesce(netdev, coal, queue);
}

static void hinic_get_pauseparam(struct net_device *netdev,
				 struct ethtool_pauseparam *pause)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	struct hinic_pause_config pause_info = {0};
	struct hinic_nic_cfg *nic_cfg;
	int err;

	nic_cfg = &nic_dev->hwdev->func_to_io.nic_cfg;

	err = hinic_get_hw_pause_info(nic_dev->hwdev, &pause_info);
	if (!err) {
		pause->autoneg = pause_info.auto_neg;
		if (nic_cfg->pause_set || !pause_info.auto_neg) {
			pause->rx_pause = nic_cfg->rx_pause;
			pause->tx_pause = nic_cfg->tx_pause;
		} else {
			pause->rx_pause = pause_info.rx_pause;
			pause->tx_pause = pause_info.tx_pause;
		}
	}
}

static int hinic_set_pauseparam(struct net_device *netdev,
				struct ethtool_pauseparam *pause)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	struct hinic_pause_config pause_info = {0};
	struct hinic_port_cap port_cap = {0};
	int err;

	err = hinic_port_get_cap(nic_dev, &port_cap);
	if (err)
		return -EIO;

	if (pause->autoneg != port_cap.autoneg_state)
		return -EOPNOTSUPP;

	pause_info.auto_neg = pause->autoneg;
	pause_info.rx_pause = pause->rx_pause;
	pause_info.tx_pause = pause->tx_pause;

	mutex_lock(&nic_dev->hwdev->func_to_io.nic_cfg.cfg_mutex);
	err = hinic_set_hw_pause_info(nic_dev->hwdev, &pause_info);
	if (err) {
		mutex_unlock(&nic_dev->hwdev->func_to_io.nic_cfg.cfg_mutex);
		return err;
	}
	nic_dev->hwdev->func_to_io.nic_cfg.pause_set = true;
	nic_dev->hwdev->func_to_io.nic_cfg.auto_neg = pause->autoneg;
	nic_dev->hwdev->func_to_io.nic_cfg.rx_pause = pause->rx_pause;
	nic_dev->hwdev->func_to_io.nic_cfg.tx_pause = pause->tx_pause;
	mutex_unlock(&nic_dev->hwdev->func_to_io.nic_cfg.cfg_mutex);

	return 0;
}

static void hinic_get_channels(struct net_device *netdev,
			       struct ethtool_channels *channels)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	struct hinic_hwdev *hwdev = nic_dev->hwdev;

	channels->max_combined = nic_dev->max_qps;
	channels->combined_count = hinic_hwdev_num_qps(hwdev);
}

static int hinic_set_channels(struct net_device *netdev,
			      struct ethtool_channels *channels)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	unsigned int count = channels->combined_count;
	int err;

	netif_info(nic_dev, drv, netdev, "Set max combined queue number from %d to %d\n",
		   hinic_hwdev_num_qps(nic_dev->hwdev), count);

	if (netif_running(netdev)) {
		netif_info(nic_dev, drv, netdev, "Restarting netdev\n");
		hinic_close(netdev);

		nic_dev->hwdev->nic_cap.num_qps = count;

		err = hinic_open(netdev);
		if (err) {
			netif_err(nic_dev, drv, netdev,
				  "Failed to open netdev\n");
			return -EFAULT;
		}
	} else {
		nic_dev->hwdev->nic_cap.num_qps = count;
	}

	return 0;
}

static int hinic_get_rss_hash_opts(struct hinic_dev *nic_dev,
				   struct ethtool_rxnfc *cmd)
{
	struct hinic_rss_type rss_type = { 0 };
	int err;

	cmd->data = 0;

	if (!(nic_dev->flags & HINIC_RSS_ENABLE))
		return 0;

	err = hinic_get_rss_type(nic_dev, nic_dev->rss_tmpl_idx,
				 &rss_type);
	if (err)
		return err;

	cmd->data = RXH_IP_SRC | RXH_IP_DST;
	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
		if (rss_type.tcp_ipv4)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
	case TCP_V6_FLOW:
		if (rss_type.tcp_ipv6)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
	case UDP_V4_FLOW:
		if (rss_type.udp_ipv4)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
	case UDP_V6_FLOW:
		if (rss_type.udp_ipv6)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		break;
	default:
		cmd->data = 0;
		return -EINVAL;
	}

	return 0;
}

static int set_l4_rss_hash_ops(struct ethtool_rxnfc *cmd,
			       struct hinic_rss_type *rss_type)
{
	u8 rss_l4_en = 0;

	switch (cmd->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
	case 0:
		rss_l4_en = 0;
		break;
	case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
		rss_l4_en = 1;
		break;
	default:
		return -EINVAL;
	}

	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
		rss_type->tcp_ipv4 = rss_l4_en;
		break;
	case TCP_V6_FLOW:
		rss_type->tcp_ipv6 = rss_l4_en;
		break;
	case UDP_V4_FLOW:
		rss_type->udp_ipv4 = rss_l4_en;
		break;
	case UDP_V6_FLOW:
		rss_type->udp_ipv6 = rss_l4_en;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int hinic_set_rss_hash_opts(struct hinic_dev *nic_dev,
				   struct ethtool_rxnfc *cmd)
{
	struct hinic_rss_type *rss_type = &nic_dev->rss_type;
	int err;

	if (!(nic_dev->flags & HINIC_RSS_ENABLE)) {
		cmd->data = 0;
		return -EOPNOTSUPP;
	}

	/* RSS does not support anything other than hashing
	 * to queues on src and dst IPs and ports
	 */
	if (cmd->data & ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 |
			  RXH_L4_B_2_3))
		return -EINVAL;

	/* We need at least the IP SRC and DEST fields for hashing */
	if (!(cmd->data & RXH_IP_SRC) || !(cmd->data & RXH_IP_DST))
		return -EINVAL;

	err = hinic_get_rss_type(nic_dev,
				 nic_dev->rss_tmpl_idx, rss_type);
	if (err)
		return -EFAULT;

	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
	case UDP_V4_FLOW:
	case UDP_V6_FLOW:
		err = set_l4_rss_hash_ops(cmd, rss_type);
		if (err)
			return err;
		break;
	case IPV4_FLOW:
		rss_type->ipv4 = 1;
		break;
	case IPV6_FLOW:
		rss_type->ipv6 = 1;
		break;
	default:
		return -EINVAL;
	}

	err = hinic_set_rss_type(nic_dev, nic_dev->rss_tmpl_idx,
				 *rss_type);
	if (err)
		return -EFAULT;

	return 0;
}

static int __set_rss_rxfh(struct net_device *netdev,
			  const u32 *indir, const u8 *key)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	int err;

	if (indir) {
		if (!nic_dev->rss_indir_user) {
			nic_dev->rss_indir_user =
				kzalloc(sizeof(u32) * HINIC_RSS_INDIR_SIZE,
					GFP_KERNEL);
			if (!nic_dev->rss_indir_user)
				return -ENOMEM;
		}

		memcpy(nic_dev->rss_indir_user, indir,
		       sizeof(u32) * HINIC_RSS_INDIR_SIZE);

		err = hinic_rss_set_indir_tbl(nic_dev,
					      nic_dev->rss_tmpl_idx, indir);
		if (err)
			return -EFAULT;
	}
	if (key) {
		if (!nic_dev->rss_hkey_user) {
			nic_dev->rss_hkey_user =
				kzalloc(HINIC_RSS_KEY_SIZE * 2, GFP_KERNEL);

			if (!nic_dev->rss_hkey_user)
				return -ENOMEM;
		}

		memcpy(nic_dev->rss_hkey_user, key, HINIC_RSS_KEY_SIZE);

		err = hinic_rss_set_template_tbl(nic_dev,
						 nic_dev->rss_tmpl_idx, key);
		if (err)
			return -EFAULT;
	}

	return 0;
}

static int hinic_get_rxnfc(struct net_device *netdev,
			   struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	int err = 0;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = nic_dev->num_qps;
		break;
	case ETHTOOL_GRXFH:
		err = hinic_get_rss_hash_opts(nic_dev, cmd);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int hinic_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	int err = 0;

	switch (cmd->cmd) {
	case ETHTOOL_SRXFH:
		err = hinic_set_rss_hash_opts(nic_dev, cmd);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int hinic_get_rxfh(struct net_device *netdev,
			  u32 *indir, u8 *key, u8 *hfunc)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	u8 hash_engine_type = 0;
	int err = 0;

	if (!(nic_dev->flags & HINIC_RSS_ENABLE))
		return -EOPNOTSUPP;

	if (hfunc) {
		err = hinic_rss_get_hash_engine(nic_dev,
						nic_dev->rss_tmpl_idx,
						&hash_engine_type);
		if (err)
			return -EFAULT;

		*hfunc = hash_engine_type ? ETH_RSS_HASH_TOP : ETH_RSS_HASH_XOR;
	}

	if (indir) {
		err = hinic_rss_get_indir_tbl(nic_dev,
					      nic_dev->rss_tmpl_idx, indir);
		if (err)
			return -EFAULT;
	}

	if (key)
		err = hinic_rss_get_template_tbl(nic_dev,
						 nic_dev->rss_tmpl_idx, key);

	return err;
}

static int hinic_set_rxfh(struct net_device *netdev, const u32 *indir,
			  const u8 *key, const u8 hfunc)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	int err = 0;

	if (!(nic_dev->flags & HINIC_RSS_ENABLE))
		return -EOPNOTSUPP;

	if (hfunc != ETH_RSS_HASH_NO_CHANGE) {
		if (hfunc != ETH_RSS_HASH_TOP && hfunc != ETH_RSS_HASH_XOR)
			return -EOPNOTSUPP;

		nic_dev->rss_hash_engine = (hfunc == ETH_RSS_HASH_XOR) ?
			HINIC_RSS_HASH_ENGINE_TYPE_XOR :
			HINIC_RSS_HASH_ENGINE_TYPE_TOEP;
		err = hinic_rss_set_hash_engine
			(nic_dev, nic_dev->rss_tmpl_idx,
			 nic_dev->rss_hash_engine);
		if (err)
			return -EFAULT;
	}

	err = __set_rss_rxfh(netdev, indir, key);

	return err;
}

static u32 hinic_get_rxfh_key_size(struct net_device *netdev)
{
	return HINIC_RSS_KEY_SIZE;
}

static u32 hinic_get_rxfh_indir_size(struct net_device *netdev)
{
	return HINIC_RSS_INDIR_SIZE;
}

#define ARRAY_LEN(arr) ((int)((int)sizeof(arr) / (int)sizeof(arr[0])))

#define HINIC_FUNC_STAT(_stat_item) {	\
	.name = #_stat_item, \
	.size = sizeof_field(struct hinic_vport_stats, _stat_item), \
	.offset = offsetof(struct hinic_vport_stats, _stat_item) \
}

static struct hinic_stats hinic_function_stats[] = {
	HINIC_FUNC_STAT(tx_unicast_pkts_vport),
	HINIC_FUNC_STAT(tx_unicast_bytes_vport),
	HINIC_FUNC_STAT(tx_multicast_pkts_vport),
	HINIC_FUNC_STAT(tx_multicast_bytes_vport),
	HINIC_FUNC_STAT(tx_broadcast_pkts_vport),
	HINIC_FUNC_STAT(tx_broadcast_bytes_vport),

	HINIC_FUNC_STAT(rx_unicast_pkts_vport),
	HINIC_FUNC_STAT(rx_unicast_bytes_vport),
	HINIC_FUNC_STAT(rx_multicast_pkts_vport),
	HINIC_FUNC_STAT(rx_multicast_bytes_vport),
	HINIC_FUNC_STAT(rx_broadcast_pkts_vport),
	HINIC_FUNC_STAT(rx_broadcast_bytes_vport),

	HINIC_FUNC_STAT(tx_discard_vport),
	HINIC_FUNC_STAT(rx_discard_vport),
	HINIC_FUNC_STAT(tx_err_vport),
	HINIC_FUNC_STAT(rx_err_vport),
};

static char hinic_test_strings[][ETH_GSTRING_LEN] = {
	"Internal lb test (on/offline)",
	"External lb test (external_lb)",
};

#define HINIC_PORT_STAT(_stat_item) { \
	.name = #_stat_item, \
	.size = sizeof_field(struct hinic_phy_port_stats, _stat_item), \
	.offset = offsetof(struct hinic_phy_port_stats, _stat_item) \
}

static struct hinic_stats hinic_port_stats[] = {
	HINIC_PORT_STAT(mac_rx_total_pkt_num),
	HINIC_PORT_STAT(mac_rx_total_oct_num),
	HINIC_PORT_STAT(mac_rx_bad_pkt_num),
	HINIC_PORT_STAT(mac_rx_bad_oct_num),
	HINIC_PORT_STAT(mac_rx_good_pkt_num),
	HINIC_PORT_STAT(mac_rx_good_oct_num),
	HINIC_PORT_STAT(mac_rx_uni_pkt_num),
	HINIC_PORT_STAT(mac_rx_multi_pkt_num),
	HINIC_PORT_STAT(mac_rx_broad_pkt_num),
	HINIC_PORT_STAT(mac_tx_total_pkt_num),
	HINIC_PORT_STAT(mac_tx_total_oct_num),
	HINIC_PORT_STAT(mac_tx_bad_pkt_num),
	HINIC_PORT_STAT(mac_tx_bad_oct_num),
	HINIC_PORT_STAT(mac_tx_good_pkt_num),
	HINIC_PORT_STAT(mac_tx_good_oct_num),
	HINIC_PORT_STAT(mac_tx_uni_pkt_num),
	HINIC_PORT_STAT(mac_tx_multi_pkt_num),
	HINIC_PORT_STAT(mac_tx_broad_pkt_num),
	HINIC_PORT_STAT(mac_rx_fragment_pkt_num),
	HINIC_PORT_STAT(mac_rx_undersize_pkt_num),
	HINIC_PORT_STAT(mac_rx_undermin_pkt_num),
	HINIC_PORT_STAT(mac_rx_64_oct_pkt_num),
	HINIC_PORT_STAT(mac_rx_65_127_oct_pkt_num),
	HINIC_PORT_STAT(mac_rx_128_255_oct_pkt_num),
	HINIC_PORT_STAT(mac_rx_256_511_oct_pkt_num),
	HINIC_PORT_STAT(mac_rx_512_1023_oct_pkt_num),
	HINIC_PORT_STAT(mac_rx_1024_1518_oct_pkt_num),
	HINIC_PORT_STAT(mac_rx_1519_2047_oct_pkt_num),
	HINIC_PORT_STAT(mac_rx_2048_4095_oct_pkt_num),
	HINIC_PORT_STAT(mac_rx_4096_8191_oct_pkt_num),
	HINIC_PORT_STAT(mac_rx_8192_9216_oct_pkt_num),
	HINIC_PORT_STAT(mac_rx_9217_12287_oct_pkt_num),
	HINIC_PORT_STAT(mac_rx_12288_16383_oct_pkt_num),
	HINIC_PORT_STAT(mac_rx_1519_max_good_pkt_num),
	HINIC_PORT_STAT(mac_rx_1519_max_bad_pkt_num),
	HINIC_PORT_STAT(mac_rx_oversize_pkt_num),
	HINIC_PORT_STAT(mac_rx_jabber_pkt_num),
	HINIC_PORT_STAT(mac_rx_pause_num),
	HINIC_PORT_STAT(mac_rx_pfc_pkt_num),
	HINIC_PORT_STAT(mac_rx_pfc_pri0_pkt_num),
	HINIC_PORT_STAT(mac_rx_pfc_pri1_pkt_num),
	HINIC_PORT_STAT(mac_rx_pfc_pri2_pkt_num),
	HINIC_PORT_STAT(mac_rx_pfc_pri3_pkt_num),
	HINIC_PORT_STAT(mac_rx_pfc_pri4_pkt_num),
	HINIC_PORT_STAT(mac_rx_pfc_pri5_pkt_num),
	HINIC_PORT_STAT(mac_rx_pfc_pri6_pkt_num),
	HINIC_PORT_STAT(mac_rx_pfc_pri7_pkt_num),
	HINIC_PORT_STAT(mac_rx_control_pkt_num),
	HINIC_PORT_STAT(mac_rx_sym_err_pkt_num),
	HINIC_PORT_STAT(mac_rx_fcs_err_pkt_num),
	HINIC_PORT_STAT(mac_rx_send_app_good_pkt_num),
	HINIC_PORT_STAT(mac_rx_send_app_bad_pkt_num),
	HINIC_PORT_STAT(mac_tx_fragment_pkt_num),
	HINIC_PORT_STAT(mac_tx_undersize_pkt_num),
	HINIC_PORT_STAT(mac_tx_undermin_pkt_num),
	HINIC_PORT_STAT(mac_tx_64_oct_pkt_num),
	HINIC_PORT_STAT(mac_tx_65_127_oct_pkt_num),
	HINIC_PORT_STAT(mac_tx_128_255_oct_pkt_num),
	HINIC_PORT_STAT(mac_tx_256_511_oct_pkt_num),
	HINIC_PORT_STAT(mac_tx_512_1023_oct_pkt_num),
	HINIC_PORT_STAT(mac_tx_1024_1518_oct_pkt_num),
	HINIC_PORT_STAT(mac_tx_1519_2047_oct_pkt_num),
	HINIC_PORT_STAT(mac_tx_2048_4095_oct_pkt_num),
	HINIC_PORT_STAT(mac_tx_4096_8191_oct_pkt_num),
	HINIC_PORT_STAT(mac_tx_8192_9216_oct_pkt_num),
	HINIC_PORT_STAT(mac_tx_9217_12287_oct_pkt_num),
	HINIC_PORT_STAT(mac_tx_12288_16383_oct_pkt_num),
	HINIC_PORT_STAT(mac_tx_1519_max_good_pkt_num),
	HINIC_PORT_STAT(mac_tx_1519_max_bad_pkt_num),
	HINIC_PORT_STAT(mac_tx_oversize_pkt_num),
	HINIC_PORT_STAT(mac_tx_jabber_pkt_num),
	HINIC_PORT_STAT(mac_tx_pause_num),
	HINIC_PORT_STAT(mac_tx_pfc_pkt_num),
	HINIC_PORT_STAT(mac_tx_pfc_pri0_pkt_num),
	HINIC_PORT_STAT(mac_tx_pfc_pri1_pkt_num),
	HINIC_PORT_STAT(mac_tx_pfc_pri2_pkt_num),
	HINIC_PORT_STAT(mac_tx_pfc_pri3_pkt_num),
	HINIC_PORT_STAT(mac_tx_pfc_pri4_pkt_num),
	HINIC_PORT_STAT(mac_tx_pfc_pri5_pkt_num),
	HINIC_PORT_STAT(mac_tx_pfc_pri6_pkt_num),
	HINIC_PORT_STAT(mac_tx_pfc_pri7_pkt_num),
	HINIC_PORT_STAT(mac_tx_control_pkt_num),
	HINIC_PORT_STAT(mac_tx_err_all_pkt_num),
	HINIC_PORT_STAT(mac_tx_from_app_good_pkt_num),
	HINIC_PORT_STAT(mac_tx_from_app_bad_pkt_num),
};

#define HINIC_TXQ_STAT(_stat_item) { \
	.name = "txq%d_"#_stat_item, \
	.size = sizeof_field(struct hinic_txq_stats, _stat_item), \
	.offset = offsetof(struct hinic_txq_stats, _stat_item) \
}

static struct hinic_stats hinic_tx_queue_stats[] = {
	HINIC_TXQ_STAT(pkts),
	HINIC_TXQ_STAT(bytes),
	HINIC_TXQ_STAT(tx_busy),
	HINIC_TXQ_STAT(tx_wake),
	HINIC_TXQ_STAT(tx_dropped),
	HINIC_TXQ_STAT(big_frags_pkts),
};

#define HINIC_RXQ_STAT(_stat_item) { \
	.name = "rxq%d_"#_stat_item, \
	.size = sizeof_field(struct hinic_rxq_stats, _stat_item), \
	.offset = offsetof(struct hinic_rxq_stats, _stat_item) \
}

static struct hinic_stats hinic_rx_queue_stats[] = {
	HINIC_RXQ_STAT(pkts),
	HINIC_RXQ_STAT(bytes),
	HINIC_RXQ_STAT(errors),
	HINIC_RXQ_STAT(csum_errors),
	HINIC_RXQ_STAT(other_errors),
};

static void get_drv_queue_stats(struct hinic_dev *nic_dev, u64 *data)
{
	struct hinic_txq_stats txq_stats;
	struct hinic_rxq_stats rxq_stats;
	u16 i = 0, j = 0, qid = 0;
	char *p;

	for (qid = 0; qid < nic_dev->num_qps; qid++) {
		if (!nic_dev->txqs)
			break;

		hinic_txq_get_stats(&nic_dev->txqs[qid], &txq_stats);
		for (j = 0; j < ARRAY_LEN(hinic_tx_queue_stats); j++, i++) {
			p = (char *)&txq_stats +
				hinic_tx_queue_stats[j].offset;
			data[i] = (hinic_tx_queue_stats[j].size ==
					sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
		}
	}

	for (qid = 0; qid < nic_dev->num_qps; qid++) {
		if (!nic_dev->rxqs)
			break;

		hinic_rxq_get_stats(&nic_dev->rxqs[qid], &rxq_stats);
		for (j = 0; j < ARRAY_LEN(hinic_rx_queue_stats); j++, i++) {
			p = (char *)&rxq_stats +
				hinic_rx_queue_stats[j].offset;
			data[i] = (hinic_rx_queue_stats[j].size ==
					sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
		}
	}
}

static void hinic_get_ethtool_stats(struct net_device *netdev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	struct hinic_vport_stats vport_stats = {0};
	struct hinic_phy_port_stats *port_stats;
	u16 i = 0, j = 0;
	char *p;
	int err;

	err = hinic_get_vport_stats(nic_dev, &vport_stats);
	if (err)
		netif_err(nic_dev, drv, netdev,
			  "Failed to get vport stats from firmware\n");

	for (j = 0; j < ARRAY_LEN(hinic_function_stats); j++, i++) {
		p = (char *)&vport_stats + hinic_function_stats[j].offset;
		data[i] = (hinic_function_stats[j].size ==
				sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}

	port_stats = kzalloc(sizeof(*port_stats), GFP_KERNEL);
	if (!port_stats) {
		memset(&data[i], 0,
		       ARRAY_LEN(hinic_port_stats) * sizeof(*data));
		i += ARRAY_LEN(hinic_port_stats);
		goto get_drv_stats;
	}

	err = hinic_get_phy_port_stats(nic_dev, port_stats);
	if (err)
		netif_err(nic_dev, drv, netdev,
			  "Failed to get port stats from firmware\n");

	for (j = 0; j < ARRAY_LEN(hinic_port_stats); j++, i++) {
		p = (char *)port_stats + hinic_port_stats[j].offset;
		data[i] = (hinic_port_stats[j].size ==
				sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}

	kfree(port_stats);

get_drv_stats:
	get_drv_queue_stats(nic_dev, data + i);
}

static int hinic_get_sset_count(struct net_device *netdev, int sset)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	int count, q_num;

	switch (sset) {
	case ETH_SS_TEST:
		return ARRAY_LEN(hinic_test_strings);
	case ETH_SS_STATS:
		q_num = nic_dev->num_qps;
		count = ARRAY_LEN(hinic_function_stats) +
			(ARRAY_LEN(hinic_tx_queue_stats) +
			 ARRAY_LEN(hinic_rx_queue_stats)) * q_num;

		count += ARRAY_LEN(hinic_port_stats);

		return count;
	default:
		return -EOPNOTSUPP;
	}
}

static void hinic_get_strings(struct net_device *netdev,
			      u32 stringset, u8 *data)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	char *p = (char *)data;
	u16 i, j;

	switch (stringset) {
	case ETH_SS_TEST:
		memcpy(data, *hinic_test_strings, sizeof(hinic_test_strings));
		return;
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_LEN(hinic_function_stats); i++) {
			memcpy(p, hinic_function_stats[i].name,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < ARRAY_LEN(hinic_port_stats); i++) {
			memcpy(p, hinic_port_stats[i].name,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < nic_dev->num_qps; i++) {
			for (j = 0; j < ARRAY_LEN(hinic_tx_queue_stats); j++) {
				sprintf(p, hinic_tx_queue_stats[j].name, i);
				p += ETH_GSTRING_LEN;
			}
		}

		for (i = 0; i < nic_dev->num_qps; i++) {
			for (j = 0; j < ARRAY_LEN(hinic_rx_queue_stats); j++) {
				sprintf(p, hinic_rx_queue_stats[j].name, i);
				p += ETH_GSTRING_LEN;
			}
		}

		return;
	default:
		return;
	}
}

static int hinic_run_lp_test(struct hinic_dev *nic_dev, u32 test_time)
{
	u8 *lb_test_rx_buf = nic_dev->lb_test_rx_buf;
	struct net_device *netdev = nic_dev->netdev;
	struct sk_buff *skb_tmp = NULL;
	struct sk_buff *skb = NULL;
	u32 cnt = test_time * 5;
	u8 *test_data = NULL;
	u32 i;
	u8 j;

	skb_tmp = alloc_skb(LP_PKT_LEN, GFP_ATOMIC);
	if (!skb_tmp)
		return -ENOMEM;

	test_data = __skb_put(skb_tmp, LP_PKT_LEN);

	memset(test_data, 0xFF, 2 * ETH_ALEN);
	test_data[ETH_ALEN] = 0xFE;
	test_data[2 * ETH_ALEN] = 0x08;
	test_data[2 * ETH_ALEN + 1] = 0x0;

	for (i = ETH_HLEN; i < LP_PKT_LEN; i++)
		test_data[i] = i & 0xFF;

	skb_tmp->queue_mapping = 0;
	skb_tmp->ip_summed = CHECKSUM_COMPLETE;
	skb_tmp->dev = netdev;

	for (i = 0; i < cnt; i++) {
		nic_dev->lb_test_rx_idx = 0;
		memset(lb_test_rx_buf, 0, LP_PKT_CNT * LP_PKT_LEN);

		for (j = 0; j < LP_PKT_CNT; j++) {
			skb = pskb_copy(skb_tmp, GFP_ATOMIC);
			if (!skb) {
				dev_kfree_skb_any(skb_tmp);
				netif_err(nic_dev, drv, netdev,
					  "Copy skb failed for loopback test\n");
				return -ENOMEM;
			}

			/* mark index for every pkt */
			skb->data[LP_PKT_LEN - 1] = j;

			if (hinic_lb_xmit_frame(skb, netdev)) {
				dev_kfree_skb_any(skb);
				dev_kfree_skb_any(skb_tmp);
				netif_err(nic_dev, drv, netdev,
					  "Xmit pkt failed for loopback test\n");
				return -EBUSY;
			}
		}

		/* wait till all pkts received to RX buffer */
		msleep(200);

		for (j = 0; j < LP_PKT_CNT; j++) {
			if (memcmp(lb_test_rx_buf + j * LP_PKT_LEN,
				   skb_tmp->data, LP_PKT_LEN - 1) ||
			    (*(lb_test_rx_buf + j * LP_PKT_LEN +
			       LP_PKT_LEN - 1) != j)) {
				dev_kfree_skb_any(skb_tmp);
				netif_err(nic_dev, drv, netdev,
					  "Compare pkt failed in loopback test(index=0x%02x, data[%d]=0x%02x)\n",
					  j + i * LP_PKT_CNT,
					  LP_PKT_LEN - 1,
					  *(lb_test_rx_buf + j * LP_PKT_LEN +
					    LP_PKT_LEN - 1));
				return -EIO;
			}
		}
	}

	dev_kfree_skb_any(skb_tmp);
	return 0;
}

static int do_lp_test(struct hinic_dev *nic_dev, u32 flags, u32 test_time,
		      enum diag_test_index *test_index)
{
	struct net_device *netdev = nic_dev->netdev;
	u8 *lb_test_rx_buf = NULL;
	int err = 0;

	if (!(flags & ETH_TEST_FL_EXTERNAL_LB)) {
		*test_index = INTERNAL_LP_TEST;
		if (hinic_set_loopback_mode(nic_dev->hwdev,
					    HINIC_INTERNAL_LP_MODE, true)) {
			netif_err(nic_dev, drv, netdev,
				  "Failed to set port loopback mode before loopback test\n");
			return -EIO;
		}
	} else {
		*test_index = EXTERNAL_LP_TEST;
	}

	lb_test_rx_buf = vmalloc(LP_PKT_CNT * LP_PKT_LEN);
	if (!lb_test_rx_buf) {
		err = -ENOMEM;
	} else {
		nic_dev->lb_test_rx_buf = lb_test_rx_buf;
		nic_dev->lb_pkt_len = LP_PKT_LEN;
		nic_dev->flags |= HINIC_LP_TEST;
		err = hinic_run_lp_test(nic_dev, test_time);
		nic_dev->flags &= ~HINIC_LP_TEST;
		msleep(100);
		vfree(lb_test_rx_buf);
		nic_dev->lb_test_rx_buf = NULL;
	}

	if (!(flags & ETH_TEST_FL_EXTERNAL_LB)) {
		if (hinic_set_loopback_mode(nic_dev->hwdev,
					    HINIC_INTERNAL_LP_MODE, false)) {
			netif_err(nic_dev, drv, netdev,
				  "Failed to cancel port loopback mode after loopback test\n");
			err = -EIO;
		}
	}

	return err;
}

static void hinic_diag_test(struct net_device *netdev,
			    struct ethtool_test *eth_test, u64 *data)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	enum hinic_port_link_state link_state;
	enum diag_test_index test_index = 0;
	int err = 0;

	memset(data, 0, DIAG_TEST_MAX * sizeof(u64));

	/* don't support loopback test when netdev is closed. */
	if (!(nic_dev->flags & HINIC_INTF_UP)) {
		netif_err(nic_dev, drv, netdev,
			  "Do not support loopback test when netdev is closed\n");
		eth_test->flags |= ETH_TEST_FL_FAILED;
		data[PORT_DOWN_ERR_IDX] = 1;
		return;
	}

	netif_carrier_off(netdev);
	netif_tx_disable(netdev);

	err = do_lp_test(nic_dev, eth_test->flags, LP_DEFAULT_TIME,
			 &test_index);
	if (err) {
		eth_test->flags |= ETH_TEST_FL_FAILED;
		data[test_index] = 1;
	}

	netif_tx_wake_all_queues(netdev);

	err = hinic_port_link_state(nic_dev, &link_state);
	if (!err && link_state == HINIC_LINK_STATE_UP)
		netif_carrier_on(netdev);
}

static int hinic_set_phys_id(struct net_device *netdev,
			     enum ethtool_phys_id_state state)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	int err = 0;
	u8 port;

	port = nic_dev->hwdev->port_id;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		err = hinic_set_led_status(nic_dev->hwdev, port,
					   HINIC_LED_TYPE_LINK,
					   HINIC_LED_MODE_FORCE_2HZ);
		if (err)
			netif_err(nic_dev, drv, netdev,
				  "Set LED blinking in 2HZ failed\n");
		break;

	case ETHTOOL_ID_INACTIVE:
		err = hinic_reset_led_status(nic_dev->hwdev, port);
		if (err)
			netif_err(nic_dev, drv, netdev,
				  "Reset LED to original status failed\n");
		break;

	default:
		return -EOPNOTSUPP;
	}

	return err;
}

static int hinic_get_module_info(struct net_device *netdev,
				 struct ethtool_modinfo *modinfo)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	u8 sfp_type_ext;
	u8 sfp_type;
	int err;

	err = hinic_get_sfp_type(nic_dev->hwdev, &sfp_type, &sfp_type_ext);
	if (err)
		return err;

	switch (sfp_type) {
	case SFF8024_ID_SFP:
		modinfo->type = ETH_MODULE_SFF_8472;
		modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
		break;
	case SFF8024_ID_QSFP_8438:
		modinfo->type = ETH_MODULE_SFF_8436;
		modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
		break;
	case SFF8024_ID_QSFP_8436_8636:
		if (sfp_type_ext >= 0x3) {
			modinfo->type = ETH_MODULE_SFF_8636;
			modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN;

		} else {
			modinfo->type = ETH_MODULE_SFF_8436;
			modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
		}
		break;
	case SFF8024_ID_QSFP28_8636:
		modinfo->type = ETH_MODULE_SFF_8636;
		modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN;
		break;
	default:
		netif_warn(nic_dev, drv, netdev,
			   "Optical module unknown: 0x%x\n", sfp_type);
		return -EINVAL;
	}

	return 0;
}

static int hinic_get_module_eeprom(struct net_device *netdev,
				   struct ethtool_eeprom *ee, u8 *data)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	u8 sfp_data[STD_SFP_INFO_MAX_SIZE];
	u16 len;
	int err;

	if (!ee->len || ((ee->len + ee->offset) > STD_SFP_INFO_MAX_SIZE))
		return -EINVAL;

	memset(data, 0, ee->len);

	err = hinic_get_sfp_eeprom(nic_dev->hwdev, sfp_data, &len);
	if (err)
		return err;

	memcpy(data, sfp_data + ee->offset, ee->len);

	return 0;
}

static int
hinic_get_link_ext_state(struct net_device *netdev,
			 struct ethtool_link_ext_state_info *link_ext_state_info)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);

	if (netif_carrier_ok(netdev))
		return -ENODATA;

	if (nic_dev->cable_unplugged)
		link_ext_state_info->link_ext_state =
			ETHTOOL_LINK_EXT_STATE_NO_CABLE;
	else if (nic_dev->module_unrecognized)
		link_ext_state_info->link_ext_state =
			ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH;

	return 0;
}

static const struct ethtool_ops hinic_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
				     ETHTOOL_COALESCE_RX_MAX_FRAMES |
				     ETHTOOL_COALESCE_TX_USECS |
				     ETHTOOL_COALESCE_TX_MAX_FRAMES,

	.get_link_ksettings = hinic_get_link_ksettings,
	.set_link_ksettings = hinic_set_link_ksettings,
	.get_drvinfo = hinic_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_link_ext_state = hinic_get_link_ext_state,
	.get_ringparam = hinic_get_ringparam,
	.set_ringparam = hinic_set_ringparam,
	.get_coalesce = hinic_get_coalesce,
	.set_coalesce = hinic_set_coalesce,
	.get_per_queue_coalesce = hinic_get_per_queue_coalesce,
	.set_per_queue_coalesce = hinic_set_per_queue_coalesce,
	.get_pauseparam = hinic_get_pauseparam,
	.set_pauseparam = hinic_set_pauseparam,
	.get_channels = hinic_get_channels,
	.set_channels = hinic_set_channels,
	.get_rxnfc = hinic_get_rxnfc,
	.set_rxnfc = hinic_set_rxnfc,
	.get_rxfh_key_size = hinic_get_rxfh_key_size,
	.get_rxfh_indir_size = hinic_get_rxfh_indir_size,
	.get_rxfh = hinic_get_rxfh,
	.set_rxfh = hinic_set_rxfh,
	.get_sset_count = hinic_get_sset_count,
	.get_ethtool_stats = hinic_get_ethtool_stats,
	.get_strings = hinic_get_strings,
	.self_test = hinic_diag_test,
	.set_phys_id = hinic_set_phys_id,
	.get_module_info = hinic_get_module_info,
	.get_module_eeprom = hinic_get_module_eeprom,
};

static const struct ethtool_ops hinicvf_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
				     ETHTOOL_COALESCE_RX_MAX_FRAMES |
				     ETHTOOL_COALESCE_TX_USECS |
				     ETHTOOL_COALESCE_TX_MAX_FRAMES,

	.get_link_ksettings = hinic_get_link_ksettings,
	.get_drvinfo = hinic_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_ringparam = hinic_get_ringparam,
	.set_ringparam = hinic_set_ringparam,
	.get_coalesce = hinic_get_coalesce,
	.set_coalesce = hinic_set_coalesce,
	.get_per_queue_coalesce = hinic_get_per_queue_coalesce,
	.set_per_queue_coalesce = hinic_set_per_queue_coalesce,
	.get_channels = hinic_get_channels,
	.set_channels = hinic_set_channels,
	.get_rxnfc = hinic_get_rxnfc,
	.set_rxnfc = hinic_set_rxnfc,
	.get_rxfh_key_size = hinic_get_rxfh_key_size,
	.get_rxfh_indir_size = hinic_get_rxfh_indir_size,
	.get_rxfh = hinic_get_rxfh,
	.set_rxfh = hinic_set_rxfh,
	.get_sset_count = hinic_get_sset_count,
	.get_ethtool_stats = hinic_get_ethtool_stats,
	.get_strings = hinic_get_strings,
};

void hinic_set_ethtool_ops(struct net_device *netdev)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);

	if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
		netdev->ethtool_ops = &hinic_ethtool_ops;
	else
		netdev->ethtool_ops = &hinicvf_ethtool_ops;
}