// SPDX-License-Identifier: GPL-2.0
/* Huawei HiNIC PCI Express Linux driver
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/ethtool.h>
#include <linux/vmalloc.h>
#include <linux/sfp.h>

#include "hinic_hw_qp.h"
#include "hinic_hw_dev.h"
#include "hinic_port.h"
#include "hinic_tx.h"
#include "hinic_rx.h"
#include "hinic_dev.h"

#define SET_LINK_STR_MAX_LEN	16

#define GET_SUPPORTED_MODE	0
#define GET_ADVERTISED_MODE	1

#define ETHTOOL_ADD_SUPPORTED_SPEED_LINK_MODE(ecmd, mode)	\
	((ecmd)->supported |=	\
	(1UL << hw_to_ethtool_link_mode_table[mode].link_mode_bit))
#define ETHTOOL_ADD_ADVERTISED_SPEED_LINK_MODE(ecmd, mode)	\
	((ecmd)->advertising |=	\
	(1UL << hw_to_ethtool_link_mode_table[mode].link_mode_bit))
#define ETHTOOL_ADD_SUPPORTED_LINK_MODE(ecmd, mode)	\
	((ecmd)->supported |= SUPPORTED_##mode)
#define ETHTOOL_ADD_ADVERTISED_LINK_MODE(ecmd, mode)	\
	((ecmd)->advertising |= ADVERTISED_##mode)

#define COALESCE_PENDING_LIMIT_UNIT	8
#define COALESCE_TIMER_CFG_UNIT		9
#define COALESCE_ALL_QUEUE		0xFFFF
#define COALESCE_MAX_PENDING_LIMIT	(255 * COALESCE_PENDING_LIMIT_UNIT)
#define COALESCE_MAX_TIMER_CFG		(255 * COALESCE_TIMER_CFG_UNIT)
#define OBJ_STR_MAX_LEN			32

struct hw2ethtool_link_mode {
	enum ethtool_link_mode_bit_indices link_mode_bit;
	u32 speed;
	enum hinic_link_mode hw_link_mode;
};

struct cmd_link_settings {
	u64 supported;
	u64 advertising;

	u32 speed;
	u8 duplex;
	u8 port;
	u8 autoneg;
};

static u32 hw_to_ethtool_speed[LINK_SPEED_LEVELS] = {
	SPEED_10, SPEED_100,
	SPEED_1000, SPEED_10000,
	SPEED_25000, SPEED_40000,
	SPEED_100000
};

static struct hw2ethtool_link_mode
	hw_to_ethtool_link_mode_table[HINIC_LINK_MODE_NUMBERS] = {
	{
		.link_mode_bit = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
		.speed = SPEED_10000,
		.hw_link_mode = HINIC_10GE_BASE_KR,
	},
	{
		.link_mode_bit = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
		.speed = SPEED_40000,
		.hw_link_mode = HINIC_40GE_BASE_KR4,
	},
	{
		.link_mode_bit = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
		.speed = SPEED_40000,
		.hw_link_mode = HINIC_40GE_BASE_CR4,
	},
	{
		.link_mode_bit = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
		.speed = SPEED_100000,
		.hw_link_mode = HINIC_100GE_BASE_KR4,
	},
	{
		.link_mode_bit = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
		.speed = SPEED_100000,
		.hw_link_mode = HINIC_100GE_BASE_CR4,
	},
	{
		.link_mode_bit = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
		.speed = SPEED_25000,
		.hw_link_mode = HINIC_25GE_BASE_KR_S,
	},
	{
		.link_mode_bit = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
		.speed = SPEED_25000,
		.hw_link_mode = HINIC_25GE_BASE_CR_S,
	},
	{
		.link_mode_bit = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
		.speed = SPEED_25000,
		.hw_link_mode = HINIC_25GE_BASE_KR,
	},
	{
		.link_mode_bit = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
		.speed = SPEED_25000,
		.hw_link_mode = HINIC_25GE_BASE_CR,
	},
	{
		.link_mode_bit = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
		.speed = SPEED_1000,
		.hw_link_mode = HINIC_GE_BASE_KX,
	},
};

#define LP_DEFAULT_TIME		5 /* seconds */
#define LP_PKT_LEN		1514

#define PORT_DOWN_ERR_IDX	0
enum diag_test_index {
	INTERNAL_LP_TEST = 0,
	EXTERNAL_LP_TEST = 1,
	DIAG_TEST_MAX = 2,
};

static void set_link_speed(struct ethtool_link_ksettings *link_ksettings,
			   enum hinic_speed speed)
{
	switch (speed) {
	case HINIC_SPEED_10MB_LINK:
		link_ksettings->base.speed = SPEED_10;
		break;

	case HINIC_SPEED_100MB_LINK:
		link_ksettings->base.speed = SPEED_100;
		break;

	case HINIC_SPEED_1000MB_LINK:
		link_ksettings->base.speed = SPEED_1000;
		break;

	case HINIC_SPEED_10GB_LINK:
		link_ksettings->base.speed = SPEED_10000;
		break;

	case HINIC_SPEED_25GB_LINK:
		link_ksettings->base.speed = SPEED_25000;
		break;

	case HINIC_SPEED_40GB_LINK:
		link_ksettings->base.speed = SPEED_40000;
		break;

	case HINIC_SPEED_100GB_LINK:
		link_ksettings->base.speed = SPEED_100000;
		break;

	default:
		link_ksettings->base.speed = SPEED_UNKNOWN;
		break;
	}
}

static int hinic_get_link_mode_index(enum hinic_link_mode link_mode)
{
	int i = 0;

	for (i = 0; i < HINIC_LINK_MODE_NUMBERS; i++) {
		if (link_mode == hw_to_ethtool_link_mode_table[i].hw_link_mode)
			break;
	}

	return i;
}

static void hinic_add_ethtool_link_mode(struct cmd_link_settings *link_settings,
					enum hinic_link_mode hw_link_mode,
					u32 name)
{
	enum hinic_link_mode link_mode;
	int idx = 0;

	for (link_mode = 0; link_mode < HINIC_LINK_MODE_NUMBERS; link_mode++) {
		if (hw_link_mode & ((u32)1 << link_mode)) {
			idx = hinic_get_link_mode_index(link_mode);
			if (idx >= HINIC_LINK_MODE_NUMBERS)
				continue;

			if (name == GET_SUPPORTED_MODE)
				ETHTOOL_ADD_SUPPORTED_SPEED_LINK_MODE
					(link_settings, idx);
			else
				ETHTOOL_ADD_ADVERTISED_SPEED_LINK_MODE
					(link_settings, idx);
		}
	}
}

static void hinic_link_port_type(struct cmd_link_settings *link_settings,
				 enum hinic_port_type port_type)
{
	switch (port_type) {
	case HINIC_PORT_ELEC:
	case HINIC_PORT_TP:
		ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, TP);
		ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, TP);
		link_settings->port = PORT_TP;
		break;

	case HINIC_PORT_AOC:
	case HINIC_PORT_FIBRE:
		ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, FIBRE);
		ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, FIBRE);
		link_settings->port = PORT_FIBRE;
		break;

	case HINIC_PORT_COPPER:
		ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, FIBRE);
		ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, FIBRE);
		link_settings->port = PORT_DA;
		break;

	case HINIC_PORT_BACKPLANE:
		ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, Backplane);
		ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, Backplane);
		link_settings->port = PORT_NONE;
		break;

	default:
		link_settings->port = PORT_OTHER;
		break;
	}
}
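
/* Build the ethtool link settings from the port capabilities, link state
 * and supported/advertised link modes; on physical functions the pause
 * parameters are also queried from the management firmware.
 */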
static int hinic_get_link_ksettings(struct net_device *netdev,
				    struct ethtool_link_ksettings
				    *link_ksettings)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	struct hinic_link_mode_cmd link_mode = { 0 };
	struct hinic_pause_config pause_info = { 0 };
	struct cmd_link_settings settings = { 0 };
	enum hinic_port_link_state link_state;
	struct hinic_port_cap port_cap;
	int err;

	ethtool_link_ksettings_zero_link_mode(link_ksettings, supported);
	ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);

	link_ksettings->base.speed = SPEED_UNKNOWN;
	link_ksettings->base.autoneg = AUTONEG_DISABLE;
	link_ksettings->base.duplex = DUPLEX_UNKNOWN;

	err = hinic_port_get_cap(nic_dev, &port_cap);
	if (err)
		return err;

	hinic_link_port_type(&settings, port_cap.port_type);
	link_ksettings->base.port = settings.port;

	err = hinic_port_link_state(nic_dev, &link_state);
	if (err)
		return err;

	if (link_state == HINIC_LINK_STATE_UP) {
		set_link_speed(link_ksettings, port_cap.speed);
		link_ksettings->base.duplex =
			(port_cap.duplex == HINIC_DUPLEX_FULL) ?
			DUPLEX_FULL : DUPLEX_HALF;
	}

	if (!!(port_cap.autoneg_cap & HINIC_AUTONEG_SUPPORTED))
		ethtool_link_ksettings_add_link_mode(link_ksettings,
						     advertising, Autoneg);

	if (port_cap.autoneg_state == HINIC_AUTONEG_ACTIVE)
		link_ksettings->base.autoneg = AUTONEG_ENABLE;

	err = hinic_get_link_mode(nic_dev->hwdev, &link_mode);
	if (err || link_mode.supported == HINIC_SUPPORTED_UNKNOWN ||
	    link_mode.advertised == HINIC_SUPPORTED_UNKNOWN)
		return -EIO;

	hinic_add_ethtool_link_mode(&settings, link_mode.supported,
				    GET_SUPPORTED_MODE);
	hinic_add_ethtool_link_mode(&settings, link_mode.advertised,
				    GET_ADVERTISED_MODE);

	if (!HINIC_IS_VF(nic_dev->hwdev->hwif)) {
		err = hinic_get_hw_pause_info(nic_dev->hwdev, &pause_info);
		if (err)
			return err;
		ETHTOOL_ADD_SUPPORTED_LINK_MODE(&settings, Pause);
		if (pause_info.rx_pause && pause_info.tx_pause) {
			ETHTOOL_ADD_ADVERTISED_LINK_MODE(&settings, Pause);
		} else if (pause_info.tx_pause) {
			ETHTOOL_ADD_ADVERTISED_LINK_MODE(&settings, Asym_Pause);
		} else if (pause_info.rx_pause) {
			ETHTOOL_ADD_ADVERTISED_LINK_MODE(&settings, Pause);
			ETHTOOL_ADD_ADVERTISED_LINK_MODE(&settings, Asym_Pause);
		}
	}

	linkmode_copy(link_ksettings->link_modes.supported,
		      (unsigned long *)&settings.supported);
	linkmode_copy(link_ksettings->link_modes.advertising,
		      (unsigned long *)&settings.advertising);

	return 0;
}

static int hinic_ethtool_to_hw_speed_level(u32 speed)
{
	int i;

	for (i = 0; i < LINK_SPEED_LEVELS; i++) {
		if (hw_to_ethtool_speed[i] == speed)
			break;
	}

	return i;
}

static bool hinic_is_support_speed(enum hinic_link_mode supported_link,
				   u32 speed)
{
	enum hinic_link_mode link_mode;
	int idx;

	for (link_mode = 0; link_mode < HINIC_LINK_MODE_NUMBERS; link_mode++) {
		if (!(supported_link & ((u32)1 << link_mode)))
			continue;

		idx = hinic_get_link_mode_index(link_mode);
		if (idx >= HINIC_LINK_MODE_NUMBERS)
			continue;

		if (hw_to_ethtool_link_mode_table[idx].speed == speed)
			return true;
	}

	return false;
}

static bool hinic_is_speed_legal(struct hinic_dev *nic_dev, u32 speed)
{
	struct hinic_link_mode_cmd link_mode = { 0 };
	struct net_device *netdev = nic_dev->netdev;
	enum nic_speed_level speed_level = 0;
	int err;

	err = hinic_get_link_mode(nic_dev->hwdev, &link_mode);
	if (err)
		return false;

	if (link_mode.supported == HINIC_SUPPORTED_UNKNOWN ||
	    link_mode.advertised == HINIC_SUPPORTED_UNKNOWN)
		return false;

	speed_level = hinic_ethtool_to_hw_speed_level(speed);
	if (speed_level >= LINK_SPEED_LEVELS ||
	    !hinic_is_support_speed(link_mode.supported, speed)) {
		netif_err(nic_dev, drv, netdev,
			  "Unsupported speed: %d\n", speed);
		return false;
	}

	return true;
}

static int get_link_settings_type(struct hinic_dev *nic_dev,
				  u8 autoneg, u32 speed, u32 *set_settings)
{
	struct hinic_port_cap port_cap = { 0 };
	int err;

	err = hinic_port_get_cap(nic_dev, &port_cap);
	if (err)
		return err;

	/* always set autonegotiation */
	if (port_cap.autoneg_cap)
		*set_settings |= HILINK_LINK_SET_AUTONEG;

	if (autoneg == AUTONEG_ENABLE) {
		if (!port_cap.autoneg_cap) {
			netif_err(nic_dev, drv, nic_dev->netdev, "Not support autoneg\n");
			return -EOPNOTSUPP;
		}
	} else if (speed != (u32)SPEED_UNKNOWN) {
		/* set speed only when autoneg is disabled */
		if (!hinic_is_speed_legal(nic_dev, speed))
			return -EINVAL;
		*set_settings |= HILINK_LINK_SET_SPEED;
	} else {
		netif_err(nic_dev, drv, nic_dev->netdev, "Need to set speed when autoneg is off\n");
		return -EOPNOTSUPP;
	}

	return 0;
}

static int set_link_settings_separate_cmd(struct hinic_dev *nic_dev,
					  u32 set_settings, u8 autoneg,
					  u32 speed)
{
	enum nic_speed_level speed_level = 0;
	int err = 0;

	if (set_settings & HILINK_LINK_SET_AUTONEG) {
		err = hinic_set_autoneg(nic_dev->hwdev,
					(autoneg == AUTONEG_ENABLE));
		if (err)
			netif_err(nic_dev, drv, nic_dev->netdev, "%s autoneg failed\n",
				  (autoneg == AUTONEG_ENABLE) ?
				  "Enable" : "Disable");
		else
			netif_info(nic_dev, drv, nic_dev->netdev, "%s autoneg successfully\n",
				   (autoneg == AUTONEG_ENABLE) ?
				   "Enable" : "Disable");
	}

	if (!err && (set_settings & HILINK_LINK_SET_SPEED)) {
		speed_level = hinic_ethtool_to_hw_speed_level(speed);
		err = hinic_set_speed(nic_dev->hwdev, speed_level);
		if (err)
			netif_err(nic_dev, drv, nic_dev->netdev, "Set speed %d failed\n",
				  speed);
		else
			netif_info(nic_dev, drv, nic_dev->netdev, "Set speed %d successfully\n",
				   speed);
	}

	return err;
}

static int hinic_set_settings_to_hw(struct hinic_dev *nic_dev,
				    u32 set_settings, u8 autoneg, u32 speed)
{
	struct hinic_link_ksettings_info settings = {0};
	char set_link_str[SET_LINK_STR_MAX_LEN] = {0};
	const char *autoneg_str;
	struct net_device *netdev = nic_dev->netdev;
	enum nic_speed_level speed_level = 0;
	int err;

	autoneg_str = (set_settings & HILINK_LINK_SET_AUTONEG) ?
"autong enable " : "autong disable ") : ""; 470 471 if (set_settings & HILINK_LINK_SET_SPEED) { 472 speed_level = hinic_ethtool_to_hw_speed_level(speed); 473 err = snprintf(set_link_str, SET_LINK_STR_MAX_LEN, 474 "speed %d ", speed); 475 if (err >= SET_LINK_STR_MAX_LEN) { 476 netif_err(nic_dev, drv, netdev, "Failed to snprintf link speed, function return(%d) and dest_len(%d)\n", 477 err, SET_LINK_STR_MAX_LEN); 478 return -EFAULT; 479 } 480 } 481 482 settings.func_id = HINIC_HWIF_FUNC_IDX(nic_dev->hwdev->hwif); 483 settings.valid_bitmap = set_settings; 484 settings.autoneg = autoneg; 485 settings.speed = speed_level; 486 487 err = hinic_set_link_settings(nic_dev->hwdev, &settings); 488 if (err != HINIC_MGMT_CMD_UNSUPPORTED) { 489 if (err) 490 netif_err(nic_dev, drv, netdev, "Set %s%sfailed\n", 491 autoneg_str, set_link_str); 492 else 493 netif_info(nic_dev, drv, netdev, "Set %s%ssuccessfully\n", 494 autoneg_str, set_link_str); 495 496 return err; 497 } 498 499 return set_link_settings_separate_cmd(nic_dev, set_settings, autoneg, 500 speed); 501 } 502 503 static int set_link_settings(struct net_device *netdev, u8 autoneg, u32 speed) 504 { 505 struct hinic_dev *nic_dev = netdev_priv(netdev); 506 u32 set_settings = 0; 507 int err; 508 509 err = get_link_settings_type(nic_dev, autoneg, speed, &set_settings); 510 if (err) 511 return err; 512 513 if (set_settings) 514 err = hinic_set_settings_to_hw(nic_dev, set_settings, 515 autoneg, speed); 516 else 517 netif_info(nic_dev, drv, netdev, "Nothing changed, exit without setting anything\n"); 518 519 return err; 520 } 521 522 static int hinic_set_link_ksettings(struct net_device *netdev, const struct 523 ethtool_link_ksettings *link_settings) 524 { 525 /* only support to set autoneg and speed */ 526 return set_link_settings(netdev, link_settings->base.autoneg, 527 link_settings->base.speed); 528 } 529 530 static void hinic_get_drvinfo(struct net_device *netdev, 531 struct ethtool_drvinfo *info) 532 { 533 struct hinic_dev *nic_dev = netdev_priv(netdev); 534 u8 mgmt_ver[HINIC_MGMT_VERSION_MAX_LEN] = {0}; 535 struct hinic_hwdev *hwdev = nic_dev->hwdev; 536 struct hinic_hwif *hwif = hwdev->hwif; 537 int err; 538 539 strscpy(info->driver, HINIC_DRV_NAME, sizeof(info->driver)); 540 strscpy(info->bus_info, pci_name(hwif->pdev), sizeof(info->bus_info)); 541 542 err = hinic_get_mgmt_version(nic_dev, mgmt_ver); 543 if (err) 544 return; 545 546 snprintf(info->fw_version, sizeof(info->fw_version), "%s", mgmt_ver); 547 } 548 549 static void hinic_get_ringparam(struct net_device *netdev, 550 struct ethtool_ringparam *ring) 551 { 552 struct hinic_dev *nic_dev = netdev_priv(netdev); 553 554 ring->rx_max_pending = HINIC_MAX_QUEUE_DEPTH; 555 ring->tx_max_pending = HINIC_MAX_QUEUE_DEPTH; 556 ring->rx_pending = nic_dev->rq_depth; 557 ring->tx_pending = nic_dev->sq_depth; 558 } 559 560 static int check_ringparam_valid(struct hinic_dev *nic_dev, 561 struct ethtool_ringparam *ring) 562 { 563 if (ring->rx_jumbo_pending || ring->rx_mini_pending) { 564 netif_err(nic_dev, drv, nic_dev->netdev, 565 "Unsupported rx_jumbo_pending/rx_mini_pending\n"); 566 return -EINVAL; 567 } 568 569 if (ring->tx_pending > HINIC_MAX_QUEUE_DEPTH || 570 ring->tx_pending < HINIC_MIN_QUEUE_DEPTH || 571 ring->rx_pending > HINIC_MAX_QUEUE_DEPTH || 572 ring->rx_pending < HINIC_MIN_QUEUE_DEPTH) { 573 netif_err(nic_dev, drv, nic_dev->netdev, 574 "Queue depth out of range [%d-%d]\n", 575 HINIC_MIN_QUEUE_DEPTH, HINIC_MAX_QUEUE_DEPTH); 576 return -EINVAL; 577 } 578 579 return 0; 580 } 581 582 static int 
static int hinic_set_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *ring)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	u16 new_sq_depth, new_rq_depth;
	int err;

	err = check_ringparam_valid(nic_dev, ring);
	if (err)
		return err;

	new_sq_depth = (u16)(1U << (u16)ilog2(ring->tx_pending));
	new_rq_depth = (u16)(1U << (u16)ilog2(ring->rx_pending));

	if (new_sq_depth == nic_dev->sq_depth &&
	    new_rq_depth == nic_dev->rq_depth)
		return 0;

	netif_info(nic_dev, drv, netdev,
		   "Change Tx/Rx ring depth from %d/%d to %d/%d\n",
		   nic_dev->sq_depth, nic_dev->rq_depth,
		   new_sq_depth, new_rq_depth);

	nic_dev->sq_depth = new_sq_depth;
	nic_dev->rq_depth = new_rq_depth;

	if (netif_running(netdev)) {
		netif_info(nic_dev, drv, netdev, "Restarting netdev\n");
		err = hinic_close(netdev);
		if (err) {
			netif_err(nic_dev, drv, netdev,
				  "Failed to close netdev\n");
			return -EFAULT;
		}

		err = hinic_open(netdev);
		if (err) {
			netif_err(nic_dev, drv, netdev,
				  "Failed to open netdev\n");
			return -EFAULT;
		}
	}

	return 0;
}

static int __hinic_get_coalesce(struct net_device *netdev,
				struct ethtool_coalesce *coal, u16 queue)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	struct hinic_intr_coal_info *rx_intr_coal_info;
	struct hinic_intr_coal_info *tx_intr_coal_info;

	if (queue == COALESCE_ALL_QUEUE) {
		/* get tx/rx irq0 as default parameters */
		rx_intr_coal_info = &nic_dev->rx_intr_coalesce[0];
		tx_intr_coal_info = &nic_dev->tx_intr_coalesce[0];
	} else {
		if (queue >= nic_dev->num_qps) {
			netif_err(nic_dev, drv, netdev,
				  "Invalid queue_id: %d\n", queue);
			return -EINVAL;
		}
		rx_intr_coal_info = &nic_dev->rx_intr_coalesce[queue];
		tx_intr_coal_info = &nic_dev->tx_intr_coalesce[queue];
	}

	/* coalesce_timer is in unit of 9us */
	coal->rx_coalesce_usecs = rx_intr_coal_info->coalesce_timer_cfg *
			COALESCE_TIMER_CFG_UNIT;
	/* coalesced_frames is in unit of 8 */
	coal->rx_max_coalesced_frames = rx_intr_coal_info->pending_limt *
			COALESCE_PENDING_LIMIT_UNIT;
	coal->tx_coalesce_usecs = tx_intr_coal_info->coalesce_timer_cfg *
			COALESCE_TIMER_CFG_UNIT;
	coal->tx_max_coalesced_frames = tx_intr_coal_info->pending_limt *
			COALESCE_PENDING_LIMIT_UNIT;

	return 0;
}

static int is_coalesce_exceed_limit(const struct ethtool_coalesce *coal)
{
	if (coal->rx_coalesce_usecs > COALESCE_MAX_TIMER_CFG ||
	    coal->rx_max_coalesced_frames > COALESCE_MAX_PENDING_LIMIT ||
	    coal->tx_coalesce_usecs > COALESCE_MAX_TIMER_CFG ||
	    coal->tx_max_coalesced_frames > COALESCE_MAX_PENDING_LIMIT)
		return -ERANGE;

	return 0;
}

static int set_queue_coalesce(struct hinic_dev *nic_dev, u16 q_id,
			      struct hinic_intr_coal_info *coal,
			      bool set_rx_coal)
{
	struct hinic_intr_coal_info *intr_coal = NULL;
	struct hinic_msix_config interrupt_info = {0};
	struct net_device *netdev = nic_dev->netdev;
	u16 msix_idx;
	int err;
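
	/* Cache the requested parameters; they are only written to hardware
	 * below when the interface is up and the queue is in use.
	 */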
&nic_dev->rx_intr_coalesce[q_id] : 685 &nic_dev->tx_intr_coalesce[q_id]; 686 687 intr_coal->coalesce_timer_cfg = coal->coalesce_timer_cfg; 688 intr_coal->pending_limt = coal->pending_limt; 689 690 /* netdev not running or qp not in using, 691 * don't need to set coalesce to hw 692 */ 693 if (!(nic_dev->flags & HINIC_INTF_UP) || 694 q_id >= nic_dev->num_qps) 695 return 0; 696 697 msix_idx = set_rx_coal ? nic_dev->rxqs[q_id].rq->msix_entry : 698 nic_dev->txqs[q_id].sq->msix_entry; 699 interrupt_info.msix_index = msix_idx; 700 interrupt_info.coalesce_timer_cnt = intr_coal->coalesce_timer_cfg; 701 interrupt_info.pending_cnt = intr_coal->pending_limt; 702 interrupt_info.resend_timer_cnt = intr_coal->resend_timer_cfg; 703 704 err = hinic_set_interrupt_cfg(nic_dev->hwdev, &interrupt_info); 705 if (err) 706 netif_warn(nic_dev, drv, netdev, 707 "Failed to set %s queue%d coalesce", 708 set_rx_coal ? "rx" : "tx", q_id); 709 710 return err; 711 } 712 713 static int __set_hw_coal_param(struct hinic_dev *nic_dev, 714 struct hinic_intr_coal_info *intr_coal, 715 u16 queue, bool set_rx_coal) 716 { 717 int err; 718 u16 i; 719 720 if (queue == COALESCE_ALL_QUEUE) { 721 for (i = 0; i < nic_dev->max_qps; i++) { 722 err = set_queue_coalesce(nic_dev, i, intr_coal, 723 set_rx_coal); 724 if (err) 725 return err; 726 } 727 } else { 728 if (queue >= nic_dev->num_qps) { 729 netif_err(nic_dev, drv, nic_dev->netdev, 730 "Invalid queue_id: %d\n", queue); 731 return -EINVAL; 732 } 733 err = set_queue_coalesce(nic_dev, queue, intr_coal, 734 set_rx_coal); 735 if (err) 736 return err; 737 } 738 739 return 0; 740 } 741 742 static int __hinic_set_coalesce(struct net_device *netdev, 743 struct ethtool_coalesce *coal, u16 queue) 744 { 745 struct hinic_dev *nic_dev = netdev_priv(netdev); 746 struct hinic_intr_coal_info rx_intr_coal = {0}; 747 struct hinic_intr_coal_info tx_intr_coal = {0}; 748 bool set_rx_coal = false; 749 bool set_tx_coal = false; 750 int err; 751 752 err = is_coalesce_exceed_limit(coal); 753 if (err) 754 return err; 755 756 if (coal->rx_coalesce_usecs || coal->rx_max_coalesced_frames) { 757 rx_intr_coal.coalesce_timer_cfg = 758 (u8)(coal->rx_coalesce_usecs / COALESCE_TIMER_CFG_UNIT); 759 rx_intr_coal.pending_limt = (u8)(coal->rx_max_coalesced_frames / 760 COALESCE_PENDING_LIMIT_UNIT); 761 set_rx_coal = true; 762 } 763 764 if (coal->tx_coalesce_usecs || coal->tx_max_coalesced_frames) { 765 tx_intr_coal.coalesce_timer_cfg = 766 (u8)(coal->tx_coalesce_usecs / COALESCE_TIMER_CFG_UNIT); 767 tx_intr_coal.pending_limt = (u8)(coal->tx_max_coalesced_frames / 768 COALESCE_PENDING_LIMIT_UNIT); 769 set_tx_coal = true; 770 } 771 772 /* setting coalesce timer or pending limit to zero will disable 773 * coalesce 774 */ 775 if (set_rx_coal && (!rx_intr_coal.coalesce_timer_cfg || 776 !rx_intr_coal.pending_limt)) 777 netif_warn(nic_dev, drv, netdev, "RX coalesce will be disabled\n"); 778 if (set_tx_coal && (!tx_intr_coal.coalesce_timer_cfg || 779 !tx_intr_coal.pending_limt)) 780 netif_warn(nic_dev, drv, netdev, "TX coalesce will be disabled\n"); 781 782 if (set_rx_coal) { 783 err = __set_hw_coal_param(nic_dev, &rx_intr_coal, queue, true); 784 if (err) 785 return err; 786 } 787 if (set_tx_coal) { 788 err = __set_hw_coal_param(nic_dev, &tx_intr_coal, queue, false); 789 if (err) 790 return err; 791 } 792 return 0; 793 } 794 795 static int hinic_get_coalesce(struct net_device *netdev, 796 struct ethtool_coalesce *coal, 797 struct kernel_ethtool_coalesce *kernel_coal, 798 struct netlink_ext_ack *extack) 799 { 800 return 
	return __hinic_get_coalesce(netdev, coal, COALESCE_ALL_QUEUE);
}

static int hinic_set_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *coal,
			      struct kernel_ethtool_coalesce *kernel_coal,
			      struct netlink_ext_ack *extack)
{
	return __hinic_set_coalesce(netdev, coal, COALESCE_ALL_QUEUE);
}

static int hinic_get_per_queue_coalesce(struct net_device *netdev, u32 queue,
					struct ethtool_coalesce *coal)
{
	return __hinic_get_coalesce(netdev, coal, queue);
}

static int hinic_set_per_queue_coalesce(struct net_device *netdev, u32 queue,
					struct ethtool_coalesce *coal)
{
	return __hinic_set_coalesce(netdev, coal, queue);
}

static void hinic_get_pauseparam(struct net_device *netdev,
				 struct ethtool_pauseparam *pause)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	struct hinic_pause_config pause_info = {0};
	struct hinic_nic_cfg *nic_cfg;
	int err;

	nic_cfg = &nic_dev->hwdev->func_to_io.nic_cfg;

	err = hinic_get_hw_pause_info(nic_dev->hwdev, &pause_info);
	if (!err) {
		pause->autoneg = pause_info.auto_neg;
		if (nic_cfg->pause_set || !pause_info.auto_neg) {
			pause->rx_pause = nic_cfg->rx_pause;
			pause->tx_pause = nic_cfg->tx_pause;
		} else {
			pause->rx_pause = pause_info.rx_pause;
			pause->tx_pause = pause_info.tx_pause;
		}
	}
}

static int hinic_set_pauseparam(struct net_device *netdev,
				struct ethtool_pauseparam *pause)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	struct hinic_pause_config pause_info = {0};
	struct hinic_port_cap port_cap = {0};
	int err;

	err = hinic_port_get_cap(nic_dev, &port_cap);
	if (err)
		return -EIO;

	if (pause->autoneg != port_cap.autoneg_state)
		return -EOPNOTSUPP;

	pause_info.auto_neg = pause->autoneg;
	pause_info.rx_pause = pause->rx_pause;
	pause_info.tx_pause = pause->tx_pause;

	mutex_lock(&nic_dev->hwdev->func_to_io.nic_cfg.cfg_mutex);
	err = hinic_set_hw_pause_info(nic_dev->hwdev, &pause_info);
	if (err) {
		mutex_unlock(&nic_dev->hwdev->func_to_io.nic_cfg.cfg_mutex);
		return err;
	}
	nic_dev->hwdev->func_to_io.nic_cfg.pause_set = true;
	nic_dev->hwdev->func_to_io.nic_cfg.auto_neg = pause->autoneg;
	nic_dev->hwdev->func_to_io.nic_cfg.rx_pause = pause->rx_pause;
	nic_dev->hwdev->func_to_io.nic_cfg.tx_pause = pause->tx_pause;
	mutex_unlock(&nic_dev->hwdev->func_to_io.nic_cfg.cfg_mutex);

	return 0;
}

static void hinic_get_channels(struct net_device *netdev,
			       struct ethtool_channels *channels)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	struct hinic_hwdev *hwdev = nic_dev->hwdev;

	channels->max_combined = nic_dev->max_qps;
	channels->combined_count = hinic_hwdev_num_qps(hwdev);
}

static int hinic_set_channels(struct net_device *netdev,
			      struct ethtool_channels *channels)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	unsigned int count = channels->combined_count;
	int err;

	netif_info(nic_dev, drv, netdev, "Set max combined queue number from %d to %d\n",
		   hinic_hwdev_num_qps(nic_dev->hwdev), count);

	if (netif_running(netdev)) {
		netif_info(nic_dev, drv, netdev, "Restarting netdev\n");
		hinic_close(netdev);

		nic_dev->hwdev->nic_cap.num_qps = count;

		err = hinic_open(netdev);
		if (err) {
			netif_err(nic_dev, drv, netdev,
				  "Failed to open netdev\n");
			return -EFAULT;
		}
	} else {
		nic_dev->hwdev->nic_cap.num_qps = count;
	}

	return 0;
}

static int hinic_get_rss_hash_opts(struct hinic_dev *nic_dev,
				   struct ethtool_rxnfc *cmd)
{
	struct hinic_rss_type rss_type = { 0 };
	int err;

	cmd->data = 0;

	if (!(nic_dev->flags & HINIC_RSS_ENABLE))
		return 0;

	err = hinic_get_rss_type(nic_dev, nic_dev->rss_tmpl_idx,
				 &rss_type);
	if (err)
		return err;

	cmd->data = RXH_IP_SRC | RXH_IP_DST;
	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
		if (rss_type.tcp_ipv4)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
	case TCP_V6_FLOW:
		if (rss_type.tcp_ipv6)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
	case UDP_V4_FLOW:
		if (rss_type.udp_ipv4)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
	case UDP_V6_FLOW:
		if (rss_type.udp_ipv6)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		break;
	default:
		cmd->data = 0;
		return -EINVAL;
	}

	return 0;
}

static int set_l4_rss_hash_ops(struct ethtool_rxnfc *cmd,
			       struct hinic_rss_type *rss_type)
{
	u8 rss_l4_en = 0;

	switch (cmd->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
	case 0:
		rss_l4_en = 0;
		break;
	case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
		rss_l4_en = 1;
		break;
	default:
		return -EINVAL;
	}

	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
		rss_type->tcp_ipv4 = rss_l4_en;
		break;
	case TCP_V6_FLOW:
		rss_type->tcp_ipv6 = rss_l4_en;
		break;
	case UDP_V4_FLOW:
		rss_type->udp_ipv4 = rss_l4_en;
		break;
	case UDP_V6_FLOW:
		rss_type->udp_ipv6 = rss_l4_en;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int hinic_set_rss_hash_opts(struct hinic_dev *nic_dev,
				   struct ethtool_rxnfc *cmd)
{
	struct hinic_rss_type *rss_type = &nic_dev->rss_type;
	int err;

	if (!(nic_dev->flags & HINIC_RSS_ENABLE)) {
		cmd->data = 0;
		return -EOPNOTSUPP;
	}

	/* RSS does not support anything other than hashing
	 * to queues on src and dst IPs and ports
	 */
	if (cmd->data & ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 |
	    RXH_L4_B_2_3))
		return -EINVAL;

	/* We need at least the IP SRC and DEST fields for hashing */
	if (!(cmd->data & RXH_IP_SRC) || !(cmd->data & RXH_IP_DST))
		return -EINVAL;

	err = hinic_get_rss_type(nic_dev,
				 nic_dev->rss_tmpl_idx, rss_type);
	if (err)
		return -EFAULT;

	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
	case UDP_V4_FLOW:
	case UDP_V6_FLOW:
		err = set_l4_rss_hash_ops(cmd, rss_type);
		if (err)
			return err;
		break;
	case IPV4_FLOW:
		rss_type->ipv4 = 1;
		break;
	case IPV6_FLOW:
		rss_type->ipv6 = 1;
		break;
	default:
		return -EINVAL;
	}

	err = hinic_set_rss_type(nic_dev, nic_dev->rss_tmpl_idx,
				 *rss_type);
	if (err)
		return -EFAULT;

	return 0;
}

static int __set_rss_rxfh(struct net_device *netdev,
			  const u32 *indir, const u8 *key)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	int err;

	if (indir) {
		if (!nic_dev->rss_indir_user) {
			nic_dev->rss_indir_user =
				kzalloc(sizeof(u32) * HINIC_RSS_INDIR_SIZE,
					GFP_KERNEL);
			if (!nic_dev->rss_indir_user)
				return -ENOMEM;
		}

		memcpy(nic_dev->rss_indir_user, indir,
		       sizeof(u32) * HINIC_RSS_INDIR_SIZE);

		err = hinic_rss_set_indir_tbl(nic_dev,
					      nic_dev->rss_tmpl_idx, indir);
		if (err)
			return -EFAULT;
	}

	if (key) {
		if (!nic_dev->rss_hkey_user) {
			nic_dev->rss_hkey_user =
				kzalloc(HINIC_RSS_KEY_SIZE * 2, GFP_KERNEL);

			if (!nic_dev->rss_hkey_user)
				return -ENOMEM;
		}

		memcpy(nic_dev->rss_hkey_user, key, HINIC_RSS_KEY_SIZE);

		err = hinic_rss_set_template_tbl(nic_dev,
						 nic_dev->rss_tmpl_idx, key);
		if (err)
			return -EFAULT;
	}

	return 0;
}

static int hinic_get_rxnfc(struct net_device *netdev,
			   struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	int err = 0;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = nic_dev->num_qps;
		break;
	case ETHTOOL_GRXFH:
		err = hinic_get_rss_hash_opts(nic_dev, cmd);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int hinic_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	int err = 0;

	switch (cmd->cmd) {
	case ETHTOOL_SRXFH:
		err = hinic_set_rss_hash_opts(nic_dev, cmd);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int hinic_get_rxfh(struct net_device *netdev,
			  u32 *indir, u8 *key, u8 *hfunc)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	u8 hash_engine_type = 0;
	int err = 0;

	if (!(nic_dev->flags & HINIC_RSS_ENABLE))
		return -EOPNOTSUPP;

	if (hfunc) {
		err = hinic_rss_get_hash_engine(nic_dev,
						nic_dev->rss_tmpl_idx,
						&hash_engine_type);
		if (err)
			return -EFAULT;

		*hfunc = hash_engine_type ? ETH_RSS_HASH_TOP : ETH_RSS_HASH_XOR;
	}

	if (indir) {
		err = hinic_rss_get_indir_tbl(nic_dev,
					      nic_dev->rss_tmpl_idx, indir);
		if (err)
			return -EFAULT;
	}

	if (key)
		err = hinic_rss_get_template_tbl(nic_dev,
						 nic_dev->rss_tmpl_idx, key);

	return err;
}

static int hinic_set_rxfh(struct net_device *netdev, const u32 *indir,
			  const u8 *key, const u8 hfunc)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	int err = 0;

	if (!(nic_dev->flags & HINIC_RSS_ENABLE))
		return -EOPNOTSUPP;

	if (hfunc != ETH_RSS_HASH_NO_CHANGE) {
		if (hfunc != ETH_RSS_HASH_TOP && hfunc != ETH_RSS_HASH_XOR)
			return -EOPNOTSUPP;

		nic_dev->rss_hash_engine = (hfunc == ETH_RSS_HASH_XOR) ?
			HINIC_RSS_HASH_ENGINE_TYPE_XOR :
			HINIC_RSS_HASH_ENGINE_TYPE_TOEP;
		err = hinic_rss_set_hash_engine
			(nic_dev, nic_dev->rss_tmpl_idx,
			 nic_dev->rss_hash_engine);
		if (err)
			return -EFAULT;
	}

	err = __set_rss_rxfh(netdev, indir, key);

	return err;
}

static u32 hinic_get_rxfh_key_size(struct net_device *netdev)
{
	return HINIC_RSS_KEY_SIZE;
}

static u32 hinic_get_rxfh_indir_size(struct net_device *netdev)
{
	return HINIC_RSS_INDIR_SIZE;
}

#define ARRAY_LEN(arr) ((int)((int)sizeof(arr) / (int)sizeof(arr[0])))

#define HINIC_FUNC_STAT(_stat_item) { \
	.name = #_stat_item, \
	.size = sizeof_field(struct hinic_vport_stats, _stat_item), \
	.offset = offsetof(struct hinic_vport_stats, _stat_item) \
}

static struct hinic_stats hinic_function_stats[] = {
	HINIC_FUNC_STAT(tx_unicast_pkts_vport),
	HINIC_FUNC_STAT(tx_unicast_bytes_vport),
	HINIC_FUNC_STAT(tx_multicast_pkts_vport),
	HINIC_FUNC_STAT(tx_multicast_bytes_vport),
	HINIC_FUNC_STAT(tx_broadcast_pkts_vport),
	HINIC_FUNC_STAT(tx_broadcast_bytes_vport),

	HINIC_FUNC_STAT(rx_unicast_pkts_vport),
	HINIC_FUNC_STAT(rx_unicast_bytes_vport),
	HINIC_FUNC_STAT(rx_multicast_pkts_vport),
	HINIC_FUNC_STAT(rx_multicast_bytes_vport),
	HINIC_FUNC_STAT(rx_broadcast_pkts_vport),
	HINIC_FUNC_STAT(rx_broadcast_bytes_vport),

	HINIC_FUNC_STAT(tx_discard_vport),
	HINIC_FUNC_STAT(rx_discard_vport),
	HINIC_FUNC_STAT(tx_err_vport),
	HINIC_FUNC_STAT(rx_err_vport),
};

static char hinic_test_strings[][ETH_GSTRING_LEN] = {
	"Internal lb test (on/offline)",
	"External lb test (external_lb)",
};

#define HINIC_PORT_STAT(_stat_item) { \
	.name = #_stat_item, \
	.size = sizeof_field(struct hinic_phy_port_stats, _stat_item), \
	.offset = offsetof(struct hinic_phy_port_stats, _stat_item) \
}

static struct hinic_stats hinic_port_stats[] = {
	HINIC_PORT_STAT(mac_rx_total_pkt_num),
	HINIC_PORT_STAT(mac_rx_total_oct_num),
	HINIC_PORT_STAT(mac_rx_bad_pkt_num),
	HINIC_PORT_STAT(mac_rx_bad_oct_num),
	HINIC_PORT_STAT(mac_rx_good_pkt_num),
	HINIC_PORT_STAT(mac_rx_good_oct_num),
	HINIC_PORT_STAT(mac_rx_uni_pkt_num),
	HINIC_PORT_STAT(mac_rx_multi_pkt_num),
	HINIC_PORT_STAT(mac_rx_broad_pkt_num),
	HINIC_PORT_STAT(mac_tx_total_pkt_num),
	HINIC_PORT_STAT(mac_tx_total_oct_num),
	HINIC_PORT_STAT(mac_tx_bad_pkt_num),
	HINIC_PORT_STAT(mac_tx_bad_oct_num),
	HINIC_PORT_STAT(mac_tx_good_pkt_num),
	HINIC_PORT_STAT(mac_tx_good_oct_num),
	HINIC_PORT_STAT(mac_tx_uni_pkt_num),
	HINIC_PORT_STAT(mac_tx_multi_pkt_num),
	HINIC_PORT_STAT(mac_tx_broad_pkt_num),
	HINIC_PORT_STAT(mac_rx_fragment_pkt_num),
	HINIC_PORT_STAT(mac_rx_undersize_pkt_num),
	HINIC_PORT_STAT(mac_rx_undermin_pkt_num),
	HINIC_PORT_STAT(mac_rx_64_oct_pkt_num),
	HINIC_PORT_STAT(mac_rx_65_127_oct_pkt_num),
	HINIC_PORT_STAT(mac_rx_128_255_oct_pkt_num),
	HINIC_PORT_STAT(mac_rx_256_511_oct_pkt_num),
	HINIC_PORT_STAT(mac_rx_512_1023_oct_pkt_num),
	HINIC_PORT_STAT(mac_rx_1024_1518_oct_pkt_num),
	HINIC_PORT_STAT(mac_rx_1519_2047_oct_pkt_num),
	HINIC_PORT_STAT(mac_rx_2048_4095_oct_pkt_num),
	HINIC_PORT_STAT(mac_rx_4096_8191_oct_pkt_num),
	HINIC_PORT_STAT(mac_rx_8192_9216_oct_pkt_num),
	HINIC_PORT_STAT(mac_rx_9217_12287_oct_pkt_num),
	HINIC_PORT_STAT(mac_rx_12288_16383_oct_pkt_num),
	HINIC_PORT_STAT(mac_rx_1519_max_good_pkt_num),
	HINIC_PORT_STAT(mac_rx_1519_max_bad_pkt_num),
	HINIC_PORT_STAT(mac_rx_oversize_pkt_num),
	HINIC_PORT_STAT(mac_rx_jabber_pkt_num),
	HINIC_PORT_STAT(mac_rx_pause_num),
	HINIC_PORT_STAT(mac_rx_pfc_pkt_num),
	HINIC_PORT_STAT(mac_rx_pfc_pri0_pkt_num),
	HINIC_PORT_STAT(mac_rx_pfc_pri1_pkt_num),
	HINIC_PORT_STAT(mac_rx_pfc_pri2_pkt_num),
	HINIC_PORT_STAT(mac_rx_pfc_pri3_pkt_num),
	HINIC_PORT_STAT(mac_rx_pfc_pri4_pkt_num),
	HINIC_PORT_STAT(mac_rx_pfc_pri5_pkt_num),
	HINIC_PORT_STAT(mac_rx_pfc_pri6_pkt_num),
	HINIC_PORT_STAT(mac_rx_pfc_pri7_pkt_num),
	HINIC_PORT_STAT(mac_rx_control_pkt_num),
	HINIC_PORT_STAT(mac_rx_sym_err_pkt_num),
	HINIC_PORT_STAT(mac_rx_fcs_err_pkt_num),
	HINIC_PORT_STAT(mac_rx_send_app_good_pkt_num),
	HINIC_PORT_STAT(mac_rx_send_app_bad_pkt_num),
	HINIC_PORT_STAT(mac_tx_fragment_pkt_num),
	HINIC_PORT_STAT(mac_tx_undersize_pkt_num),
	HINIC_PORT_STAT(mac_tx_undermin_pkt_num),
	HINIC_PORT_STAT(mac_tx_64_oct_pkt_num),
	HINIC_PORT_STAT(mac_tx_65_127_oct_pkt_num),
	HINIC_PORT_STAT(mac_tx_128_255_oct_pkt_num),
	HINIC_PORT_STAT(mac_tx_256_511_oct_pkt_num),
	HINIC_PORT_STAT(mac_tx_512_1023_oct_pkt_num),
	HINIC_PORT_STAT(mac_tx_1024_1518_oct_pkt_num),
	HINIC_PORT_STAT(mac_tx_1519_2047_oct_pkt_num),
	HINIC_PORT_STAT(mac_tx_2048_4095_oct_pkt_num),
	HINIC_PORT_STAT(mac_tx_4096_8191_oct_pkt_num),
	HINIC_PORT_STAT(mac_tx_8192_9216_oct_pkt_num),
	HINIC_PORT_STAT(mac_tx_9217_12287_oct_pkt_num),
	HINIC_PORT_STAT(mac_tx_12288_16383_oct_pkt_num),
	HINIC_PORT_STAT(mac_tx_1519_max_good_pkt_num),
	HINIC_PORT_STAT(mac_tx_1519_max_bad_pkt_num),
	HINIC_PORT_STAT(mac_tx_oversize_pkt_num),
	HINIC_PORT_STAT(mac_tx_jabber_pkt_num),
	HINIC_PORT_STAT(mac_tx_pause_num),
	HINIC_PORT_STAT(mac_tx_pfc_pkt_num),
	HINIC_PORT_STAT(mac_tx_pfc_pri0_pkt_num),
	HINIC_PORT_STAT(mac_tx_pfc_pri1_pkt_num),
	HINIC_PORT_STAT(mac_tx_pfc_pri2_pkt_num),
	HINIC_PORT_STAT(mac_tx_pfc_pri3_pkt_num),
	HINIC_PORT_STAT(mac_tx_pfc_pri4_pkt_num),
	HINIC_PORT_STAT(mac_tx_pfc_pri5_pkt_num),
	HINIC_PORT_STAT(mac_tx_pfc_pri6_pkt_num),
	HINIC_PORT_STAT(mac_tx_pfc_pri7_pkt_num),
	HINIC_PORT_STAT(mac_tx_control_pkt_num),
	HINIC_PORT_STAT(mac_tx_err_all_pkt_num),
	HINIC_PORT_STAT(mac_tx_from_app_good_pkt_num),
	HINIC_PORT_STAT(mac_tx_from_app_bad_pkt_num),
};

#define HINIC_TXQ_STAT(_stat_item) { \
	.name = "txq%d_"#_stat_item, \
	.size = sizeof_field(struct hinic_txq_stats, _stat_item), \
	.offset = offsetof(struct hinic_txq_stats, _stat_item) \
}

static struct hinic_stats hinic_tx_queue_stats[] = {
	HINIC_TXQ_STAT(pkts),
	HINIC_TXQ_STAT(bytes),
	HINIC_TXQ_STAT(tx_busy),
	HINIC_TXQ_STAT(tx_wake),
	HINIC_TXQ_STAT(tx_dropped),
	HINIC_TXQ_STAT(big_frags_pkts),
};

#define HINIC_RXQ_STAT(_stat_item) { \
	.name = "rxq%d_"#_stat_item, \
	.size = sizeof_field(struct hinic_rxq_stats, _stat_item), \
	.offset = offsetof(struct hinic_rxq_stats, _stat_item) \
}

static struct hinic_stats hinic_rx_queue_stats[] = {
	HINIC_RXQ_STAT(pkts),
	HINIC_RXQ_STAT(bytes),
	HINIC_RXQ_STAT(errors),
	HINIC_RXQ_STAT(csum_errors),
	HINIC_RXQ_STAT(other_errors),
};

static void get_drv_queue_stats(struct hinic_dev *nic_dev, u64 *data)
{
	struct hinic_txq_stats txq_stats;
	struct hinic_rxq_stats rxq_stats;
	u16 i = 0, j = 0, qid = 0;
	char *p;

	for (qid = 0; qid < nic_dev->num_qps; qid++) {
		if (!nic_dev->txqs)
			break;

		hinic_txq_get_stats(&nic_dev->txqs[qid], &txq_stats);
		for (j = 0; j < ARRAY_LEN(hinic_tx_queue_stats); j++, i++) {
			p = (char *)&txq_stats +
				hinic_tx_queue_stats[j].offset;
			data[i] = (hinic_tx_queue_stats[j].size ==
					sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
		}
	}

	for (qid = 0; qid < nic_dev->num_qps; qid++) {
		if (!nic_dev->rxqs)
			break;

		hinic_rxq_get_stats(&nic_dev->rxqs[qid], &rxq_stats);
		for (j = 0; j < ARRAY_LEN(hinic_rx_queue_stats); j++, i++) {
			p = (char *)&rxq_stats +
				hinic_rx_queue_stats[j].offset;
			data[i] = (hinic_rx_queue_stats[j].size ==
					sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
		}
	}
}

static void hinic_get_ethtool_stats(struct net_device *netdev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	struct hinic_vport_stats vport_stats = {0};
	struct hinic_phy_port_stats *port_stats;
	u16 i = 0, j = 0;
	char *p;
	int err;

	err = hinic_get_vport_stats(nic_dev, &vport_stats);
	if (err)
		netif_err(nic_dev, drv, netdev,
			  "Failed to get vport stats from firmware\n");

	for (j = 0; j < ARRAY_LEN(hinic_function_stats); j++, i++) {
		p = (char *)&vport_stats + hinic_function_stats[j].offset;
		data[i] = (hinic_function_stats[j].size ==
				sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}

	port_stats = kzalloc(sizeof(*port_stats), GFP_KERNEL);
	if (!port_stats) {
		memset(&data[i], 0,
		       ARRAY_LEN(hinic_port_stats) * sizeof(*data));
		i += ARRAY_LEN(hinic_port_stats);
		goto get_drv_stats;
	}

	err = hinic_get_phy_port_stats(nic_dev, port_stats);
	if (err)
		netif_err(nic_dev, drv, netdev,
			  "Failed to get port stats from firmware\n");

	for (j = 0; j < ARRAY_LEN(hinic_port_stats); j++, i++) {
		p = (char *)port_stats + hinic_port_stats[j].offset;
		data[i] = (hinic_port_stats[j].size ==
				sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}

	kfree(port_stats);

get_drv_stats:
	get_drv_queue_stats(nic_dev, data + i);
}

static int hinic_get_sset_count(struct net_device *netdev, int sset)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	int count, q_num;

	switch (sset) {
	case ETH_SS_TEST:
		return ARRAY_LEN(hinic_test_strings);
	case ETH_SS_STATS:
		q_num = nic_dev->num_qps;
		count = ARRAY_LEN(hinic_function_stats) +
			(ARRAY_LEN(hinic_tx_queue_stats) +
			 ARRAY_LEN(hinic_rx_queue_stats)) * q_num;

		count += ARRAY_LEN(hinic_port_stats);

		return count;
	default:
		return -EOPNOTSUPP;
	}
}

static void hinic_get_strings(struct net_device *netdev,
			      u32 stringset, u8 *data)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	char *p = (char *)data;
	u16 i, j;

	switch (stringset) {
	case ETH_SS_TEST:
		memcpy(data, *hinic_test_strings, sizeof(hinic_test_strings));
		return;
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_LEN(hinic_function_stats); i++) {
			memcpy(p, hinic_function_stats[i].name,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < ARRAY_LEN(hinic_port_stats); i++) {
			memcpy(p, hinic_port_stats[i].name,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < nic_dev->num_qps; i++) {
			for (j = 0; j < ARRAY_LEN(hinic_tx_queue_stats); j++) {
				sprintf(p, hinic_tx_queue_stats[j].name, i);
				p += ETH_GSTRING_LEN;
			}
		}

		for (i = 0; i < nic_dev->num_qps; i++) {
			for (j = 0; j < ARRAY_LEN(hinic_rx_queue_stats); j++) {
				sprintf(p, hinic_rx_queue_stats[j].name, i);
				p += ETH_GSTRING_LEN;
			}
		}

		return;
	default:
		return;
	}
}

static int hinic_run_lp_test(struct hinic_dev *nic_dev, u32 test_time)
{
	u8 *lb_test_rx_buf = nic_dev->lb_test_rx_buf;
	struct net_device *netdev = nic_dev->netdev;
	struct sk_buff *skb_tmp = NULL;
	struct sk_buff *skb = NULL;
	u32 cnt = test_time * 5;
	u8 *test_data = NULL;
	u32 i;
	u8 j;

	skb_tmp = alloc_skb(LP_PKT_LEN, GFP_ATOMIC);
	if (!skb_tmp)
		return -ENOMEM;

	test_data = __skb_put(skb_tmp, LP_PKT_LEN);

	memset(test_data, 0xFF, 2 * ETH_ALEN);
	test_data[ETH_ALEN] = 0xFE;
	test_data[2 * ETH_ALEN] = 0x08;
	test_data[2 * ETH_ALEN + 1] = 0x0;

	for (i = ETH_HLEN; i < LP_PKT_LEN; i++)
		test_data[i] = i & 0xFF;

	skb_tmp->queue_mapping = 0;
	skb_tmp->ip_summed = CHECKSUM_COMPLETE;
	skb_tmp->dev = netdev;

	for (i = 0; i < cnt; i++) {
		nic_dev->lb_test_rx_idx = 0;
		memset(lb_test_rx_buf, 0, LP_PKT_CNT * LP_PKT_LEN);

		for (j = 0; j < LP_PKT_CNT; j++) {
			skb = pskb_copy(skb_tmp, GFP_ATOMIC);
			if (!skb) {
				dev_kfree_skb_any(skb_tmp);
				netif_err(nic_dev, drv, netdev,
					  "Copy skb failed for loopback test\n");
				return -ENOMEM;
			}

			/* mark index for every pkt */
			skb->data[LP_PKT_LEN - 1] = j;

			if (hinic_lb_xmit_frame(skb, netdev)) {
				dev_kfree_skb_any(skb);
				dev_kfree_skb_any(skb_tmp);
				netif_err(nic_dev, drv, netdev,
					  "Xmit pkt failed for loopback test\n");
				return -EBUSY;
			}
		}

		/* wait till all pkts received to RX buffer */
		msleep(200);

		for (j = 0; j < LP_PKT_CNT; j++) {
			if (memcmp(lb_test_rx_buf + j * LP_PKT_LEN,
				   skb_tmp->data, LP_PKT_LEN - 1) ||
			    (*(lb_test_rx_buf + j * LP_PKT_LEN +
			       LP_PKT_LEN - 1) != j)) {
				dev_kfree_skb_any(skb_tmp);
				netif_err(nic_dev, drv, netdev,
					  "Compare pkt failed in loopback test(index=0x%02x, data[%d]=0x%02x)\n",
					  j + i * LP_PKT_CNT,
					  LP_PKT_LEN - 1,
					  *(lb_test_rx_buf + j * LP_PKT_LEN +
					    LP_PKT_LEN - 1));
				return -EIO;
			}
		}
	}

	dev_kfree_skb_any(skb_tmp);
	return 0;
}

static int do_lp_test(struct hinic_dev *nic_dev, u32 flags, u32 test_time,
		      enum diag_test_index *test_index)
{
	struct net_device *netdev = nic_dev->netdev;
	u8 *lb_test_rx_buf = NULL;
	int err = 0;

	if (!(flags & ETH_TEST_FL_EXTERNAL_LB)) {
		*test_index = INTERNAL_LP_TEST;
		if (hinic_set_loopback_mode(nic_dev->hwdev,
					    HINIC_INTERNAL_LP_MODE, true)) {
			netif_err(nic_dev, drv, netdev,
				  "Failed to set port loopback mode before loopback test\n");
			return -EIO;
		}
	} else {
		*test_index = EXTERNAL_LP_TEST;
	}

	lb_test_rx_buf = vmalloc(LP_PKT_CNT * LP_PKT_LEN);
	if (!lb_test_rx_buf) {
		err = -ENOMEM;
	} else {
		nic_dev->lb_test_rx_buf = lb_test_rx_buf;
		nic_dev->lb_pkt_len = LP_PKT_LEN;
		nic_dev->flags |= HINIC_LP_TEST;
		err = hinic_run_lp_test(nic_dev, test_time);
		nic_dev->flags &= ~HINIC_LP_TEST;
		msleep(100);
		vfree(lb_test_rx_buf);
		nic_dev->lb_test_rx_buf = NULL;
	}

	if (!(flags & ETH_TEST_FL_EXTERNAL_LB)) {
		if (hinic_set_loopback_mode(nic_dev->hwdev,
					    HINIC_INTERNAL_LP_MODE, false)) {
			netif_err(nic_dev, drv, netdev,
				  "Failed to cancel port loopback mode after loopback test\n");
			err = -EIO;
		}
	}

	return err;
}

static void hinic_diag_test(struct net_device *netdev,
			    struct ethtool_test *eth_test, u64 *data)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	enum hinic_port_link_state link_state;
	enum diag_test_index test_index = 0;
	int err = 0;

	memset(data, 0, DIAG_TEST_MAX * sizeof(u64));

	/* don't support loopback test when netdev is closed. */
	if (!(nic_dev->flags & HINIC_INTF_UP)) {
		netif_err(nic_dev, drv, netdev,
			  "Do not support loopback test when netdev is closed\n");
		eth_test->flags |= ETH_TEST_FL_FAILED;
		data[PORT_DOWN_ERR_IDX] = 1;
		return;
	}

	netif_carrier_off(netdev);
	netif_tx_disable(netdev);

	err = do_lp_test(nic_dev, eth_test->flags, LP_DEFAULT_TIME,
			 &test_index);
	if (err) {
		eth_test->flags |= ETH_TEST_FL_FAILED;
		data[test_index] = 1;
	}

	netif_tx_wake_all_queues(netdev);

	err = hinic_port_link_state(nic_dev, &link_state);
	if (!err && link_state == HINIC_LINK_STATE_UP)
		netif_carrier_on(netdev);
}

static int hinic_set_phys_id(struct net_device *netdev,
			     enum ethtool_phys_id_state state)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	int err = 0;
	u8 port;

	port = nic_dev->hwdev->port_id;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		err = hinic_set_led_status(nic_dev->hwdev, port,
					   HINIC_LED_TYPE_LINK,
					   HINIC_LED_MODE_FORCE_2HZ);
		if (err)
			netif_err(nic_dev, drv, netdev,
				  "Set LED blinking in 2HZ failed\n");
		break;

	case ETHTOOL_ID_INACTIVE:
		err = hinic_reset_led_status(nic_dev->hwdev, port);
		if (err)
			netif_err(nic_dev, drv, netdev,
				  "Reset LED to original status failed\n");
		break;

	default:
		return -EOPNOTSUPP;
	}

	return err;
}

static int hinic_get_module_info(struct net_device *netdev,
				 struct ethtool_modinfo *modinfo)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	u8 sfp_type_ext;
	u8 sfp_type;
	int err;

	err = hinic_get_sfp_type(nic_dev->hwdev, &sfp_type, &sfp_type_ext);
	if (err)
		return err;

	switch (sfp_type) {
	case SFF8024_ID_SFP:
		modinfo->type = ETH_MODULE_SFF_8472;
		modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
		break;
	case SFF8024_ID_QSFP_8438:
		modinfo->type = ETH_MODULE_SFF_8436;
		modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
		break;
	case SFF8024_ID_QSFP_8436_8636:
		if (sfp_type_ext >= 0x3) {
			modinfo->type = ETH_MODULE_SFF_8636;
			modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN;

		} else {
			modinfo->type = ETH_MODULE_SFF_8436;
			modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
		}
		break;
	case SFF8024_ID_QSFP28_8636:
		modinfo->type = ETH_MODULE_SFF_8636;
		modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN;
		break;
	default:
		netif_warn(nic_dev, drv, netdev,
			   "Optical module unknown: 0x%x\n", sfp_type);
		return -EINVAL;
	}

	return 0;
}

static int hinic_get_module_eeprom(struct net_device *netdev,
				   struct ethtool_eeprom *ee, u8 *data)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	u8 sfp_data[STD_SFP_INFO_MAX_SIZE];
	u16 len;
	int err;

	if (!ee->len || ((ee->len + ee->offset) > STD_SFP_INFO_MAX_SIZE))
		return -EINVAL;

	memset(data, 0, ee->len);

	err = hinic_get_sfp_eeprom(nic_dev->hwdev, sfp_data, &len);
	if (err)
		return err;

	memcpy(data, sfp_data + ee->offset, ee->len);

	return 0;
}

static int
hinic_get_link_ext_state(struct net_device *netdev,
			 struct ethtool_link_ext_state_info *link_ext_state_info)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);

	if (netif_carrier_ok(netdev))
		return -ENODATA;
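
	/* Link is down: report a more specific reason when the firmware has
	 * flagged an unplugged cable or an unrecognized module.
	 */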
	if (nic_dev->cable_unplugged)
		link_ext_state_info->link_ext_state =
			ETHTOOL_LINK_EXT_STATE_NO_CABLE;
	else if (nic_dev->module_unrecognized)
		link_ext_state_info->link_ext_state =
			ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH;

	return 0;
}

static const struct ethtool_ops hinic_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
				     ETHTOOL_COALESCE_RX_MAX_FRAMES |
				     ETHTOOL_COALESCE_TX_USECS |
				     ETHTOOL_COALESCE_TX_MAX_FRAMES,

	.get_link_ksettings = hinic_get_link_ksettings,
	.set_link_ksettings = hinic_set_link_ksettings,
	.get_drvinfo = hinic_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_link_ext_state = hinic_get_link_ext_state,
	.get_ringparam = hinic_get_ringparam,
	.set_ringparam = hinic_set_ringparam,
	.get_coalesce = hinic_get_coalesce,
	.set_coalesce = hinic_set_coalesce,
	.get_per_queue_coalesce = hinic_get_per_queue_coalesce,
	.set_per_queue_coalesce = hinic_set_per_queue_coalesce,
	.get_pauseparam = hinic_get_pauseparam,
	.set_pauseparam = hinic_set_pauseparam,
	.get_channels = hinic_get_channels,
	.set_channels = hinic_set_channels,
	.get_rxnfc = hinic_get_rxnfc,
	.set_rxnfc = hinic_set_rxnfc,
	.get_rxfh_key_size = hinic_get_rxfh_key_size,
	.get_rxfh_indir_size = hinic_get_rxfh_indir_size,
	.get_rxfh = hinic_get_rxfh,
	.set_rxfh = hinic_set_rxfh,
	.get_sset_count = hinic_get_sset_count,
	.get_ethtool_stats = hinic_get_ethtool_stats,
	.get_strings = hinic_get_strings,
	.self_test = hinic_diag_test,
	.set_phys_id = hinic_set_phys_id,
	.get_module_info = hinic_get_module_info,
	.get_module_eeprom = hinic_get_module_eeprom,
};

static const struct ethtool_ops hinicvf_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
				     ETHTOOL_COALESCE_RX_MAX_FRAMES |
				     ETHTOOL_COALESCE_TX_USECS |
				     ETHTOOL_COALESCE_TX_MAX_FRAMES,

	.get_link_ksettings = hinic_get_link_ksettings,
	.get_drvinfo = hinic_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_ringparam = hinic_get_ringparam,
	.set_ringparam = hinic_set_ringparam,
	.get_coalesce = hinic_get_coalesce,
	.set_coalesce = hinic_set_coalesce,
	.get_per_queue_coalesce = hinic_get_per_queue_coalesce,
	.set_per_queue_coalesce = hinic_set_per_queue_coalesce,
	.get_channels = hinic_get_channels,
	.set_channels = hinic_set_channels,
	.get_rxnfc = hinic_get_rxnfc,
	.set_rxnfc = hinic_set_rxnfc,
	.get_rxfh_key_size = hinic_get_rxfh_key_size,
	.get_rxfh_indir_size = hinic_get_rxfh_indir_size,
	.get_rxfh = hinic_get_rxfh,
	.set_rxfh = hinic_set_rxfh,
	.get_sset_count = hinic_get_sset_count,
	.get_ethtool_stats = hinic_get_ethtool_stats,
	.get_strings = hinic_get_strings,
};

void hinic_set_ethtool_ops(struct net_device *netdev)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);

	if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
		netdev->ethtool_ops = &hinic_ethtool_ops;
	else
		netdev->ethtool_ops = &hinicvf_ethtool_ops;
}