// SPDX-License-Identifier: GPL-2.0
/* Huawei HiNIC PCI Express Linux driver
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/ethtool.h>
#include <linux/vmalloc.h>
#include <linux/sfp.h>

#include "hinic_hw_qp.h"
#include "hinic_hw_dev.h"
#include "hinic_port.h"
#include "hinic_tx.h"
#include "hinic_rx.h"
#include "hinic_dev.h"

#define SET_LINK_STR_MAX_LEN	16

#define GET_SUPPORTED_MODE	0
#define GET_ADVERTISED_MODE	1

#define ETHTOOL_ADD_SUPPORTED_SPEED_LINK_MODE(ecmd, mode)	\
	((ecmd)->supported |=	\
	(1UL << hw_to_ethtool_link_mode_table[mode].link_mode_bit))
#define ETHTOOL_ADD_ADVERTISED_SPEED_LINK_MODE(ecmd, mode)	\
	((ecmd)->advertising |=	\
	(1UL << hw_to_ethtool_link_mode_table[mode].link_mode_bit))
#define ETHTOOL_ADD_SUPPORTED_LINK_MODE(ecmd, mode)	\
	((ecmd)->supported |= SUPPORTED_##mode)
#define ETHTOOL_ADD_ADVERTISED_LINK_MODE(ecmd, mode)	\
	((ecmd)->advertising |= ADVERTISED_##mode)

#define COALESCE_PENDING_LIMIT_UNIT	8
#define COALESCE_TIMER_CFG_UNIT		9
#define COALESCE_ALL_QUEUE		0xFFFF
#define COALESCE_MAX_PENDING_LIMIT	(255 * COALESCE_PENDING_LIMIT_UNIT)
#define COALESCE_MAX_TIMER_CFG		(255 * COALESCE_TIMER_CFG_UNIT)
#define OBJ_STR_MAX_LEN			32

struct hw2ethtool_link_mode {
	enum ethtool_link_mode_bit_indices link_mode_bit;
	u32 speed;
	enum hinic_link_mode hw_link_mode;
};

struct cmd_link_settings {
	u64 supported;
	u64 advertising;

	u32 speed;
	u8 duplex;
	u8 port;
	u8 autoneg;
};

static u32 hw_to_ethtool_speed[LINK_SPEED_LEVELS] = {
	SPEED_10, SPEED_100,
	SPEED_1000, SPEED_10000,
	SPEED_25000, SPEED_40000,
	SPEED_100000
};

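/* Map each HINIC hardware link mode to its ethtool link-mode bit and speed.
 * hinic_get_link_mode_index() walks this table and returns
 * HINIC_LINK_MODE_NUMBERS when a hardware mode has no entry, so callers
 * must check the returned index before using it.
 */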
static struct hw2ethtool_link_mode
	hw_to_ethtool_link_mode_table[HINIC_LINK_MODE_NUMBERS] = {
	{
		.link_mode_bit = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
		.speed = SPEED_10000,
		.hw_link_mode = HINIC_10GE_BASE_KR,
	},
	{
		.link_mode_bit = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
		.speed = SPEED_40000,
		.hw_link_mode = HINIC_40GE_BASE_KR4,
	},
	{
		.link_mode_bit = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
		.speed = SPEED_40000,
		.hw_link_mode = HINIC_40GE_BASE_CR4,
	},
	{
		.link_mode_bit = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
		.speed = SPEED_100000,
		.hw_link_mode = HINIC_100GE_BASE_KR4,
	},
	{
		.link_mode_bit = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
		.speed = SPEED_100000,
		.hw_link_mode = HINIC_100GE_BASE_CR4,
	},
	{
		.link_mode_bit = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
		.speed = SPEED_25000,
		.hw_link_mode = HINIC_25GE_BASE_KR_S,
	},
	{
		.link_mode_bit = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
		.speed = SPEED_25000,
		.hw_link_mode = HINIC_25GE_BASE_CR_S,
	},
	{
		.link_mode_bit = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
		.speed = SPEED_25000,
		.hw_link_mode = HINIC_25GE_BASE_KR,
	},
	{
		.link_mode_bit = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
		.speed = SPEED_25000,
		.hw_link_mode = HINIC_25GE_BASE_CR,
	},
	{
		.link_mode_bit = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
		.speed = SPEED_1000,
		.hw_link_mode = HINIC_GE_BASE_KX,
	},
};

#define LP_DEFAULT_TIME		5 /* seconds */
#define LP_PKT_LEN		1514

#define PORT_DOWN_ERR_IDX	0
enum diag_test_index {
	INTERNAL_LP_TEST = 0,
	EXTERNAL_LP_TEST = 1,
	DIAG_TEST_MAX = 2,
};

static void set_link_speed(struct ethtool_link_ksettings *link_ksettings,
			   enum hinic_speed speed)
{
	switch (speed) {
	case HINIC_SPEED_10MB_LINK:
		link_ksettings->base.speed = SPEED_10;
		break;

	case HINIC_SPEED_100MB_LINK:
		link_ksettings->base.speed = SPEED_100;
		break;

	case HINIC_SPEED_1000MB_LINK:
		link_ksettings->base.speed = SPEED_1000;
		break;

	case HINIC_SPEED_10GB_LINK:
		link_ksettings->base.speed = SPEED_10000;
		break;

	case HINIC_SPEED_25GB_LINK:
		link_ksettings->base.speed = SPEED_25000;
		break;

	case HINIC_SPEED_40GB_LINK:
		link_ksettings->base.speed = SPEED_40000;
		break;

	case HINIC_SPEED_100GB_LINK:
		link_ksettings->base.speed = SPEED_100000;
		break;

	default:
		link_ksettings->base.speed = SPEED_UNKNOWN;
		break;
	}
}

static int hinic_get_link_mode_index(enum hinic_link_mode link_mode)
{
	int i = 0;

	for (i = 0; i < HINIC_LINK_MODE_NUMBERS; i++) {
		if (link_mode == hw_to_ethtool_link_mode_table[i].hw_link_mode)
			break;
	}

	return i;
}

static void hinic_add_ethtool_link_mode(struct cmd_link_settings *link_settings,
					enum hinic_link_mode hw_link_mode,
					u32 name)
{
	enum hinic_link_mode link_mode;
	int idx = 0;

	for (link_mode = 0; link_mode < HINIC_LINK_MODE_NUMBERS; link_mode++) {
		if (hw_link_mode & ((u32)1 << link_mode)) {
			idx = hinic_get_link_mode_index(link_mode);
			if (idx >= HINIC_LINK_MODE_NUMBERS)
				continue;

			if (name == GET_SUPPORTED_MODE)
				ETHTOOL_ADD_SUPPORTED_SPEED_LINK_MODE
					(link_settings, idx);
			else
				ETHTOOL_ADD_ADVERTISED_SPEED_LINK_MODE
					(link_settings, idx);
		}
	}
}

static void hinic_link_port_type(struct cmd_link_settings *link_settings,
				 enum hinic_port_type port_type)
{
	switch (port_type) {
	case HINIC_PORT_ELEC:
	case HINIC_PORT_TP:
		ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, TP);
		ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, TP);
		link_settings->port = PORT_TP;
		break;

	case HINIC_PORT_AOC:
	case HINIC_PORT_FIBRE:
		ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, FIBRE);
		ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, FIBRE);
		link_settings->port = PORT_FIBRE;
		break;

	case HINIC_PORT_COPPER:
		ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, FIBRE);
		ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, FIBRE);
		link_settings->port = PORT_DA;
		break;

	case HINIC_PORT_BACKPLANE:
		ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, Backplane);
		ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, Backplane);
		link_settings->port = PORT_NONE;
		break;

	default:
		link_settings->port = PORT_OTHER;
		break;
	}
}

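/* Report link settings to ethtool: port type from the port capability,
 * speed/duplex only while the link is up, and supported/advertised modes
 * translated from the firmware link-mode bitmaps. For PFs the advertised
 * pause modes are derived from the current hardware pause configuration.
 */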
static int hinic_get_link_ksettings(struct net_device *netdev,
				    struct ethtool_link_ksettings *link_ksettings)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	struct hinic_link_mode_cmd link_mode = { 0 };
	struct hinic_pause_config pause_info = { 0 };
	struct cmd_link_settings settings = { 0 };
	enum hinic_port_link_state link_state;
	struct hinic_port_cap port_cap;
	int err;

	ethtool_link_ksettings_zero_link_mode(link_ksettings, supported);
	ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);

	link_ksettings->base.speed = SPEED_UNKNOWN;
	link_ksettings->base.autoneg = AUTONEG_DISABLE;
	link_ksettings->base.duplex = DUPLEX_UNKNOWN;

	err = hinic_port_get_cap(nic_dev, &port_cap);
	if (err)
		return err;

	hinic_link_port_type(&settings, port_cap.port_type);
	link_ksettings->base.port = settings.port;

	err = hinic_port_link_state(nic_dev, &link_state);
	if (err)
		return err;

	if (link_state == HINIC_LINK_STATE_UP) {
		set_link_speed(link_ksettings, port_cap.speed);
		link_ksettings->base.duplex =
			(port_cap.duplex == HINIC_DUPLEX_FULL) ?
			DUPLEX_FULL : DUPLEX_HALF;
	}

	if (!!(port_cap.autoneg_cap & HINIC_AUTONEG_SUPPORTED))
		ethtool_link_ksettings_add_link_mode(link_ksettings,
						     advertising, Autoneg);

	if (port_cap.autoneg_state == HINIC_AUTONEG_ACTIVE)
		link_ksettings->base.autoneg = AUTONEG_ENABLE;

	err = hinic_get_link_mode(nic_dev->hwdev, &link_mode);
	if (err || link_mode.supported == HINIC_SUPPORTED_UNKNOWN ||
	    link_mode.advertised == HINIC_SUPPORTED_UNKNOWN)
		return -EIO;

	hinic_add_ethtool_link_mode(&settings, link_mode.supported,
				    GET_SUPPORTED_MODE);
	hinic_add_ethtool_link_mode(&settings, link_mode.advertised,
				    GET_ADVERTISED_MODE);

	if (!HINIC_IS_VF(nic_dev->hwdev->hwif)) {
		err = hinic_get_hw_pause_info(nic_dev->hwdev, &pause_info);
		if (err)
			return err;
		ETHTOOL_ADD_SUPPORTED_LINK_MODE(&settings, Pause);
		if (pause_info.rx_pause && pause_info.tx_pause) {
			ETHTOOL_ADD_ADVERTISED_LINK_MODE(&settings, Pause);
		} else if (pause_info.tx_pause) {
			ETHTOOL_ADD_ADVERTISED_LINK_MODE(&settings, Asym_Pause);
		} else if (pause_info.rx_pause) {
			ETHTOOL_ADD_ADVERTISED_LINK_MODE(&settings, Pause);
			ETHTOOL_ADD_ADVERTISED_LINK_MODE(&settings, Asym_Pause);
		}
	}

	linkmode_copy(link_ksettings->link_modes.supported,
		      (unsigned long *)&settings.supported);
	linkmode_copy(link_ksettings->link_modes.advertising,
		      (unsigned long *)&settings.advertising);

	return 0;
}

static int hinic_ethtool_to_hw_speed_level(u32 speed)
{
	int i;

	for (i = 0; i < LINK_SPEED_LEVELS; i++) {
		if (hw_to_ethtool_speed[i] == speed)
			break;
	}

	return i;
}

static bool hinic_is_support_speed(enum hinic_link_mode supported_link,
				   u32 speed)
{
	enum hinic_link_mode link_mode;
	int idx;

	for (link_mode = 0; link_mode < HINIC_LINK_MODE_NUMBERS; link_mode++) {
		if (!(supported_link & ((u32)1 << link_mode)))
			continue;

		idx = hinic_get_link_mode_index(link_mode);
		if (idx >= HINIC_LINK_MODE_NUMBERS)
			continue;

		if (hw_to_ethtool_link_mode_table[idx].speed == speed)
			return true;
	}

	return false;
}

static bool hinic_is_speed_legal(struct hinic_dev *nic_dev, u32 speed)
{
	struct hinic_link_mode_cmd link_mode = { 0 };
	struct net_device *netdev = nic_dev->netdev;
	enum nic_speed_level speed_level = 0;
	int err;

	err = hinic_get_link_mode(nic_dev->hwdev, &link_mode);
	if (err)
		return false;

	if (link_mode.supported == HINIC_SUPPORTED_UNKNOWN ||
	    link_mode.advertised == HINIC_SUPPORTED_UNKNOWN)
		return false;

	speed_level = hinic_ethtool_to_hw_speed_level(speed);
	if (speed_level >= LINK_SPEED_LEVELS ||
	    !hinic_is_support_speed(link_mode.supported, speed)) {
		netif_err(nic_dev, drv, netdev,
			  "Unsupported speed: %d\n", speed);
		return false;
	}

	return true;
}

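/* Decide which fields of the link settings need to be written to hardware.
 * Autoneg is always (re)programmed when the port supports it; a fixed speed
 * may only be requested while autoneg is disabled.
 */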
static int get_link_settings_type(struct hinic_dev *nic_dev,
				  u8 autoneg, u32 speed, u32 *set_settings)
{
	struct hinic_port_cap port_cap = { 0 };
	int err;

	err = hinic_port_get_cap(nic_dev, &port_cap);
	if (err)
		return err;

	/* always set autonegotiation */
	if (port_cap.autoneg_cap)
		*set_settings |= HILINK_LINK_SET_AUTONEG;

	if (autoneg == AUTONEG_ENABLE) {
		if (!port_cap.autoneg_cap) {
			netif_err(nic_dev, drv, nic_dev->netdev, "Autoneg not supported\n");
			return -EOPNOTSUPP;
		}
	} else if (speed != (u32)SPEED_UNKNOWN) {
		/* set speed only when autoneg is disabled */
		if (!hinic_is_speed_legal(nic_dev, speed))
			return -EINVAL;
		*set_settings |= HILINK_LINK_SET_SPEED;
	} else {
		netif_err(nic_dev, drv, nic_dev->netdev, "Need to set speed when autoneg is off\n");
		return -EOPNOTSUPP;
	}

	return 0;
}

static int set_link_settings_separate_cmd(struct hinic_dev *nic_dev,
					  u32 set_settings, u8 autoneg,
					  u32 speed)
{
	enum nic_speed_level speed_level = 0;
	int err = 0;

	if (set_settings & HILINK_LINK_SET_AUTONEG) {
		err = hinic_set_autoneg(nic_dev->hwdev,
					(autoneg == AUTONEG_ENABLE));
		if (err)
			netif_err(nic_dev, drv, nic_dev->netdev, "%s autoneg failed\n",
				  (autoneg == AUTONEG_ENABLE) ?
				  "Enable" : "Disable");
		else
			netif_info(nic_dev, drv, nic_dev->netdev, "%s autoneg successfully\n",
				   (autoneg == AUTONEG_ENABLE) ?
				   "Enable" : "Disable");
	}

	if (!err && (set_settings & HILINK_LINK_SET_SPEED)) {
		speed_level = hinic_ethtool_to_hw_speed_level(speed);
		err = hinic_set_speed(nic_dev->hwdev, speed_level);
		if (err)
			netif_err(nic_dev, drv, nic_dev->netdev, "Set speed %d failed\n",
				  speed);
		else
			netif_info(nic_dev, drv, nic_dev->netdev, "Set speed %d successfully\n",
				   speed);
	}

	return err;
}

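/* Program the requested settings through the combined link-settings command.
 * Firmware that does not implement it returns HINIC_MGMT_CMD_UNSUPPORTED,
 * in which case autoneg and speed are configured with separate commands.
 */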
"autong enable " : "autong disable ") : ""; 470 471 if (set_settings & HILINK_LINK_SET_SPEED) { 472 speed_level = hinic_ethtool_to_hw_speed_level(speed); 473 err = snprintf(set_link_str, SET_LINK_STR_MAX_LEN, 474 "speed %d ", speed); 475 if (err >= SET_LINK_STR_MAX_LEN) { 476 netif_err(nic_dev, drv, netdev, "Failed to snprintf link speed, function return(%d) and dest_len(%d)\n", 477 err, SET_LINK_STR_MAX_LEN); 478 return -EFAULT; 479 } 480 } 481 482 settings.func_id = HINIC_HWIF_FUNC_IDX(nic_dev->hwdev->hwif); 483 settings.valid_bitmap = set_settings; 484 settings.autoneg = autoneg; 485 settings.speed = speed_level; 486 487 err = hinic_set_link_settings(nic_dev->hwdev, &settings); 488 if (err != HINIC_MGMT_CMD_UNSUPPORTED) { 489 if (err) 490 netif_err(nic_dev, drv, netdev, "Set %s%sfailed\n", 491 autoneg_str, set_link_str); 492 else 493 netif_info(nic_dev, drv, netdev, "Set %s%ssuccessfully\n", 494 autoneg_str, set_link_str); 495 496 return err; 497 } 498 499 return set_link_settings_separate_cmd(nic_dev, set_settings, autoneg, 500 speed); 501 } 502 503 static int set_link_settings(struct net_device *netdev, u8 autoneg, u32 speed) 504 { 505 struct hinic_dev *nic_dev = netdev_priv(netdev); 506 u32 set_settings = 0; 507 int err; 508 509 err = get_link_settings_type(nic_dev, autoneg, speed, &set_settings); 510 if (err) 511 return err; 512 513 if (set_settings) 514 err = hinic_set_settings_to_hw(nic_dev, set_settings, 515 autoneg, speed); 516 else 517 netif_info(nic_dev, drv, netdev, "Nothing changed, exit without setting anything\n"); 518 519 return err; 520 } 521 522 static int hinic_set_link_ksettings(struct net_device *netdev, const struct 523 ethtool_link_ksettings *link_settings) 524 { 525 /* only support to set autoneg and speed */ 526 return set_link_settings(netdev, link_settings->base.autoneg, 527 link_settings->base.speed); 528 } 529 530 static void hinic_get_drvinfo(struct net_device *netdev, 531 struct ethtool_drvinfo *info) 532 { 533 struct hinic_dev *nic_dev = netdev_priv(netdev); 534 u8 mgmt_ver[HINIC_MGMT_VERSION_MAX_LEN] = {0}; 535 struct hinic_hwdev *hwdev = nic_dev->hwdev; 536 struct hinic_hwif *hwif = hwdev->hwif; 537 int err; 538 539 strscpy(info->driver, HINIC_DRV_NAME, sizeof(info->driver)); 540 strscpy(info->bus_info, pci_name(hwif->pdev), sizeof(info->bus_info)); 541 542 err = hinic_get_mgmt_version(nic_dev, mgmt_ver); 543 if (err) 544 return; 545 546 snprintf(info->fw_version, sizeof(info->fw_version), "%s", mgmt_ver); 547 } 548 549 static void hinic_get_ringparam(struct net_device *netdev, 550 struct ethtool_ringparam *ring, 551 struct kernel_ethtool_ringparam *kernel_ring, 552 struct netlink_ext_ack *extack) 553 { 554 struct hinic_dev *nic_dev = netdev_priv(netdev); 555 556 ring->rx_max_pending = HINIC_MAX_QUEUE_DEPTH; 557 ring->tx_max_pending = HINIC_MAX_QUEUE_DEPTH; 558 ring->rx_pending = nic_dev->rq_depth; 559 ring->tx_pending = nic_dev->sq_depth; 560 } 561 562 static int check_ringparam_valid(struct hinic_dev *nic_dev, 563 struct ethtool_ringparam *ring) 564 { 565 if (ring->rx_jumbo_pending || ring->rx_mini_pending) { 566 netif_err(nic_dev, drv, nic_dev->netdev, 567 "Unsupported rx_jumbo_pending/rx_mini_pending\n"); 568 return -EINVAL; 569 } 570 571 if (ring->tx_pending > HINIC_MAX_QUEUE_DEPTH || 572 ring->tx_pending < HINIC_MIN_QUEUE_DEPTH || 573 ring->rx_pending > HINIC_MAX_QUEUE_DEPTH || 574 ring->rx_pending < HINIC_MIN_QUEUE_DEPTH) { 575 netif_err(nic_dev, drv, nic_dev->netdev, 576 "Queue depth out of range [%d-%d]\n", 577 HINIC_MIN_QUEUE_DEPTH, 
static int hinic_set_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *ring,
			       struct kernel_ethtool_ringparam *kernel_ring,
			       struct netlink_ext_ack *extack)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	u16 new_sq_depth, new_rq_depth;
	int err;

	err = check_ringparam_valid(nic_dev, ring);
	if (err)
		return err;

	new_sq_depth = (u16)(1U << (u16)ilog2(ring->tx_pending));
	new_rq_depth = (u16)(1U << (u16)ilog2(ring->rx_pending));

	if (new_sq_depth == nic_dev->sq_depth &&
	    new_rq_depth == nic_dev->rq_depth)
		return 0;

	netif_info(nic_dev, drv, netdev,
		   "Change Tx/Rx ring depth from %d/%d to %d/%d\n",
		   nic_dev->sq_depth, nic_dev->rq_depth,
		   new_sq_depth, new_rq_depth);

	nic_dev->sq_depth = new_sq_depth;
	nic_dev->rq_depth = new_rq_depth;

	if (netif_running(netdev)) {
		netif_info(nic_dev, drv, netdev, "Restarting netdev\n");
		err = hinic_close(netdev);
		if (err) {
			netif_err(nic_dev, drv, netdev,
				  "Failed to close netdev\n");
			return -EFAULT;
		}

		err = hinic_open(netdev);
		if (err) {
			netif_err(nic_dev, drv, netdev,
				  "Failed to open netdev\n");
			return -EFAULT;
		}
	}

	return 0;
}

static int __hinic_get_coalesce(struct net_device *netdev,
				struct ethtool_coalesce *coal, u16 queue)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	struct hinic_intr_coal_info *rx_intr_coal_info;
	struct hinic_intr_coal_info *tx_intr_coal_info;

	if (queue == COALESCE_ALL_QUEUE) {
		/* get tx/rx irq0 as default parameters */
		rx_intr_coal_info = &nic_dev->rx_intr_coalesce[0];
		tx_intr_coal_info = &nic_dev->tx_intr_coalesce[0];
	} else {
		if (queue >= nic_dev->num_qps) {
			netif_err(nic_dev, drv, netdev,
				  "Invalid queue_id: %d\n", queue);
			return -EINVAL;
		}
		rx_intr_coal_info = &nic_dev->rx_intr_coalesce[queue];
		tx_intr_coal_info = &nic_dev->tx_intr_coalesce[queue];
	}

	/* coalesce_timer is in unit of 9us */
	coal->rx_coalesce_usecs = rx_intr_coal_info->coalesce_timer_cfg *
			COALESCE_TIMER_CFG_UNIT;
	/* coalesced_frames is in unit of 8 */
	coal->rx_max_coalesced_frames = rx_intr_coal_info->pending_limt *
			COALESCE_PENDING_LIMIT_UNIT;
	coal->tx_coalesce_usecs = tx_intr_coal_info->coalesce_timer_cfg *
			COALESCE_TIMER_CFG_UNIT;
	coal->tx_max_coalesced_frames = tx_intr_coal_info->pending_limt *
			COALESCE_PENDING_LIMIT_UNIT;

	return 0;
}

static int is_coalesce_exceed_limit(const struct ethtool_coalesce *coal)
{
	if (coal->rx_coalesce_usecs > COALESCE_MAX_TIMER_CFG ||
	    coal->rx_max_coalesced_frames > COALESCE_MAX_PENDING_LIMIT ||
	    coal->tx_coalesce_usecs > COALESCE_MAX_TIMER_CFG ||
	    coal->tx_max_coalesced_frames > COALESCE_MAX_PENDING_LIMIT)
		return -ERANGE;

	return 0;
}

static int set_queue_coalesce(struct hinic_dev *nic_dev, u16 q_id,
			      struct hinic_intr_coal_info *coal,
			      bool set_rx_coal)
{
	struct hinic_intr_coal_info *intr_coal = NULL;
	struct hinic_msix_config interrupt_info = {0};
	struct net_device *netdev = nic_dev->netdev;
	u16 msix_idx;
	int err;

	intr_coal = set_rx_coal ? &nic_dev->rx_intr_coalesce[q_id] :
		    &nic_dev->tx_intr_coalesce[q_id];

	intr_coal->coalesce_timer_cfg = coal->coalesce_timer_cfg;
	intr_coal->pending_limt = coal->pending_limt;

	/* netdev not running or qp not in use,
	 * don't need to set coalesce to hw
	 */
	if (!(nic_dev->flags & HINIC_INTF_UP) ||
	    q_id >= nic_dev->num_qps)
		return 0;

	msix_idx = set_rx_coal ? nic_dev->rxqs[q_id].rq->msix_entry :
		   nic_dev->txqs[q_id].sq->msix_entry;
	interrupt_info.msix_index = msix_idx;
	interrupt_info.coalesce_timer_cnt = intr_coal->coalesce_timer_cfg;
	interrupt_info.pending_cnt = intr_coal->pending_limt;
	interrupt_info.resend_timer_cnt = intr_coal->resend_timer_cfg;

	err = hinic_set_interrupt_cfg(nic_dev->hwdev, &interrupt_info);
	if (err)
		netif_warn(nic_dev, drv, netdev,
			   "Failed to set %s queue%d coalesce",
			   set_rx_coal ? "rx" : "tx", q_id);

	return err;
}

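/* Apply one coalesce configuration to a single queue, or to every queue
 * when @queue is COALESCE_ALL_QUEUE (the value used by the non-per-queue
 * ethtool callbacks).
 */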
static int __set_hw_coal_param(struct hinic_dev *nic_dev,
			       struct hinic_intr_coal_info *intr_coal,
			       u16 queue, bool set_rx_coal)
{
	int err;
	u16 i;

	if (queue == COALESCE_ALL_QUEUE) {
		for (i = 0; i < nic_dev->max_qps; i++) {
			err = set_queue_coalesce(nic_dev, i, intr_coal,
						 set_rx_coal);
			if (err)
				return err;
		}
	} else {
		if (queue >= nic_dev->num_qps) {
			netif_err(nic_dev, drv, nic_dev->netdev,
				  "Invalid queue_id: %d\n", queue);
			return -EINVAL;
		}
		err = set_queue_coalesce(nic_dev, queue, intr_coal,
					 set_rx_coal);
		if (err)
			return err;
	}

	return 0;
}

static int __hinic_set_coalesce(struct net_device *netdev,
				struct ethtool_coalesce *coal, u16 queue)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	struct hinic_intr_coal_info rx_intr_coal = {0};
	struct hinic_intr_coal_info tx_intr_coal = {0};
	bool set_rx_coal = false;
	bool set_tx_coal = false;
	int err;

	err = is_coalesce_exceed_limit(coal);
	if (err)
		return err;

	if (coal->rx_coalesce_usecs || coal->rx_max_coalesced_frames) {
		rx_intr_coal.coalesce_timer_cfg =
			(u8)(coal->rx_coalesce_usecs / COALESCE_TIMER_CFG_UNIT);
		rx_intr_coal.pending_limt = (u8)(coal->rx_max_coalesced_frames /
						 COALESCE_PENDING_LIMIT_UNIT);
		set_rx_coal = true;
	}

	if (coal->tx_coalesce_usecs || coal->tx_max_coalesced_frames) {
		tx_intr_coal.coalesce_timer_cfg =
			(u8)(coal->tx_coalesce_usecs / COALESCE_TIMER_CFG_UNIT);
		tx_intr_coal.pending_limt = (u8)(coal->tx_max_coalesced_frames /
						 COALESCE_PENDING_LIMIT_UNIT);
		set_tx_coal = true;
	}

	/* setting coalesce timer or pending limit to zero will disable
	 * coalesce
	 */
	if (set_rx_coal && (!rx_intr_coal.coalesce_timer_cfg ||
			    !rx_intr_coal.pending_limt))
		netif_warn(nic_dev, drv, netdev, "RX coalesce will be disabled\n");
	if (set_tx_coal && (!tx_intr_coal.coalesce_timer_cfg ||
			    !tx_intr_coal.pending_limt))
		netif_warn(nic_dev, drv, netdev, "TX coalesce will be disabled\n");

	if (set_rx_coal) {
		err = __set_hw_coal_param(nic_dev, &rx_intr_coal, queue, true);
		if (err)
			return err;
	}
	if (set_tx_coal) {
		err = __set_hw_coal_param(nic_dev, &tx_intr_coal, queue, false);
		if (err)
			return err;
	}
	return 0;
}

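/* The global coalesce callbacks operate on COALESCE_ALL_QUEUE: the get path
 * reports the parameters of tx/rx queue 0, the set path applies the new
 * values to every queue.
 */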
static int hinic_get_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *coal,
			      struct kernel_ethtool_coalesce *kernel_coal,
			      struct netlink_ext_ack *extack)
{
	return __hinic_get_coalesce(netdev, coal, COALESCE_ALL_QUEUE);
}

static int hinic_set_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *coal,
			      struct kernel_ethtool_coalesce *kernel_coal,
			      struct netlink_ext_ack *extack)
{
	return __hinic_set_coalesce(netdev, coal, COALESCE_ALL_QUEUE);
}

static int hinic_get_per_queue_coalesce(struct net_device *netdev, u32 queue,
					struct ethtool_coalesce *coal)
{
	return __hinic_get_coalesce(netdev, coal, queue);
}

static int hinic_set_per_queue_coalesce(struct net_device *netdev, u32 queue,
					struct ethtool_coalesce *coal)
{
	return __hinic_set_coalesce(netdev, coal, queue);
}

static void hinic_get_pauseparam(struct net_device *netdev,
				 struct ethtool_pauseparam *pause)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	struct hinic_pause_config pause_info = {0};
	struct hinic_nic_cfg *nic_cfg;
	int err;

	nic_cfg = &nic_dev->hwdev->func_to_io.nic_cfg;

	err = hinic_get_hw_pause_info(nic_dev->hwdev, &pause_info);
	if (!err) {
		pause->autoneg = pause_info.auto_neg;
		if (nic_cfg->pause_set || !pause_info.auto_neg) {
			pause->rx_pause = nic_cfg->rx_pause;
			pause->tx_pause = nic_cfg->tx_pause;
		} else {
			pause->rx_pause = pause_info.rx_pause;
			pause->tx_pause = pause_info.tx_pause;
		}
	}
}

static int hinic_set_pauseparam(struct net_device *netdev,
				struct ethtool_pauseparam *pause)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	struct hinic_pause_config pause_info = {0};
	struct hinic_port_cap port_cap = {0};
	int err;

	err = hinic_port_get_cap(nic_dev, &port_cap);
	if (err)
		return -EIO;

	if (pause->autoneg != port_cap.autoneg_state)
		return -EOPNOTSUPP;

	pause_info.auto_neg = pause->autoneg;
	pause_info.rx_pause = pause->rx_pause;
	pause_info.tx_pause = pause->tx_pause;

	mutex_lock(&nic_dev->hwdev->func_to_io.nic_cfg.cfg_mutex);
	err = hinic_set_hw_pause_info(nic_dev->hwdev, &pause_info);
	if (err) {
		mutex_unlock(&nic_dev->hwdev->func_to_io.nic_cfg.cfg_mutex);
		return err;
	}
	nic_dev->hwdev->func_to_io.nic_cfg.pause_set = true;
	nic_dev->hwdev->func_to_io.nic_cfg.auto_neg = pause->autoneg;
	nic_dev->hwdev->func_to_io.nic_cfg.rx_pause = pause->rx_pause;
	nic_dev->hwdev->func_to_io.nic_cfg.tx_pause = pause->tx_pause;
	mutex_unlock(&nic_dev->hwdev->func_to_io.nic_cfg.cfg_mutex);

	return 0;
}

static void hinic_get_channels(struct net_device *netdev,
			       struct ethtool_channels *channels)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	struct hinic_hwdev *hwdev = nic_dev->hwdev;

	channels->max_combined = nic_dev->max_qps;
	channels->combined_count = hinic_hwdev_num_qps(hwdev);
}

static int hinic_set_channels(struct net_device *netdev,
			      struct ethtool_channels *channels)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	unsigned int count = channels->combined_count;
	int err;

	netif_info(nic_dev, drv, netdev, "Set max combined queue number from %d to %d\n",
		   hinic_hwdev_num_qps(nic_dev->hwdev), count);

	if (netif_running(netdev)) {
		netif_info(nic_dev, drv, netdev, "Restarting netdev\n");
		hinic_close(netdev);

		nic_dev->hwdev->nic_cap.num_qps = count;

		err = hinic_open(netdev);
		if (err) {
			netif_err(nic_dev, drv, netdev,
				  "Failed to open netdev\n");
			return -EFAULT;
		}
	} else {
		nic_dev->hwdev->nic_cap.num_qps = count;
	}

	return 0;
}

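/* Translate the hardware RSS type of the current template into the
 * ethtool RXH_* flags reported for ETHTOOL_GRXFH. IP src/dst hashing is
 * always reported once RSS is enabled; L4 port hashing depends on the
 * per-flow-type bits returned by firmware.
 */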
static int hinic_get_rss_hash_opts(struct hinic_dev *nic_dev,
				   struct ethtool_rxnfc *cmd)
{
	struct hinic_rss_type rss_type = { 0 };
	int err;

	cmd->data = 0;

	if (!(nic_dev->flags & HINIC_RSS_ENABLE))
		return 0;

	err = hinic_get_rss_type(nic_dev, nic_dev->rss_tmpl_idx,
				 &rss_type);
	if (err)
		return err;

	cmd->data = RXH_IP_SRC | RXH_IP_DST;
	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
		if (rss_type.tcp_ipv4)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
	case TCP_V6_FLOW:
		if (rss_type.tcp_ipv6)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
	case UDP_V4_FLOW:
		if (rss_type.udp_ipv4)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
	case UDP_V6_FLOW:
		if (rss_type.udp_ipv6)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		break;
	default:
		cmd->data = 0;
		return -EINVAL;
	}

	return 0;
}

static int set_l4_rss_hash_ops(struct ethtool_rxnfc *cmd,
			       struct hinic_rss_type *rss_type)
{
	u8 rss_l4_en = 0;

	switch (cmd->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
	case 0:
		rss_l4_en = 0;
		break;
	case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
		rss_l4_en = 1;
		break;
	default:
		return -EINVAL;
	}

	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
		rss_type->tcp_ipv4 = rss_l4_en;
		break;
	case TCP_V6_FLOW:
		rss_type->tcp_ipv6 = rss_l4_en;
		break;
	case UDP_V4_FLOW:
		rss_type->udp_ipv4 = rss_l4_en;
		break;
	case UDP_V6_FLOW:
		rss_type->udp_ipv6 = rss_l4_en;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int hinic_set_rss_hash_opts(struct hinic_dev *nic_dev,
				   struct ethtool_rxnfc *cmd)
{
	struct hinic_rss_type *rss_type = &nic_dev->rss_type;
	int err;

	if (!(nic_dev->flags & HINIC_RSS_ENABLE)) {
		cmd->data = 0;
		return -EOPNOTSUPP;
	}

	/* RSS does not support anything other than hashing
	 * to queues on src and dst IPs and ports
	 */
	if (cmd->data & ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 |
	    RXH_L4_B_2_3))
		return -EINVAL;

	/* We need at least the IP SRC and DEST fields for hashing */
	if (!(cmd->data & RXH_IP_SRC) || !(cmd->data & RXH_IP_DST))
		return -EINVAL;

	err = hinic_get_rss_type(nic_dev,
				 nic_dev->rss_tmpl_idx, rss_type);
	if (err)
		return -EFAULT;

	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
	case UDP_V4_FLOW:
	case UDP_V6_FLOW:
		err = set_l4_rss_hash_ops(cmd, rss_type);
		if (err)
			return err;
		break;
	case IPV4_FLOW:
		rss_type->ipv4 = 1;
		break;
	case IPV6_FLOW:
		rss_type->ipv6 = 1;
		break;
	default:
		return -EINVAL;
	}

	err = hinic_set_rss_type(nic_dev, nic_dev->rss_tmpl_idx,
				 *rss_type);
	if (err)
		return -EFAULT;

	return 0;
}

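/* Program a new RSS indirection table and/or hash key. A copy of the
 * user-supplied values is cached in rss_indir_user/rss_hkey_user
 * (allocated on first use) before the firmware template is updated.
 */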
static int __set_rss_rxfh(struct net_device *netdev,
			  const u32 *indir, const u8 *key)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	int err;

	if (indir) {
		if (!nic_dev->rss_indir_user) {
			nic_dev->rss_indir_user =
				kzalloc(sizeof(u32) * HINIC_RSS_INDIR_SIZE,
					GFP_KERNEL);
			if (!nic_dev->rss_indir_user)
				return -ENOMEM;
		}

		memcpy(nic_dev->rss_indir_user, indir,
		       sizeof(u32) * HINIC_RSS_INDIR_SIZE);

		err = hinic_rss_set_indir_tbl(nic_dev,
					      nic_dev->rss_tmpl_idx, indir);
		if (err)
			return -EFAULT;
	}

	if (key) {
		if (!nic_dev->rss_hkey_user) {
			nic_dev->rss_hkey_user =
				kzalloc(HINIC_RSS_KEY_SIZE * 2, GFP_KERNEL);

			if (!nic_dev->rss_hkey_user)
				return -ENOMEM;
		}

		memcpy(nic_dev->rss_hkey_user, key, HINIC_RSS_KEY_SIZE);

		err = hinic_rss_set_template_tbl(nic_dev,
						 nic_dev->rss_tmpl_idx, key);
		if (err)
			return -EFAULT;
	}

	return 0;
}

static int hinic_get_rxnfc(struct net_device *netdev,
			   struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	int err = 0;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = nic_dev->num_qps;
		break;
	case ETHTOOL_GRXFH:
		err = hinic_get_rss_hash_opts(nic_dev, cmd);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int hinic_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	int err = 0;

	switch (cmd->cmd) {
	case ETHTOOL_SRXFH:
		err = hinic_set_rss_hash_opts(nic_dev, cmd);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int hinic_get_rxfh(struct net_device *netdev,
			  u32 *indir, u8 *key, u8 *hfunc)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	u8 hash_engine_type = 0;
	int err = 0;

	if (!(nic_dev->flags & HINIC_RSS_ENABLE))
		return -EOPNOTSUPP;

	if (hfunc) {
		err = hinic_rss_get_hash_engine(nic_dev,
						nic_dev->rss_tmpl_idx,
						&hash_engine_type);
		if (err)
			return -EFAULT;

		*hfunc = hash_engine_type ? ETH_RSS_HASH_TOP : ETH_RSS_HASH_XOR;
	}

	if (indir) {
		err = hinic_rss_get_indir_tbl(nic_dev,
					      nic_dev->rss_tmpl_idx, indir);
		if (err)
			return -EFAULT;
	}

	if (key)
		err = hinic_rss_get_template_tbl(nic_dev,
						 nic_dev->rss_tmpl_idx, key);

	return err;
}

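/* ethtool .set_rxfh: hfunc selects between the Toeplitz and XOR hash
 * engines (ETH_RSS_HASH_NO_CHANGE keeps the current one); the indirection
 * table and key are then handled by __set_rss_rxfh().
 */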
static int hinic_set_rxfh(struct net_device *netdev, const u32 *indir,
			  const u8 *key, const u8 hfunc)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	int err = 0;

	if (!(nic_dev->flags & HINIC_RSS_ENABLE))
		return -EOPNOTSUPP;

	if (hfunc != ETH_RSS_HASH_NO_CHANGE) {
		if (hfunc != ETH_RSS_HASH_TOP && hfunc != ETH_RSS_HASH_XOR)
			return -EOPNOTSUPP;

		nic_dev->rss_hash_engine = (hfunc == ETH_RSS_HASH_XOR) ?
			HINIC_RSS_HASH_ENGINE_TYPE_XOR :
			HINIC_RSS_HASH_ENGINE_TYPE_TOEP;
		err = hinic_rss_set_hash_engine
			(nic_dev, nic_dev->rss_tmpl_idx,
			 nic_dev->rss_hash_engine);
		if (err)
			return -EFAULT;
	}

	err = __set_rss_rxfh(netdev, indir, key);

	return err;
}

static u32 hinic_get_rxfh_key_size(struct net_device *netdev)
{
	return HINIC_RSS_KEY_SIZE;
}

static u32 hinic_get_rxfh_indir_size(struct net_device *netdev)
{
	return HINIC_RSS_INDIR_SIZE;
}

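/* Statistics tables: each entry records a counter's name together with its
 * size and offset inside the corresponding stats structure, so
 * hinic_get_ethtool_stats() can copy the counters generically. The order of
 * the tables must match the strings emitted by hinic_get_strings().
 */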
#define HINIC_FUNC_STAT(_stat_item) {	\
	.name = #_stat_item, \
	.size = sizeof_field(struct hinic_vport_stats, _stat_item), \
	.offset = offsetof(struct hinic_vport_stats, _stat_item) \
}

static struct hinic_stats hinic_function_stats[] = {
	HINIC_FUNC_STAT(tx_unicast_pkts_vport),
	HINIC_FUNC_STAT(tx_unicast_bytes_vport),
	HINIC_FUNC_STAT(tx_multicast_pkts_vport),
	HINIC_FUNC_STAT(tx_multicast_bytes_vport),
	HINIC_FUNC_STAT(tx_broadcast_pkts_vport),
	HINIC_FUNC_STAT(tx_broadcast_bytes_vport),

	HINIC_FUNC_STAT(rx_unicast_pkts_vport),
	HINIC_FUNC_STAT(rx_unicast_bytes_vport),
	HINIC_FUNC_STAT(rx_multicast_pkts_vport),
	HINIC_FUNC_STAT(rx_multicast_bytes_vport),
	HINIC_FUNC_STAT(rx_broadcast_pkts_vport),
	HINIC_FUNC_STAT(rx_broadcast_bytes_vport),

	HINIC_FUNC_STAT(tx_discard_vport),
	HINIC_FUNC_STAT(rx_discard_vport),
	HINIC_FUNC_STAT(tx_err_vport),
	HINIC_FUNC_STAT(rx_err_vport),
};

static char hinic_test_strings[][ETH_GSTRING_LEN] = {
	"Internal lb test (on/offline)",
	"External lb test (external_lb)",
};

#define HINIC_PORT_STAT(_stat_item) { \
	.name = #_stat_item, \
	.size = sizeof_field(struct hinic_phy_port_stats, _stat_item), \
	.offset = offsetof(struct hinic_phy_port_stats, _stat_item) \
}

static struct hinic_stats hinic_port_stats[] = {
	HINIC_PORT_STAT(mac_rx_total_pkt_num),
	HINIC_PORT_STAT(mac_rx_total_oct_num),
	HINIC_PORT_STAT(mac_rx_bad_pkt_num),
	HINIC_PORT_STAT(mac_rx_bad_oct_num),
	HINIC_PORT_STAT(mac_rx_good_pkt_num),
	HINIC_PORT_STAT(mac_rx_good_oct_num),
	HINIC_PORT_STAT(mac_rx_uni_pkt_num),
	HINIC_PORT_STAT(mac_rx_multi_pkt_num),
	HINIC_PORT_STAT(mac_rx_broad_pkt_num),
	HINIC_PORT_STAT(mac_tx_total_pkt_num),
	HINIC_PORT_STAT(mac_tx_total_oct_num),
	HINIC_PORT_STAT(mac_tx_bad_pkt_num),
	HINIC_PORT_STAT(mac_tx_bad_oct_num),
	HINIC_PORT_STAT(mac_tx_good_pkt_num),
	HINIC_PORT_STAT(mac_tx_good_oct_num),
	HINIC_PORT_STAT(mac_tx_uni_pkt_num),
	HINIC_PORT_STAT(mac_tx_multi_pkt_num),
	HINIC_PORT_STAT(mac_tx_broad_pkt_num),
	HINIC_PORT_STAT(mac_rx_fragment_pkt_num),
	HINIC_PORT_STAT(mac_rx_undersize_pkt_num),
	HINIC_PORT_STAT(mac_rx_undermin_pkt_num),
	HINIC_PORT_STAT(mac_rx_64_oct_pkt_num),
	HINIC_PORT_STAT(mac_rx_65_127_oct_pkt_num),
	HINIC_PORT_STAT(mac_rx_128_255_oct_pkt_num),
	HINIC_PORT_STAT(mac_rx_256_511_oct_pkt_num),
	HINIC_PORT_STAT(mac_rx_512_1023_oct_pkt_num),
	HINIC_PORT_STAT(mac_rx_1024_1518_oct_pkt_num),
	HINIC_PORT_STAT(mac_rx_1519_2047_oct_pkt_num),
	HINIC_PORT_STAT(mac_rx_2048_4095_oct_pkt_num),
	HINIC_PORT_STAT(mac_rx_4096_8191_oct_pkt_num),
	HINIC_PORT_STAT(mac_rx_8192_9216_oct_pkt_num),
	HINIC_PORT_STAT(mac_rx_9217_12287_oct_pkt_num),
	HINIC_PORT_STAT(mac_rx_12288_16383_oct_pkt_num),
	HINIC_PORT_STAT(mac_rx_1519_max_good_pkt_num),
	HINIC_PORT_STAT(mac_rx_1519_max_bad_pkt_num),
	HINIC_PORT_STAT(mac_rx_oversize_pkt_num),
	HINIC_PORT_STAT(mac_rx_jabber_pkt_num),
	HINIC_PORT_STAT(mac_rx_pause_num),
	HINIC_PORT_STAT(mac_rx_pfc_pkt_num),
	HINIC_PORT_STAT(mac_rx_pfc_pri0_pkt_num),
	HINIC_PORT_STAT(mac_rx_pfc_pri1_pkt_num),
	HINIC_PORT_STAT(mac_rx_pfc_pri2_pkt_num),
	HINIC_PORT_STAT(mac_rx_pfc_pri3_pkt_num),
	HINIC_PORT_STAT(mac_rx_pfc_pri4_pkt_num),
	HINIC_PORT_STAT(mac_rx_pfc_pri5_pkt_num),
	HINIC_PORT_STAT(mac_rx_pfc_pri6_pkt_num),
	HINIC_PORT_STAT(mac_rx_pfc_pri7_pkt_num),
	HINIC_PORT_STAT(mac_rx_control_pkt_num),
	HINIC_PORT_STAT(mac_rx_sym_err_pkt_num),
	HINIC_PORT_STAT(mac_rx_fcs_err_pkt_num),
	HINIC_PORT_STAT(mac_rx_send_app_good_pkt_num),
	HINIC_PORT_STAT(mac_rx_send_app_bad_pkt_num),
	HINIC_PORT_STAT(mac_tx_fragment_pkt_num),
	HINIC_PORT_STAT(mac_tx_undersize_pkt_num),
	HINIC_PORT_STAT(mac_tx_undermin_pkt_num),
	HINIC_PORT_STAT(mac_tx_64_oct_pkt_num),
	HINIC_PORT_STAT(mac_tx_65_127_oct_pkt_num),
	HINIC_PORT_STAT(mac_tx_128_255_oct_pkt_num),
	HINIC_PORT_STAT(mac_tx_256_511_oct_pkt_num),
	HINIC_PORT_STAT(mac_tx_512_1023_oct_pkt_num),
	HINIC_PORT_STAT(mac_tx_1024_1518_oct_pkt_num),
	HINIC_PORT_STAT(mac_tx_1519_2047_oct_pkt_num),
	HINIC_PORT_STAT(mac_tx_2048_4095_oct_pkt_num),
	HINIC_PORT_STAT(mac_tx_4096_8191_oct_pkt_num),
	HINIC_PORT_STAT(mac_tx_8192_9216_oct_pkt_num),
	HINIC_PORT_STAT(mac_tx_9217_12287_oct_pkt_num),
	HINIC_PORT_STAT(mac_tx_12288_16383_oct_pkt_num),
	HINIC_PORT_STAT(mac_tx_1519_max_good_pkt_num),
	HINIC_PORT_STAT(mac_tx_1519_max_bad_pkt_num),
	HINIC_PORT_STAT(mac_tx_oversize_pkt_num),
	HINIC_PORT_STAT(mac_tx_jabber_pkt_num),
	HINIC_PORT_STAT(mac_tx_pause_num),
	HINIC_PORT_STAT(mac_tx_pfc_pkt_num),
	HINIC_PORT_STAT(mac_tx_pfc_pri0_pkt_num),
	HINIC_PORT_STAT(mac_tx_pfc_pri1_pkt_num),
	HINIC_PORT_STAT(mac_tx_pfc_pri2_pkt_num),
	HINIC_PORT_STAT(mac_tx_pfc_pri3_pkt_num),
	HINIC_PORT_STAT(mac_tx_pfc_pri4_pkt_num),
	HINIC_PORT_STAT(mac_tx_pfc_pri5_pkt_num),
	HINIC_PORT_STAT(mac_tx_pfc_pri6_pkt_num),
	HINIC_PORT_STAT(mac_tx_pfc_pri7_pkt_num),
	HINIC_PORT_STAT(mac_tx_control_pkt_num),
	HINIC_PORT_STAT(mac_tx_err_all_pkt_num),
	HINIC_PORT_STAT(mac_tx_from_app_good_pkt_num),
	HINIC_PORT_STAT(mac_tx_from_app_bad_pkt_num),
};

#define HINIC_TXQ_STAT(_stat_item) { \
	.name = "txq%d_"#_stat_item, \
	.size = sizeof_field(struct hinic_txq_stats, _stat_item), \
	.offset = offsetof(struct hinic_txq_stats, _stat_item) \
}

static struct hinic_stats hinic_tx_queue_stats[] = {
	HINIC_TXQ_STAT(pkts),
	HINIC_TXQ_STAT(bytes),
	HINIC_TXQ_STAT(tx_busy),
	HINIC_TXQ_STAT(tx_wake),
	HINIC_TXQ_STAT(tx_dropped),
	HINIC_TXQ_STAT(big_frags_pkts),
};

#define HINIC_RXQ_STAT(_stat_item) { \
	.name = "rxq%d_"#_stat_item, \
	.size = sizeof_field(struct hinic_rxq_stats, _stat_item), \
	.offset = offsetof(struct hinic_rxq_stats, _stat_item) \
}

static struct hinic_stats hinic_rx_queue_stats[] = {
	HINIC_RXQ_STAT(pkts),
	HINIC_RXQ_STAT(bytes),
	HINIC_RXQ_STAT(errors),
	HINIC_RXQ_STAT(csum_errors),
	HINIC_RXQ_STAT(other_errors),
};

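/* Fill in the driver-maintained per-queue counters; they are appended after
 * the firmware function (vport) and port statistics in the ethtool stats
 * array, matching the string order above.
 */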
static void get_drv_queue_stats(struct hinic_dev *nic_dev, u64 *data)
{
	struct hinic_txq_stats txq_stats;
	struct hinic_rxq_stats rxq_stats;
	u16 i = 0, j = 0, qid = 0;
	char *p;

	for (qid = 0; qid < nic_dev->num_qps; qid++) {
		if (!nic_dev->txqs)
			break;

		hinic_txq_get_stats(&nic_dev->txqs[qid], &txq_stats);
		for (j = 0; j < ARRAY_SIZE(hinic_tx_queue_stats); j++, i++) {
			p = (char *)&txq_stats +
				hinic_tx_queue_stats[j].offset;
			data[i] = (hinic_tx_queue_stats[j].size ==
				   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
		}
	}

	for (qid = 0; qid < nic_dev->num_qps; qid++) {
		if (!nic_dev->rxqs)
			break;

		hinic_rxq_get_stats(&nic_dev->rxqs[qid], &rxq_stats);
		for (j = 0; j < ARRAY_SIZE(hinic_rx_queue_stats); j++, i++) {
			p = (char *)&rxq_stats +
				hinic_rx_queue_stats[j].offset;
			data[i] = (hinic_rx_queue_stats[j].size ==
				   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
		}
	}
}

static void hinic_get_ethtool_stats(struct net_device *netdev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	struct hinic_vport_stats vport_stats = {0};
	struct hinic_phy_port_stats *port_stats;
	u16 i = 0, j = 0;
	char *p;
	int err;

	err = hinic_get_vport_stats(nic_dev, &vport_stats);
	if (err)
		netif_err(nic_dev, drv, netdev,
			  "Failed to get vport stats from firmware\n");

	for (j = 0; j < ARRAY_SIZE(hinic_function_stats); j++, i++) {
		p = (char *)&vport_stats + hinic_function_stats[j].offset;
		data[i] = (hinic_function_stats[j].size ==
			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}

	port_stats = kzalloc(sizeof(*port_stats), GFP_KERNEL);
	if (!port_stats) {
		memset(&data[i], 0,
		       ARRAY_SIZE(hinic_port_stats) * sizeof(*data));
		i += ARRAY_SIZE(hinic_port_stats);
		goto get_drv_stats;
	}

	err = hinic_get_phy_port_stats(nic_dev, port_stats);
	if (err)
		netif_err(nic_dev, drv, netdev,
			  "Failed to get port stats from firmware\n");

	for (j = 0; j < ARRAY_SIZE(hinic_port_stats); j++, i++) {
		p = (char *)port_stats + hinic_port_stats[j].offset;
		data[i] = (hinic_port_stats[j].size ==
			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}

	kfree(port_stats);

get_drv_stats:
	get_drv_queue_stats(nic_dev, data + i);
}

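/* The ETH_SS_STATS count below must stay in sync with the tables above and
 * with the layout produced by hinic_get_ethtool_stats()/hinic_get_strings():
 * function stats + port stats + (tx + rx queue stats) * num_qps.
 */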
static int hinic_get_sset_count(struct net_device *netdev, int sset)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	int count, q_num;

	switch (sset) {
	case ETH_SS_TEST:
		return ARRAY_SIZE(hinic_test_strings);
	case ETH_SS_STATS:
		q_num = nic_dev->num_qps;
		count = ARRAY_SIZE(hinic_function_stats) +
			(ARRAY_SIZE(hinic_tx_queue_stats) +
			 ARRAY_SIZE(hinic_rx_queue_stats)) * q_num;

		count += ARRAY_SIZE(hinic_port_stats);

		return count;
	default:
		return -EOPNOTSUPP;
	}
}

static void hinic_get_strings(struct net_device *netdev,
			      u32 stringset, u8 *data)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	char *p = (char *)data;
	u16 i, j;

	switch (stringset) {
	case ETH_SS_TEST:
		memcpy(data, *hinic_test_strings, sizeof(hinic_test_strings));
		return;
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(hinic_function_stats); i++) {
			memcpy(p, hinic_function_stats[i].name,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < ARRAY_SIZE(hinic_port_stats); i++) {
			memcpy(p, hinic_port_stats[i].name,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < nic_dev->num_qps; i++) {
			for (j = 0; j < ARRAY_SIZE(hinic_tx_queue_stats); j++) {
				sprintf(p, hinic_tx_queue_stats[j].name, i);
				p += ETH_GSTRING_LEN;
			}
		}

		for (i = 0; i < nic_dev->num_qps; i++) {
			for (j = 0; j < ARRAY_SIZE(hinic_rx_queue_stats); j++) {
				sprintf(p, hinic_rx_queue_stats[j].name, i);
				p += ETH_GSTRING_LEN;
			}
		}

		return;
	default:
		return;
	}
}

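/* Run the loopback data path test: a LP_PKT_LEN byte template frame is
 * built, LP_PKT_CNT copies (each tagged with its index in the last byte)
 * are transmitted per iteration, and test_time * 5 iterations are run.
 * While HINIC_LP_TEST is set the receive path is expected to copy the
 * looped-back frames into lb_test_rx_buf, which is compared against the
 * template after a 200ms wait.
 */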
static int hinic_run_lp_test(struct hinic_dev *nic_dev, u32 test_time)
{
	u8 *lb_test_rx_buf = nic_dev->lb_test_rx_buf;
	struct net_device *netdev = nic_dev->netdev;
	struct sk_buff *skb_tmp = NULL;
	struct sk_buff *skb = NULL;
	u32 cnt = test_time * 5;
	u8 *test_data = NULL;
	u32 i;
	u8 j;

	skb_tmp = alloc_skb(LP_PKT_LEN, GFP_ATOMIC);
	if (!skb_tmp)
		return -ENOMEM;

	test_data = __skb_put(skb_tmp, LP_PKT_LEN);

	memset(test_data, 0xFF, 2 * ETH_ALEN);
	test_data[ETH_ALEN] = 0xFE;
	test_data[2 * ETH_ALEN] = 0x08;
	test_data[2 * ETH_ALEN + 1] = 0x0;

	for (i = ETH_HLEN; i < LP_PKT_LEN; i++)
		test_data[i] = i & 0xFF;

	skb_tmp->queue_mapping = 0;
	skb_tmp->ip_summed = CHECKSUM_COMPLETE;
	skb_tmp->dev = netdev;

	for (i = 0; i < cnt; i++) {
		nic_dev->lb_test_rx_idx = 0;
		memset(lb_test_rx_buf, 0, LP_PKT_CNT * LP_PKT_LEN);

		for (j = 0; j < LP_PKT_CNT; j++) {
			skb = pskb_copy(skb_tmp, GFP_ATOMIC);
			if (!skb) {
				dev_kfree_skb_any(skb_tmp);
				netif_err(nic_dev, drv, netdev,
					  "Copy skb failed for loopback test\n");
				return -ENOMEM;
			}

			/* mark index for every pkt */
			skb->data[LP_PKT_LEN - 1] = j;

			if (hinic_lb_xmit_frame(skb, netdev)) {
				dev_kfree_skb_any(skb);
				dev_kfree_skb_any(skb_tmp);
				netif_err(nic_dev, drv, netdev,
					  "Xmit pkt failed for loopback test\n");
				return -EBUSY;
			}
		}

		/* wait till all pkts received to RX buffer */
		msleep(200);

		for (j = 0; j < LP_PKT_CNT; j++) {
			if (memcmp(lb_test_rx_buf + j * LP_PKT_LEN,
				   skb_tmp->data, LP_PKT_LEN - 1) ||
			    (*(lb_test_rx_buf + j * LP_PKT_LEN +
			       LP_PKT_LEN - 1) != j)) {
				dev_kfree_skb_any(skb_tmp);
				netif_err(nic_dev, drv, netdev,
					  "Compare pkt failed in loopback test(index=0x%02x, data[%d]=0x%02x)\n",
					  j + i * LP_PKT_CNT,
					  LP_PKT_LEN - 1,
					  *(lb_test_rx_buf + j * LP_PKT_LEN +
					    LP_PKT_LEN - 1));
				return -EIO;
			}
		}
	}

	dev_kfree_skb_any(skb_tmp);
	return 0;
}

static int do_lp_test(struct hinic_dev *nic_dev, u32 flags, u32 test_time,
		      enum diag_test_index *test_index)
{
	struct net_device *netdev = nic_dev->netdev;
	u8 *lb_test_rx_buf = NULL;
	int err = 0;

	if (!(flags & ETH_TEST_FL_EXTERNAL_LB)) {
		*test_index = INTERNAL_LP_TEST;
		if (hinic_set_loopback_mode(nic_dev->hwdev,
					    HINIC_INTERNAL_LP_MODE, true)) {
			netif_err(nic_dev, drv, netdev,
				  "Failed to set port loopback mode before loopback test\n");
			return -EIO;
		}
	} else {
		*test_index = EXTERNAL_LP_TEST;
	}

	lb_test_rx_buf = vmalloc(LP_PKT_CNT * LP_PKT_LEN);
	if (!lb_test_rx_buf) {
		err = -ENOMEM;
	} else {
		nic_dev->lb_test_rx_buf = lb_test_rx_buf;
		nic_dev->lb_pkt_len = LP_PKT_LEN;
		nic_dev->flags |= HINIC_LP_TEST;
		err = hinic_run_lp_test(nic_dev, test_time);
		nic_dev->flags &= ~HINIC_LP_TEST;
		msleep(100);
		vfree(lb_test_rx_buf);
		nic_dev->lb_test_rx_buf = NULL;
	}

	if (!(flags & ETH_TEST_FL_EXTERNAL_LB)) {
		if (hinic_set_loopback_mode(nic_dev->hwdev,
					    HINIC_INTERNAL_LP_MODE, false)) {
			netif_err(nic_dev, drv, netdev,
				  "Failed to cancel port loopback mode after loopback test\n");
			err = -EIO;
		}
	}

	return err;
}

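/* ethtool self-test entry point. Internal port loopback is used unless
 * ETH_TEST_FL_EXTERNAL_LB is requested. The carrier and TX queues are
 * disabled for the duration of the test and restored according to the
 * link state afterwards.
 */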
static void hinic_diag_test(struct net_device *netdev,
			    struct ethtool_test *eth_test, u64 *data)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	enum hinic_port_link_state link_state;
	enum diag_test_index test_index = 0;
	int err = 0;

	memset(data, 0, DIAG_TEST_MAX * sizeof(u64));

	/* don't support loopback test when netdev is closed. */
	if (!(nic_dev->flags & HINIC_INTF_UP)) {
		netif_err(nic_dev, drv, netdev,
			  "Do not support loopback test when netdev is closed\n");
		eth_test->flags |= ETH_TEST_FL_FAILED;
		data[PORT_DOWN_ERR_IDX] = 1;
		return;
	}

	netif_carrier_off(netdev);
	netif_tx_disable(netdev);

	err = do_lp_test(nic_dev, eth_test->flags, LP_DEFAULT_TIME,
			 &test_index);
	if (err) {
		eth_test->flags |= ETH_TEST_FL_FAILED;
		data[test_index] = 1;
	}

	netif_tx_wake_all_queues(netdev);

	err = hinic_port_link_state(nic_dev, &link_state);
	if (!err && link_state == HINIC_LINK_STATE_UP)
		netif_carrier_on(netdev);
}

static int hinic_set_phys_id(struct net_device *netdev,
			     enum ethtool_phys_id_state state)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	int err = 0;
	u8 port;

	port = nic_dev->hwdev->port_id;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		err = hinic_set_led_status(nic_dev->hwdev, port,
					   HINIC_LED_TYPE_LINK,
					   HINIC_LED_MODE_FORCE_2HZ);
		if (err)
			netif_err(nic_dev, drv, netdev,
				  "Set LED blinking in 2HZ failed\n");
		break;

	case ETHTOOL_ID_INACTIVE:
		err = hinic_reset_led_status(nic_dev->hwdev, port);
		if (err)
			netif_err(nic_dev, drv, netdev,
				  "Reset LED to original status failed\n");
		break;

	default:
		return -EOPNOTSUPP;
	}

	return err;
}

static int hinic_get_module_info(struct net_device *netdev,
				 struct ethtool_modinfo *modinfo)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	u8 sfp_type_ext;
	u8 sfp_type;
	int err;

	err = hinic_get_sfp_type(nic_dev->hwdev, &sfp_type, &sfp_type_ext);
	if (err)
		return err;

	switch (sfp_type) {
	case SFF8024_ID_SFP:
		modinfo->type = ETH_MODULE_SFF_8472;
		modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
		break;
	case SFF8024_ID_QSFP_8438:
		modinfo->type = ETH_MODULE_SFF_8436;
		modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
		break;
	case SFF8024_ID_QSFP_8436_8636:
		if (sfp_type_ext >= 0x3) {
			modinfo->type = ETH_MODULE_SFF_8636;
			modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN;
		} else {
			modinfo->type = ETH_MODULE_SFF_8436;
			modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
		}
		break;
	case SFF8024_ID_QSFP28_8636:
		modinfo->type = ETH_MODULE_SFF_8636;
		modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN;
		break;
	default:
		netif_warn(nic_dev, drv, netdev,
			   "Optical module unknown: 0x%x\n", sfp_type);
		return -EINVAL;
	}

	return 0;
}

static int hinic_get_module_eeprom(struct net_device *netdev,
				   struct ethtool_eeprom *ee, u8 *data)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	u8 sfp_data[STD_SFP_INFO_MAX_SIZE];
	u16 len;
	int err;

	if (!ee->len || ((ee->len + ee->offset) > STD_SFP_INFO_MAX_SIZE))
		return -EINVAL;

	memset(data, 0, ee->len);

	err = hinic_get_sfp_eeprom(nic_dev->hwdev, sfp_data, &len);
	if (err)
		return err;

	memcpy(data, sfp_data + ee->offset, ee->len);

	return 0;
}

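/* Report the extended reason for a link-down state (no cable or
 * unrecognized module) based on the cable_unplugged/module_unrecognized
 * flags maintained by the driver; -ENODATA is returned while the carrier
 * is up.
 */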
static int
hinic_get_link_ext_state(struct net_device *netdev,
			 struct ethtool_link_ext_state_info *link_ext_state_info)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);

	if (netif_carrier_ok(netdev))
		return -ENODATA;

	if (nic_dev->cable_unplugged)
		link_ext_state_info->link_ext_state =
			ETHTOOL_LINK_EXT_STATE_NO_CABLE;
	else if (nic_dev->module_unrecognized)
		link_ext_state_info->link_ext_state =
			ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH;

	return 0;
}

static const struct ethtool_ops hinic_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
				     ETHTOOL_COALESCE_RX_MAX_FRAMES |
				     ETHTOOL_COALESCE_TX_USECS |
				     ETHTOOL_COALESCE_TX_MAX_FRAMES,

	.get_link_ksettings = hinic_get_link_ksettings,
	.set_link_ksettings = hinic_set_link_ksettings,
	.get_drvinfo = hinic_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_link_ext_state = hinic_get_link_ext_state,
	.get_ringparam = hinic_get_ringparam,
	.set_ringparam = hinic_set_ringparam,
	.get_coalesce = hinic_get_coalesce,
	.set_coalesce = hinic_set_coalesce,
	.get_per_queue_coalesce = hinic_get_per_queue_coalesce,
	.set_per_queue_coalesce = hinic_set_per_queue_coalesce,
	.get_pauseparam = hinic_get_pauseparam,
	.set_pauseparam = hinic_set_pauseparam,
	.get_channels = hinic_get_channels,
	.set_channels = hinic_set_channels,
	.get_rxnfc = hinic_get_rxnfc,
	.set_rxnfc = hinic_set_rxnfc,
	.get_rxfh_key_size = hinic_get_rxfh_key_size,
	.get_rxfh_indir_size = hinic_get_rxfh_indir_size,
	.get_rxfh = hinic_get_rxfh,
	.set_rxfh = hinic_set_rxfh,
	.get_sset_count = hinic_get_sset_count,
	.get_ethtool_stats = hinic_get_ethtool_stats,
	.get_strings = hinic_get_strings,
	.self_test = hinic_diag_test,
	.set_phys_id = hinic_set_phys_id,
	.get_module_info = hinic_get_module_info,
	.get_module_eeprom = hinic_get_module_eeprom,
};

static const struct ethtool_ops hinicvf_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
				     ETHTOOL_COALESCE_RX_MAX_FRAMES |
				     ETHTOOL_COALESCE_TX_USECS |
				     ETHTOOL_COALESCE_TX_MAX_FRAMES,

	.get_link_ksettings = hinic_get_link_ksettings,
	.get_drvinfo = hinic_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_ringparam = hinic_get_ringparam,
	.set_ringparam = hinic_set_ringparam,
	.get_coalesce = hinic_get_coalesce,
	.set_coalesce = hinic_set_coalesce,
	.get_per_queue_coalesce = hinic_get_per_queue_coalesce,
	.set_per_queue_coalesce = hinic_set_per_queue_coalesce,
	.get_channels = hinic_get_channels,
	.set_channels = hinic_set_channels,
	.get_rxnfc = hinic_get_rxnfc,
	.set_rxnfc = hinic_set_rxnfc,
	.get_rxfh_key_size = hinic_get_rxfh_key_size,
	.get_rxfh_indir_size = hinic_get_rxfh_indir_size,
	.get_rxfh = hinic_get_rxfh,
	.set_rxfh = hinic_set_rxfh,
	.get_sset_count = hinic_get_sset_count,
	.get_ethtool_stats = hinic_get_ethtool_stats,
	.get_strings = hinic_get_strings,
};

void hinic_set_ethtool_ops(struct net_device *netdev)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);

	if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
		netdev->ethtool_ops = &hinic_ethtool_ops;
	else
		netdev->ethtool_ops = &hinicvf_ethtool_ops;
}