// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/phy.h>

#include "hns3_enet.h"

/* Maps one "ethtool -S" statistic name to the byte offset of its
 * counter inside struct hns3_enet_ring (via the embedded ring_stats).
 */
struct hns3_stats {
	char stats_string[ETH_GSTRING_LEN];	/* name reported to ethtool */
	int stats_offset;			/* offset of the counter in the ring */
};

/* tqp related stats */
#define HNS3_TQP_STAT(_string, _member) {			\
	.stats_string = _string,				\
	.stats_offset = offsetof(struct hns3_enet_ring, stats) +\
			offsetof(struct ring_stats, _member),	\
}

static const struct hns3_stats hns3_txq_stats[] = {
	/* Tx per-queue statistics */
	HNS3_TQP_STAT("io_err_cnt", io_err_cnt),
	HNS3_TQP_STAT("dropped", sw_err_cnt),
	HNS3_TQP_STAT("seg_pkt_cnt", seg_pkt_cnt),
	HNS3_TQP_STAT("packets", tx_pkts),
	HNS3_TQP_STAT("bytes", tx_bytes),
	HNS3_TQP_STAT("errors", tx_err_cnt),
	HNS3_TQP_STAT("wake", restart_queue),
	HNS3_TQP_STAT("busy", tx_busy),
	HNS3_TQP_STAT("copy", tx_copy),
};

#define HNS3_TXQ_STATS_COUNT ARRAY_SIZE(hns3_txq_stats)

static const struct hns3_stats hns3_rxq_stats[] = {
	/* Rx per-queue statistics */
	HNS3_TQP_STAT("io_err_cnt", io_err_cnt),
	HNS3_TQP_STAT("dropped", sw_err_cnt),
	HNS3_TQP_STAT("seg_pkt_cnt", seg_pkt_cnt),
	HNS3_TQP_STAT("packets", rx_pkts),
	HNS3_TQP_STAT("bytes", rx_bytes),
	HNS3_TQP_STAT("errors", rx_err_cnt),
	HNS3_TQP_STAT("reuse_pg_cnt", reuse_pg_cnt),
	HNS3_TQP_STAT("err_pkt_len", err_pkt_len),
	HNS3_TQP_STAT("non_vld_descs", non_vld_descs),
	HNS3_TQP_STAT("err_bd_num", err_bd_num),
	HNS3_TQP_STAT("l2_err", l2_err),
	HNS3_TQP_STAT("l3l4_csum_err", l3l4_csum_err),
	HNS3_TQP_STAT("multicast", rx_multicast),
	HNS3_TQP_STAT("non_reuse_pg", non_reuse_pg),
};

#define HNS3_RXQ_STATS_COUNT ARRAY_SIZE(hns3_rxq_stats)

/* per-queue stats reported for every tqp (Tx table + Rx table) */
#define HNS3_TQP_STATS_COUNT (HNS3_TXQ_STATS_COUNT + HNS3_RXQ_STATS_COUNT)

/* Loopback self-test parameters: three loop modes (app, serial serdes,
 * parallel serdes), one test packet per run, always sent on ring 0.
 */
#define HNS3_SELF_TEST_TYPE_NUM		3
#define HNS3_NIC_LB_TEST_PKT_NUM	1
#define HNS3_NIC_LB_TEST_RING_ID	0
#define HNS3_NIC_LB_TEST_PACKET_SIZE 128 63 64 /* Nic loopback test err */ 65 #define HNS3_NIC_LB_TEST_NO_MEM_ERR 1 66 #define HNS3_NIC_LB_TEST_TX_CNT_ERR 2 67 #define HNS3_NIC_LB_TEST_RX_CNT_ERR 3 68 69 struct hns3_link_mode_mapping { 70 u32 hns3_link_mode; 71 u32 ethtool_link_mode; 72 }; 73 74 static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en) 75 { 76 struct hnae3_handle *h = hns3_get_handle(ndev); 77 bool vlan_filter_enable; 78 int ret; 79 80 if (!h->ae_algo->ops->set_loopback || 81 !h->ae_algo->ops->set_promisc_mode) 82 return -EOPNOTSUPP; 83 84 switch (loop) { 85 case HNAE3_LOOP_SERIAL_SERDES: 86 case HNAE3_LOOP_PARALLEL_SERDES: 87 case HNAE3_LOOP_APP: 88 ret = h->ae_algo->ops->set_loopback(h, loop, en); 89 break; 90 default: 91 ret = -ENOTSUPP; 92 break; 93 } 94 95 if (ret) 96 return ret; 97 98 if (en) { 99 h->ae_algo->ops->set_promisc_mode(h, true, true); 100 } else { 101 /* recover promisc mode before loopback test */ 102 hns3_update_promisc_mode(ndev, h->netdev_flags); 103 vlan_filter_enable = ndev->flags & IFF_PROMISC ? 
false : true; 104 hns3_enable_vlan_filter(ndev, vlan_filter_enable); 105 } 106 107 return ret; 108 } 109 110 static int hns3_lp_up(struct net_device *ndev, enum hnae3_loop loop_mode) 111 { 112 struct hnae3_handle *h = hns3_get_handle(ndev); 113 int ret; 114 115 ret = hns3_nic_reset_all_ring(h); 116 if (ret) 117 return ret; 118 119 ret = hns3_lp_setup(ndev, loop_mode, true); 120 usleep_range(10000, 20000); 121 122 return ret; 123 } 124 125 static int hns3_lp_down(struct net_device *ndev, enum hnae3_loop loop_mode) 126 { 127 int ret; 128 129 ret = hns3_lp_setup(ndev, loop_mode, false); 130 if (ret) { 131 netdev_err(ndev, "lb_setup return error: %d\n", ret); 132 return ret; 133 } 134 135 usleep_range(10000, 20000); 136 137 return 0; 138 } 139 140 static void hns3_lp_setup_skb(struct sk_buff *skb) 141 { 142 struct net_device *ndev = skb->dev; 143 unsigned char *packet; 144 struct ethhdr *ethh; 145 unsigned int i; 146 147 skb_reserve(skb, NET_IP_ALIGN); 148 ethh = skb_put(skb, sizeof(struct ethhdr)); 149 packet = skb_put(skb, HNS3_NIC_LB_TEST_PACKET_SIZE); 150 151 memcpy(ethh->h_dest, ndev->dev_addr, ETH_ALEN); 152 ethh->h_dest[5] += 0x1f; 153 eth_zero_addr(ethh->h_source); 154 ethh->h_proto = htons(ETH_P_ARP); 155 skb_reset_mac_header(skb); 156 157 for (i = 0; i < HNS3_NIC_LB_TEST_PACKET_SIZE; i++) 158 packet[i] = (unsigned char)(i & 0xff); 159 } 160 161 static void hns3_lb_check_skb_data(struct hns3_enet_ring *ring, 162 struct sk_buff *skb) 163 { 164 struct hns3_enet_tqp_vector *tqp_vector = ring->tqp_vector; 165 unsigned char *packet = skb->data; 166 u32 i; 167 168 for (i = 0; i < skb->len; i++) 169 if (packet[i] != (unsigned char)(i & 0xff)) 170 break; 171 172 /* The packet is correctly received */ 173 if (i == skb->len) 174 tqp_vector->rx_group.total_packets++; 175 else 176 print_hex_dump(KERN_ERR, "selftest:", DUMP_PREFIX_OFFSET, 16, 1, 177 skb->data, skb->len, true); 178 179 dev_kfree_skb_any(skb); 180 } 181 182 static u32 hns3_lb_check_rx_ring(struct 
hns3_nic_priv *priv, u32 budget) 183 { 184 struct hnae3_handle *h = priv->ae_handle; 185 struct hnae3_knic_private_info *kinfo; 186 u32 i, rcv_good_pkt_total = 0; 187 188 kinfo = &h->kinfo; 189 for (i = kinfo->num_tqps; i < kinfo->num_tqps * 2; i++) { 190 struct hns3_enet_ring *ring = priv->ring_data[i].ring; 191 struct hns3_enet_ring_group *rx_group; 192 u64 pre_rx_pkt; 193 194 rx_group = &ring->tqp_vector->rx_group; 195 pre_rx_pkt = rx_group->total_packets; 196 197 preempt_disable(); 198 hns3_clean_rx_ring(ring, budget, hns3_lb_check_skb_data); 199 preempt_enable(); 200 201 rcv_good_pkt_total += (rx_group->total_packets - pre_rx_pkt); 202 rx_group->total_packets = pre_rx_pkt; 203 } 204 return rcv_good_pkt_total; 205 } 206 207 static void hns3_lb_clear_tx_ring(struct hns3_nic_priv *priv, u32 start_ringid, 208 u32 end_ringid, u32 budget) 209 { 210 u32 i; 211 212 for (i = start_ringid; i <= end_ringid; i++) { 213 struct hns3_enet_ring *ring = priv->ring_data[i].ring; 214 215 hns3_clean_tx_ring(ring); 216 } 217 } 218 219 /** 220 * hns3_lp_run_test - run loopback test 221 * @ndev: net device 222 * @mode: loopback type 223 */ 224 static int hns3_lp_run_test(struct net_device *ndev, enum hnae3_loop mode) 225 { 226 struct hns3_nic_priv *priv = netdev_priv(ndev); 227 struct sk_buff *skb; 228 u32 i, good_cnt; 229 int ret_val = 0; 230 231 skb = alloc_skb(HNS3_NIC_LB_TEST_PACKET_SIZE + ETH_HLEN + NET_IP_ALIGN, 232 GFP_KERNEL); 233 if (!skb) 234 return HNS3_NIC_LB_TEST_NO_MEM_ERR; 235 236 skb->dev = ndev; 237 hns3_lp_setup_skb(skb); 238 skb->queue_mapping = HNS3_NIC_LB_TEST_RING_ID; 239 240 good_cnt = 0; 241 for (i = 0; i < HNS3_NIC_LB_TEST_PKT_NUM; i++) { 242 netdev_tx_t tx_ret; 243 244 skb_get(skb); 245 tx_ret = hns3_nic_net_xmit(skb, ndev); 246 if (tx_ret == NETDEV_TX_OK) 247 good_cnt++; 248 else 249 netdev_err(ndev, "hns3_lb_run_test xmit failed: %d\n", 250 tx_ret); 251 } 252 if (good_cnt != HNS3_NIC_LB_TEST_PKT_NUM) { 253 ret_val = HNS3_NIC_LB_TEST_TX_CNT_ERR; 254 
		netdev_err(ndev, "mode %d sent fail, cnt=0x%x, budget=0x%x\n",
			   mode, good_cnt, HNS3_NIC_LB_TEST_PKT_NUM);
		goto out;
	}

	/* Allow 200 milliseconds for packets to go from Tx to Rx */
	msleep(200);

	good_cnt = hns3_lb_check_rx_ring(priv, HNS3_NIC_LB_TEST_PKT_NUM);
	if (good_cnt != HNS3_NIC_LB_TEST_PKT_NUM) {
		ret_val = HNS3_NIC_LB_TEST_RX_CNT_ERR;
		netdev_err(ndev, "mode %d recv fail, cnt=0x%x, budget=0x%x\n",
			   mode, good_cnt, HNS3_NIC_LB_TEST_PKT_NUM);
	}

out:
	/* reclaim the Tx descriptors and drop our last skb reference */
	hns3_lb_clear_tx_ring(priv, HNS3_NIC_LB_TEST_RING_ID,
			      HNS3_NIC_LB_TEST_RING_ID,
			      HNS3_NIC_LB_TEST_PKT_NUM);

	kfree_skb(skb);
	return ret_val;
}

/**
 * hns3_self_test - run the offline loopback self tests
 * @ndev: net device
 * @eth_test: test cmd
 * @data: test result, one u64 per supported loop mode (0 = pass)
 */
static void hns3_self_test(struct net_device *ndev,
			   struct ethtool_test *eth_test, u64 *data)
{
	struct hns3_nic_priv *priv = netdev_priv(ndev);
	struct hnae3_handle *h = priv->ae_handle;
	/* st_param[i] = { loop mode id, mode supported by this handle? } */
	int st_param[HNS3_SELF_TEST_TYPE_NUM][2];
	bool if_running = netif_running(ndev);
#if IS_ENABLED(CONFIG_VLAN_8021Q)
	bool dis_vlan_filter;
#endif
	int test_index = 0;
	u32 i;

	if (hns3_nic_resetting(ndev)) {
		netdev_err(ndev, "dev resetting!");
		return;
	}

	/* Only do offline selftest, or pass by default */
	if (eth_test->flags != ETH_TEST_FL_OFFLINE)
		return;

	st_param[HNAE3_LOOP_APP][0] = HNAE3_LOOP_APP;
	st_param[HNAE3_LOOP_APP][1] =
			h->flags & HNAE3_SUPPORT_APP_LOOPBACK;

	st_param[HNAE3_LOOP_SERIAL_SERDES][0] = HNAE3_LOOP_SERIAL_SERDES;
	st_param[HNAE3_LOOP_SERIAL_SERDES][1] =
			h->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;

	st_param[HNAE3_LOOP_PARALLEL_SERDES][0] =
			HNAE3_LOOP_PARALLEL_SERDES;
	st_param[HNAE3_LOOP_PARALLEL_SERDES][1] =
			h->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;

	/* the test needs the interface quiesced */
	if (if_running)
		ndev->netdev_ops->ndo_stop(ndev);

#if IS_ENABLED(CONFIG_VLAN_8021Q)
	/* Disable the vlan filter for selftest does not support it */
	dis_vlan_filter = (ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
				h->ae_algo->ops->enable_vlan_filter;
	if (dis_vlan_filter)
		h->ae_algo->ops->enable_vlan_filter(h, false);
#endif

	set_bit(HNS3_NIC_STATE_TESTING, &priv->state);

	for (i = 0; i < HNS3_SELF_TEST_TYPE_NUM; i++) {
		enum hnae3_loop loop_type = (enum hnae3_loop)st_param[i][0];

		if (!st_param[i][1])
			continue;

		/* data[] slot is nonzero on any failure (up or run) */
		data[test_index] = hns3_lp_up(ndev, loop_type);
		if (!data[test_index])
			data[test_index] = hns3_lp_run_test(ndev, loop_type);

		hns3_lp_down(ndev, loop_type);

		if (data[test_index])
			eth_test->flags |= ETH_TEST_FL_FAILED;

		test_index++;
	}

	clear_bit(HNS3_NIC_STATE_TESTING, &priv->state);

#if IS_ENABLED(CONFIG_VLAN_8021Q)
	if (dis_vlan_filter)
		h->ae_algo->ops->enable_vlan_filter(h, true);
#endif

	if (if_running)
		ndev->netdev_ops->ndo_open(ndev);
}

/* hns3_get_sset_count - number of strings/stats for the given string set;
 * for ETH_SS_STATS the per-queue counters are added to the ae layer's own.
 */
static int hns3_get_sset_count(struct net_device *netdev, int stringset)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	const struct hnae3_ae_ops *ops = h->ae_algo->ops;

	if (!ops->get_sset_count)
		return -EOPNOTSUPP;

	switch (stringset) {
	case ETH_SS_STATS:
		return ((HNS3_TQP_STATS_COUNT * h->kinfo.num_tqps) +
			ops->get_sset_count(h, stringset));

	case ETH_SS_TEST:
		return ops->get_sset_count(h, stringset);

	default:
		return -EOPNOTSUPP;
	}
}

/* hns3_update_strings - emit "<prefix><queue>_<stat>" names, one
 * ETH_GSTRING_LEN slot each, for num_tqps queues; returns the advanced
 * cursor.
 */
static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats,
				 u32 stat_count, u32 num_tqps,
				 const char *prefix)
{
#define MAX_PREFIX_SIZE (6 + 4)
	u32 size_left;
	u32 i, j;
	u32 n1;

	for (i = 0; i < num_tqps; i++) {
		for (j = 0; j < stat_count; j++) {
			data[ETH_GSTRING_LEN - 1] = '\0';

			/* first, prepend the prefix string */
			n1 = snprintf(data,
MAX_PREFIX_SIZE, "%s%d_", 396 prefix, i); 397 n1 = min_t(uint, n1, MAX_PREFIX_SIZE - 1); 398 size_left = (ETH_GSTRING_LEN - 1) - n1; 399 400 /* now, concatenate the stats string to it */ 401 strncat(data, stats[j].stats_string, size_left); 402 data += ETH_GSTRING_LEN; 403 } 404 } 405 406 return data; 407 } 408 409 static u8 *hns3_get_strings_tqps(struct hnae3_handle *handle, u8 *data) 410 { 411 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 412 const char tx_prefix[] = "txq"; 413 const char rx_prefix[] = "rxq"; 414 415 /* get strings for Tx */ 416 data = hns3_update_strings(data, hns3_txq_stats, HNS3_TXQ_STATS_COUNT, 417 kinfo->num_tqps, tx_prefix); 418 419 /* get strings for Rx */ 420 data = hns3_update_strings(data, hns3_rxq_stats, HNS3_RXQ_STATS_COUNT, 421 kinfo->num_tqps, rx_prefix); 422 423 return data; 424 } 425 426 static void hns3_get_strings(struct net_device *netdev, u32 stringset, u8 *data) 427 { 428 struct hnae3_handle *h = hns3_get_handle(netdev); 429 const struct hnae3_ae_ops *ops = h->ae_algo->ops; 430 char *buff = (char *)data; 431 432 if (!ops->get_strings) 433 return; 434 435 switch (stringset) { 436 case ETH_SS_STATS: 437 buff = hns3_get_strings_tqps(h, buff); 438 h->ae_algo->ops->get_strings(h, stringset, (u8 *)buff); 439 break; 440 case ETH_SS_TEST: 441 ops->get_strings(h, stringset, data); 442 break; 443 default: 444 break; 445 } 446 } 447 448 static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data) 449 { 450 struct hns3_nic_priv *nic_priv = (struct hns3_nic_priv *)handle->priv; 451 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 452 struct hns3_enet_ring *ring; 453 u8 *stat; 454 int i, j; 455 456 /* get stats for Tx */ 457 for (i = 0; i < kinfo->num_tqps; i++) { 458 ring = nic_priv->ring_data[i].ring; 459 for (j = 0; j < HNS3_TXQ_STATS_COUNT; j++) { 460 stat = (u8 *)ring + hns3_txq_stats[j].stats_offset; 461 *data++ = *(u64 *)stat; 462 } 463 } 464 465 /* get stats for Rx */ 466 for (i = 0; i < kinfo->num_tqps; 
i++) { 467 ring = nic_priv->ring_data[i + kinfo->num_tqps].ring; 468 for (j = 0; j < HNS3_RXQ_STATS_COUNT; j++) { 469 stat = (u8 *)ring + hns3_rxq_stats[j].stats_offset; 470 *data++ = *(u64 *)stat; 471 } 472 } 473 474 return data; 475 } 476 477 /* hns3_get_stats - get detail statistics. 478 * @netdev: net device 479 * @stats: statistics info. 480 * @data: statistics data. 481 */ 482 static void hns3_get_stats(struct net_device *netdev, 483 struct ethtool_stats *stats, u64 *data) 484 { 485 struct hnae3_handle *h = hns3_get_handle(netdev); 486 u64 *p = data; 487 488 if (hns3_nic_resetting(netdev)) { 489 netdev_err(netdev, "dev resetting, could not get stats\n"); 490 return; 491 } 492 493 if (!h->ae_algo->ops->get_stats || !h->ae_algo->ops->update_stats) { 494 netdev_err(netdev, "could not get any statistics\n"); 495 return; 496 } 497 498 h->ae_algo->ops->update_stats(h, &netdev->stats); 499 500 /* get per-queue stats */ 501 p = hns3_get_stats_tqps(h, p); 502 503 /* get MAC & other misc hardware stats */ 504 h->ae_algo->ops->get_stats(h, p); 505 } 506 507 static void hns3_get_drvinfo(struct net_device *netdev, 508 struct ethtool_drvinfo *drvinfo) 509 { 510 struct hns3_nic_priv *priv = netdev_priv(netdev); 511 struct hnae3_handle *h = priv->ae_handle; 512 513 strncpy(drvinfo->version, hns3_driver_version, 514 sizeof(drvinfo->version)); 515 drvinfo->version[sizeof(drvinfo->version) - 1] = '\0'; 516 517 strncpy(drvinfo->driver, h->pdev->driver->name, 518 sizeof(drvinfo->driver)); 519 drvinfo->driver[sizeof(drvinfo->driver) - 1] = '\0'; 520 521 strncpy(drvinfo->bus_info, pci_name(h->pdev), 522 sizeof(drvinfo->bus_info)); 523 drvinfo->bus_info[ETHTOOL_BUSINFO_LEN - 1] = '\0'; 524 525 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "0x%08x", 526 priv->ae_handle->ae_algo->ops->get_fw_version(h)); 527 } 528 529 static u32 hns3_get_link(struct net_device *netdev) 530 { 531 struct hnae3_handle *h = hns3_get_handle(netdev); 532 533 if (h->ae_algo && h->ae_algo->ops && 
h->ae_algo->ops->get_status) 534 return h->ae_algo->ops->get_status(h); 535 else 536 return 0; 537 } 538 539 static void hns3_get_ringparam(struct net_device *netdev, 540 struct ethtool_ringparam *param) 541 { 542 struct hns3_nic_priv *priv = netdev_priv(netdev); 543 struct hnae3_handle *h = priv->ae_handle; 544 int queue_num = h->kinfo.num_tqps; 545 546 if (hns3_nic_resetting(netdev)) { 547 netdev_err(netdev, "dev resetting!"); 548 return; 549 } 550 551 param->tx_max_pending = HNS3_RING_MAX_PENDING; 552 param->rx_max_pending = HNS3_RING_MAX_PENDING; 553 554 param->tx_pending = priv->ring_data[0].ring->desc_num; 555 param->rx_pending = priv->ring_data[queue_num].ring->desc_num; 556 } 557 558 static void hns3_get_pauseparam(struct net_device *netdev, 559 struct ethtool_pauseparam *param) 560 { 561 struct hnae3_handle *h = hns3_get_handle(netdev); 562 563 if (h->ae_algo && h->ae_algo->ops && h->ae_algo->ops->get_pauseparam) 564 h->ae_algo->ops->get_pauseparam(h, ¶m->autoneg, 565 ¶m->rx_pause, ¶m->tx_pause); 566 } 567 568 static int hns3_set_pauseparam(struct net_device *netdev, 569 struct ethtool_pauseparam *param) 570 { 571 struct hnae3_handle *h = hns3_get_handle(netdev); 572 573 if (h->ae_algo->ops->set_pauseparam) 574 return h->ae_algo->ops->set_pauseparam(h, param->autoneg, 575 param->rx_pause, 576 param->tx_pause); 577 return -EOPNOTSUPP; 578 } 579 580 static void hns3_get_ksettings(struct hnae3_handle *h, 581 struct ethtool_link_ksettings *cmd) 582 { 583 const struct hnae3_ae_ops *ops = h->ae_algo->ops; 584 585 /* 1.auto_neg & speed & duplex from cmd */ 586 if (ops->get_ksettings_an_result) 587 ops->get_ksettings_an_result(h, 588 &cmd->base.autoneg, 589 &cmd->base.speed, 590 &cmd->base.duplex); 591 592 /* 2.get link mode*/ 593 if (ops->get_link_mode) 594 ops->get_link_mode(h, 595 cmd->link_modes.supported, 596 cmd->link_modes.advertising); 597 598 /* 3.mdix_ctrl&mdix get from phy reg */ 599 if (ops->get_mdix_mode) 600 ops->get_mdix_mode(h, 
&cmd->base.eth_tp_mdix_ctrl, 601 &cmd->base.eth_tp_mdix); 602 } 603 604 static int hns3_get_link_ksettings(struct net_device *netdev, 605 struct ethtool_link_ksettings *cmd) 606 { 607 struct hnae3_handle *h = hns3_get_handle(netdev); 608 const struct hnae3_ae_ops *ops; 609 u8 module_type; 610 u8 media_type; 611 u8 link_stat; 612 613 if (!h->ae_algo || !h->ae_algo->ops) 614 return -EOPNOTSUPP; 615 616 ops = h->ae_algo->ops; 617 if (ops->get_media_type) 618 ops->get_media_type(h, &media_type, &module_type); 619 else 620 return -EOPNOTSUPP; 621 622 switch (media_type) { 623 case HNAE3_MEDIA_TYPE_NONE: 624 cmd->base.port = PORT_NONE; 625 hns3_get_ksettings(h, cmd); 626 break; 627 case HNAE3_MEDIA_TYPE_FIBER: 628 if (module_type == HNAE3_MODULE_TYPE_CR) 629 cmd->base.port = PORT_DA; 630 else 631 cmd->base.port = PORT_FIBRE; 632 633 hns3_get_ksettings(h, cmd); 634 break; 635 case HNAE3_MEDIA_TYPE_BACKPLANE: 636 cmd->base.port = PORT_NONE; 637 hns3_get_ksettings(h, cmd); 638 break; 639 case HNAE3_MEDIA_TYPE_COPPER: 640 cmd->base.port = PORT_TP; 641 if (!netdev->phydev) 642 hns3_get_ksettings(h, cmd); 643 else 644 phy_ethtool_ksettings_get(netdev->phydev, cmd); 645 break; 646 default: 647 648 netdev_warn(netdev, "Unknown media type"); 649 return 0; 650 } 651 652 /* mdio_support */ 653 cmd->base.mdio_support = ETH_MDIO_SUPPORTS_C22; 654 655 link_stat = hns3_get_link(netdev); 656 if (!link_stat) { 657 cmd->base.speed = SPEED_UNKNOWN; 658 cmd->base.duplex = DUPLEX_UNKNOWN; 659 } 660 661 return 0; 662 } 663 664 static int hns3_check_ksettings_param(struct net_device *netdev, 665 const struct ethtool_link_ksettings *cmd) 666 { 667 struct hnae3_handle *handle = hns3_get_handle(netdev); 668 const struct hnae3_ae_ops *ops = handle->ae_algo->ops; 669 u8 module_type = HNAE3_MODULE_TYPE_UNKNOWN; 670 u8 media_type = HNAE3_MEDIA_TYPE_UNKNOWN; 671 u8 autoneg; 672 u32 speed; 673 u8 duplex; 674 int ret; 675 676 if (ops->get_ksettings_an_result) { 677 ops->get_ksettings_an_result(handle, 
					     &autoneg, &speed, &duplex);
		/* requested config already in effect: nothing to check */
		if (cmd->base.autoneg == autoneg && cmd->base.speed == speed &&
		    cmd->base.duplex == duplex)
			return 0;
	}

	if (ops->get_media_type)
		ops->get_media_type(handle, &media_type, &module_type);

	/* half duplex is only permitted on copper ports */
	if (cmd->base.duplex != DUPLEX_FULL &&
	    media_type != HNAE3_MEDIA_TYPE_COPPER) {
		netdev_err(netdev,
			   "only copper port supports half duplex!");
		return -EINVAL;
	}

	if (ops->check_port_speed) {
		ret = ops->check_port_speed(handle, cmd->base.speed);
		if (ret) {
			netdev_err(netdev, "unsupported speed\n");
			return ret;
		}
	}

	return 0;
}

/* hns3_set_link_ksettings - ethtool .set_link_ksettings: delegate to the
 * PHY when one is attached, otherwise validate and program the MAC
 * (not supported on revision 0x20 hardware).
 */
static int hns3_set_link_ksettings(struct net_device *netdev,
				   const struct ethtool_link_ksettings *cmd)
{
	struct hnae3_handle *handle = hns3_get_handle(netdev);
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
	int ret = 0;

	/* Chip don't support this mode. */
	if (cmd->base.speed == SPEED_1000 && cmd->base.duplex == DUPLEX_HALF)
		return -EINVAL;

	/* Only support ksettings_set for netdev with phy attached for now */
	if (netdev->phydev)
		return phy_ethtool_ksettings_set(netdev->phydev, cmd);

	if (handle->pdev->revision == 0x20)
		return -EOPNOTSUPP;

	ret = hns3_check_ksettings_param(netdev, cmd);
	if (ret)
		return ret;

	if (ops->set_autoneg) {
		ret = ops->set_autoneg(handle, cmd->base.autoneg);
		if (ret)
			return ret;
	}

	if (ops->cfg_mac_speed_dup_h)
		ret = ops->cfg_mac_speed_dup_h(handle, cmd->base.speed,
					       cmd->base.duplex);

	return ret;
}

/* hns3_get_rss_key_size - ethtool .get_rxfh_key_size via the ae layer */
static u32 hns3_get_rss_key_size(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (!h->ae_algo || !h->ae_algo->ops ||
	    !h->ae_algo->ops->get_rss_key_size)
		return 0;

	return h->ae_algo->ops->get_rss_key_size(h);
}

/* hns3_get_rss_indir_size - ethtool .get_rxfh_indir_size via the ae layer */
static u32 hns3_get_rss_indir_size(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (!h->ae_algo || !h->ae_algo->ops ||
	    !h->ae_algo->ops->get_rss_indir_size)
		return 0;

	return h->ae_algo->ops->get_rss_indir_size(h);
}

/* hns3_get_rss - ethtool .get_rxfh via the ae layer */
static int hns3_get_rss(struct net_device *netdev, u32 *indir, u8 *key,
			u8 *hfunc)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->get_rss)
		return -EOPNOTSUPP;

	return h->ae_algo->ops->get_rss(h, indir, key, hfunc);
}

/* hns3_set_rss - ethtool .set_rxfh: revision 0x20 hardware only supports
 * Toeplitz; newer hardware also allows XOR or leaving the hash unchanged.
 * An indirection table must always be supplied.
 */
static int hns3_set_rss(struct net_device *netdev, const u32 *indir,
			const u8 *key, const u8 hfunc)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->set_rss)
		return -EOPNOTSUPP;

	if ((h->pdev->revision == 0x20 &&
	     hfunc != ETH_RSS_HASH_TOP) || (hfunc != ETH_RSS_HASH_NO_CHANGE &&
	     hfunc != ETH_RSS_HASH_TOP && hfunc != ETH_RSS_HASH_XOR)) {
		netdev_err(netdev, "hash func not supported\n");
		return -EOPNOTSUPP;
	}

	if (!indir) {
		netdev_err(netdev,
			   "set rss failed for indir is empty\n");
		return -EOPNOTSUPP;
	}

	return h->ae_algo->ops->set_rss(h, indir, key, hfunc);
}

/* hns3_get_rxnfc - ethtool .get_rxnfc: ring count, RSS tuple and flow
 * director rule queries, each delegated to the ae layer when available
 */
static int hns3_get_rxnfc(struct net_device *netdev,
			  struct ethtool_rxnfc *cmd,
			  u32 *rule_locs)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (!h->ae_algo || !h->ae_algo->ops)
		return -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = h->kinfo.num_tqps;
		return 0;
	case ETHTOOL_GRXFH:
		if (h->ae_algo->ops->get_rss_tuple)
			return h->ae_algo->ops->get_rss_tuple(h, cmd);
		return -EOPNOTSUPP;
	case ETHTOOL_GRXCLSRLCNT:
		if (h->ae_algo->ops->get_fd_rule_cnt)
			return h->ae_algo->ops->get_fd_rule_cnt(h, cmd);
		return -EOPNOTSUPP;
	case ETHTOOL_GRXCLSRULE:
		if (h->ae_algo->ops->get_fd_rule_info)
			return h->ae_algo->ops->get_fd_rule_info(h,
								 cmd);
		return -EOPNOTSUPP;
	case ETHTOOL_GRXCLSRLALL:
		if (h->ae_algo->ops->get_fd_all_rules)
			return h->ae_algo->ops->get_fd_all_rules(h, cmd,
								 rule_locs);
		return -EOPNOTSUPP;
	default:
		return -EOPNOTSUPP;
	}
}

/* hns3_change_all_ring_bd_num - record the new Tx/Rx descriptor counts
 * on every ring (Rx rings follow the Tx rings in ring_data) and
 * re-initialize all rings with them
 */
static int hns3_change_all_ring_bd_num(struct hns3_nic_priv *priv,
				       u32 tx_desc_num, u32 rx_desc_num)
{
	struct hnae3_handle *h = priv->ae_handle;
	int i;

	h->kinfo.num_tx_desc = tx_desc_num;
	h->kinfo.num_rx_desc = rx_desc_num;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		priv->ring_data[i].ring->desc_num = tx_desc_num;
		priv->ring_data[i + h->kinfo.num_tqps].ring->desc_num =
			rx_desc_num;
	}

	return hns3_init_all_ring(priv);
}

/* hns3_set_ringparam - ethtool .set_ringparam: validate the requested
 * depths, round up to the hardware's multiple-of-8 requirement, then
 * tear down and rebuild all rings; on failure the old sizes are
 * restored.
 *
 * NOTE(review): if hns3_uninit_all_ring() fails, or the revert itself
 * fails, the function returns with the interface still stopped —
 * presumably a subsequent reset recovers; verify against the reset path.
 */
static int hns3_set_ringparam(struct net_device *ndev,
			      struct ethtool_ringparam *param)
{
	struct hns3_nic_priv *priv = netdev_priv(ndev);
	struct hnae3_handle *h = priv->ae_handle;
	bool if_running = netif_running(ndev);
	u32 old_tx_desc_num, new_tx_desc_num;
	u32 old_rx_desc_num, new_rx_desc_num;
	int queue_num = h->kinfo.num_tqps;
	int ret;

	if (hns3_nic_resetting(ndev))
		return -EBUSY;

	if (param->rx_mini_pending || param->rx_jumbo_pending)
		return -EINVAL;

	if (param->tx_pending > HNS3_RING_MAX_PENDING ||
	    param->tx_pending < HNS3_RING_MIN_PENDING ||
	    param->rx_pending > HNS3_RING_MAX_PENDING ||
	    param->rx_pending < HNS3_RING_MIN_PENDING) {
		netdev_err(ndev, "Queue depth out of range [%d-%d]\n",
			   HNS3_RING_MIN_PENDING, HNS3_RING_MAX_PENDING);
		return -EINVAL;
	}

	/* Hardware requires that its descriptors must be multiple of eight */
	new_tx_desc_num = ALIGN(param->tx_pending, HNS3_RING_BD_MULTIPLE);
	new_rx_desc_num = ALIGN(param->rx_pending, HNS3_RING_BD_MULTIPLE);
	old_tx_desc_num = priv->ring_data[0].ring->desc_num;
	old_rx_desc_num = priv->ring_data[queue_num].ring->desc_num;
	if (old_tx_desc_num == new_tx_desc_num &&
	    old_rx_desc_num == new_rx_desc_num)
		return 0;

	netdev_info(ndev,
		    "Changing Tx/Rx ring depth from %d/%d to %d/%d\n",
		    old_tx_desc_num, old_rx_desc_num,
		    new_tx_desc_num, new_rx_desc_num);

	if (if_running)
		ndev->netdev_ops->ndo_stop(ndev);

	ret = hns3_uninit_all_ring(priv);
	if (ret)
		return ret;

	ret = hns3_change_all_ring_bd_num(priv, new_tx_desc_num,
					  new_rx_desc_num);
	if (ret) {
		/* roll back to the previous descriptor counts */
		ret = hns3_change_all_ring_bd_num(priv, old_tx_desc_num,
						  old_rx_desc_num);
		if (ret) {
			netdev_err(ndev,
				   "Revert to old bd num fail, ret=%d.\n", ret);
			return ret;
		}
	}

	if (if_running)
		ret = ndev->netdev_ops->ndo_open(ndev);

	return ret;
}

/* hns3_set_rxnfc - ethtool .set_rxnfc: RSS tuple and flow director rule
 * insertion/deletion via the ae layer
 */
static int hns3_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (!h->ae_algo || !h->ae_algo->ops)
		return -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_SRXFH:
		if (h->ae_algo->ops->set_rss_tuple)
			return h->ae_algo->ops->set_rss_tuple(h, cmd);
		return -EOPNOTSUPP;
	case ETHTOOL_SRXCLSRLINS:
		if (h->ae_algo->ops->add_fd_entry)
			return h->ae_algo->ops->add_fd_entry(h, cmd);
		return -EOPNOTSUPP;
	case ETHTOOL_SRXCLSRLDEL:
		if (h->ae_algo->ops->del_fd_entry)
			return h->ae_algo->ops->del_fd_entry(h, cmd);
		return -EOPNOTSUPP;
	default:
		return -EOPNOTSUPP;
	}
}

/* hns3_nway_reset - ethtool .nway_reset: restart autonegotiation via the
 * PHY when attached, otherwise via the MAC (not on revision 0x20)
 */
static int hns3_nway_reset(struct net_device *netdev)
{
	struct hnae3_handle *handle = hns3_get_handle(netdev);
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
	struct phy_device *phy = netdev->phydev;
	int autoneg;

	if (!netif_running(netdev))
		return 0;

	if (hns3_nic_resetting(netdev)) {
		netdev_err(netdev, "dev resetting!");
		return -EBUSY;
	}

	if (!ops->get_autoneg || !ops->restart_autoneg)
		return -EOPNOTSUPP;

	autoneg = ops->get_autoneg(handle);
	if (autoneg
	    != AUTONEG_ENABLE) {
		netdev_err(netdev,
			   "Autoneg is off, don't support to restart it\n");
		return -EINVAL;
	}

	if (phy)
		return genphy_restart_aneg(phy);

	if (handle->pdev->revision == 0x20)
		return -EOPNOTSUPP;

	return ops->restart_autoneg(handle);
}

/* hns3_get_channels - ethtool .get_channels via the ae layer */
static void hns3_get_channels(struct net_device *netdev,
			      struct ethtool_channels *ch)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->get_channels)
		h->ae_algo->ops->get_channels(h, ch);
}

/* hns3_get_coalesce_per_queue - report GL (usecs), adaptive-coalesce
 * state and the global RL (usecs_high) for one queue's Tx/Rx vectors
 */
static int hns3_get_coalesce_per_queue(struct net_device *netdev, u32 queue,
				       struct ethtool_coalesce *cmd)
{
	struct hns3_enet_tqp_vector *tx_vector, *rx_vector;
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;
	u16 queue_num = h->kinfo.num_tqps;

	if (hns3_nic_resetting(netdev))
		return -EBUSY;

	if (queue >= queue_num) {
		netdev_err(netdev,
			   "Invalid queue value %d! Queue max id=%d\n",
			   queue, queue_num - 1);
		return -EINVAL;
	}

	/* Rx rings follow the Tx rings in ring_data */
	tx_vector = priv->ring_data[queue].ring->tqp_vector;
	rx_vector = priv->ring_data[queue_num + queue].ring->tqp_vector;

	cmd->use_adaptive_tx_coalesce =
			tx_vector->tx_group.coal.gl_adapt_enable;
	cmd->use_adaptive_rx_coalesce =
			rx_vector->rx_group.coal.gl_adapt_enable;

	cmd->tx_coalesce_usecs = tx_vector->tx_group.coal.int_gl;
	cmd->rx_coalesce_usecs = rx_vector->rx_group.coal.int_gl;

	/* RL (rate limit) is a single device-wide setting */
	cmd->tx_coalesce_usecs_high = h->kinfo.int_rl_setting;
	cmd->rx_coalesce_usecs_high = h->kinfo.int_rl_setting;

	return 0;
}

/* hns3_get_coalesce - ethtool .get_coalesce: queue 0 is representative */
static int hns3_get_coalesce(struct net_device *netdev,
			     struct ethtool_coalesce *cmd)
{
	return hns3_get_coalesce_per_queue(netdev, 0, cmd);
}

/* hns3_check_gl_coalesce_para - validate the requested GL (gap limit)
 * values; out-of-range is an error, odd values are merely warned about
 * because they will be rounded down to a multiple of 2
 */
static int hns3_check_gl_coalesce_para(struct net_device *netdev,
				       struct ethtool_coalesce *cmd)
{
	u32 rx_gl, tx_gl;

	if (cmd->rx_coalesce_usecs > HNS3_INT_GL_MAX) {
		netdev_err(netdev,
			   "Invalid rx-usecs value, rx-usecs range is 0-%d\n",
			   HNS3_INT_GL_MAX);
		return -EINVAL;
	}

	if (cmd->tx_coalesce_usecs > HNS3_INT_GL_MAX) {
		netdev_err(netdev,
			   "Invalid tx-usecs value, tx-usecs range is 0-%d\n",
			   HNS3_INT_GL_MAX);
		return -EINVAL;
	}

	rx_gl = hns3_gl_round_down(cmd->rx_coalesce_usecs);
	if (rx_gl != cmd->rx_coalesce_usecs) {
		netdev_info(netdev,
			    "rx_usecs(%d) rounded down to %d, because it must be multiple of 2.\n",
			    cmd->rx_coalesce_usecs, rx_gl);
	}

	tx_gl = hns3_gl_round_down(cmd->tx_coalesce_usecs);
	if (tx_gl != cmd->tx_coalesce_usecs) {
		netdev_info(netdev,
			    "tx_usecs(%d) rounded down to %d, because it must be multiple of 2.\n",
			    cmd->tx_coalesce_usecs, tx_gl);
	}

	return 0;
}

/* hns3_check_rl_coalesce_para - validate the requested RL (rate limit)
 * value; Tx and Rx must match since the hardware setting is shared
 */
static int hns3_check_rl_coalesce_para(struct net_device *netdev,
				       struct ethtool_coalesce *cmd)
{
	u32 rl;

	if (cmd->tx_coalesce_usecs_high != cmd->rx_coalesce_usecs_high) {
		netdev_err(netdev,
			   "tx_usecs_high must be same as rx_usecs_high.\n");
		return -EINVAL;
	}

	if (cmd->rx_coalesce_usecs_high > HNS3_INT_RL_MAX) {
		netdev_err(netdev,
			   "Invalid usecs_high value, usecs_high range is 0-%d\n",
			   HNS3_INT_RL_MAX);
		return -EINVAL;
	}

	/* odd values are only warned about; they get rounded on apply */
	rl = hns3_rl_round_down(cmd->rx_coalesce_usecs_high);
	if (rl != cmd->rx_coalesce_usecs_high) {
		netdev_info(netdev,
			    "usecs_high(%d) rounded down to %d, because it must be multiple of 4.\n",
			    cmd->rx_coalesce_usecs_high, rl);
	}

	return 0;
}

/* hns3_check_coalesce_para - run the GL and RL validations and warn when
 * adaptive coalescing will override the fixed usecs values
 */
static int hns3_check_coalesce_para(struct net_device *netdev,
				    struct ethtool_coalesce *cmd)
{
	int ret;

	ret = hns3_check_gl_coalesce_para(netdev, cmd);
	if (ret) {
		netdev_err(netdev,
			   "Check gl coalesce param fail. ret = %d\n", ret);
		return ret;
	}

	ret = hns3_check_rl_coalesce_para(netdev, cmd);
	if (ret) {
		netdev_err(netdev,
			   "Check rl coalesce param fail. ret = %d\n", ret);
		return ret;
	}

	if (cmd->use_adaptive_tx_coalesce == 1 ||
	    cmd->use_adaptive_rx_coalesce == 1) {
		netdev_info(netdev,
			    "adaptive-tx=%d and adaptive-rx=%d, tx_usecs or rx_usecs will changed dynamically.\n",
			    cmd->use_adaptive_tx_coalesce,
			    cmd->use_adaptive_rx_coalesce);
	}

	return 0;
}

/* hns3_set_coalesce_per_queue - apply the (already validated) coalesce
 * settings to one queue's Tx and Rx vectors and program the hardware
 */
static void hns3_set_coalesce_per_queue(struct net_device *netdev,
					struct ethtool_coalesce *cmd,
					u32 queue)
{
	struct hns3_enet_tqp_vector *tx_vector, *rx_vector;
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;
	int queue_num = h->kinfo.num_tqps;

	/* Rx rings follow the Tx rings in ring_data */
	tx_vector = priv->ring_data[queue].ring->tqp_vector;
	rx_vector = priv->ring_data[queue_num + queue].ring->tqp_vector;

	tx_vector->tx_group.coal.gl_adapt_enable =
				cmd->use_adaptive_tx_coalesce;
	rx_vector->rx_group.coal.gl_adapt_enable =
				cmd->use_adaptive_rx_coalesce;

	tx_vector->tx_group.coal.int_gl = cmd->tx_coalesce_usecs;
	rx_vector->rx_group.coal.int_gl = cmd->rx_coalesce_usecs;

	hns3_set_vector_coalesce_tx_gl(tx_vector,
				       tx_vector->tx_group.coal.int_gl);
	hns3_set_vector_coalesce_rx_gl(rx_vector,
				       rx_vector->rx_group.coal.int_gl);

	hns3_set_vector_coalesce_rl(tx_vector, h->kinfo.int_rl_setting);
	hns3_set_vector_coalesce_rl(rx_vector, h->kinfo.int_rl_setting);
}

/* hns3_set_coalesce - ethtool .set_coalesce: validate once, then apply
 * the same settings to every queue
 */
static int hns3_set_coalesce(struct net_device *netdev,
			     struct ethtool_coalesce *cmd)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	u16 queue_num = h->kinfo.num_tqps;
	int ret;
	int i;

	if (hns3_nic_resetting(netdev))
		return -EBUSY;

	ret = hns3_check_coalesce_para(netdev, cmd);
	if (ret)
		return ret;

	/* RL is device-wide; store the rounded value */
	h->kinfo.int_rl_setting =
		hns3_rl_round_down(cmd->rx_coalesce_usecs_high);

	for (i = 0; i < queue_num; i++)
hns3_set_coalesce_per_queue(netdev, cmd, i); 1166 1167 return 0; 1168 } 1169 1170 static int hns3_get_regs_len(struct net_device *netdev) 1171 { 1172 struct hnae3_handle *h = hns3_get_handle(netdev); 1173 1174 if (!h->ae_algo->ops->get_regs_len) 1175 return -EOPNOTSUPP; 1176 1177 return h->ae_algo->ops->get_regs_len(h); 1178 } 1179 1180 static void hns3_get_regs(struct net_device *netdev, 1181 struct ethtool_regs *cmd, void *data) 1182 { 1183 struct hnae3_handle *h = hns3_get_handle(netdev); 1184 1185 if (!h->ae_algo->ops->get_regs) 1186 return; 1187 1188 h->ae_algo->ops->get_regs(h, &cmd->version, data); 1189 } 1190 1191 static int hns3_set_phys_id(struct net_device *netdev, 1192 enum ethtool_phys_id_state state) 1193 { 1194 struct hnae3_handle *h = hns3_get_handle(netdev); 1195 1196 if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->set_led_id) 1197 return -EOPNOTSUPP; 1198 1199 return h->ae_algo->ops->set_led_id(h, state); 1200 } 1201 1202 static u32 hns3_get_msglevel(struct net_device *netdev) 1203 { 1204 struct hnae3_handle *h = hns3_get_handle(netdev); 1205 1206 return h->msg_enable; 1207 } 1208 1209 static void hns3_set_msglevel(struct net_device *netdev, u32 msg_level) 1210 { 1211 struct hnae3_handle *h = hns3_get_handle(netdev); 1212 1213 h->msg_enable = msg_level; 1214 } 1215 1216 /* Translate local fec value into ethtool value. */ 1217 static unsigned int loc_to_eth_fec(u8 loc_fec) 1218 { 1219 u32 eth_fec = 0; 1220 1221 if (loc_fec & BIT(HNAE3_FEC_AUTO)) 1222 eth_fec |= ETHTOOL_FEC_AUTO; 1223 if (loc_fec & BIT(HNAE3_FEC_RS)) 1224 eth_fec |= ETHTOOL_FEC_RS; 1225 if (loc_fec & BIT(HNAE3_FEC_BASER)) 1226 eth_fec |= ETHTOOL_FEC_BASER; 1227 1228 /* if nothing is set, then FEC is off */ 1229 if (!eth_fec) 1230 eth_fec = ETHTOOL_FEC_OFF; 1231 1232 return eth_fec; 1233 } 1234 1235 /* Translate ethtool fec value into local value. 
*/ 1236 static unsigned int eth_to_loc_fec(unsigned int eth_fec) 1237 { 1238 u32 loc_fec = 0; 1239 1240 if (eth_fec & ETHTOOL_FEC_OFF) 1241 return loc_fec; 1242 1243 if (eth_fec & ETHTOOL_FEC_AUTO) 1244 loc_fec |= BIT(HNAE3_FEC_AUTO); 1245 if (eth_fec & ETHTOOL_FEC_RS) 1246 loc_fec |= BIT(HNAE3_FEC_RS); 1247 if (eth_fec & ETHTOOL_FEC_BASER) 1248 loc_fec |= BIT(HNAE3_FEC_BASER); 1249 1250 return loc_fec; 1251 } 1252 1253 static int hns3_get_fecparam(struct net_device *netdev, 1254 struct ethtool_fecparam *fec) 1255 { 1256 struct hnae3_handle *handle = hns3_get_handle(netdev); 1257 const struct hnae3_ae_ops *ops = handle->ae_algo->ops; 1258 u8 fec_ability; 1259 u8 fec_mode; 1260 1261 if (handle->pdev->revision == 0x20) 1262 return -EOPNOTSUPP; 1263 1264 if (!ops->get_fec) 1265 return -EOPNOTSUPP; 1266 1267 ops->get_fec(handle, &fec_ability, &fec_mode); 1268 1269 fec->fec = loc_to_eth_fec(fec_ability); 1270 fec->active_fec = loc_to_eth_fec(fec_mode); 1271 1272 return 0; 1273 } 1274 1275 static int hns3_set_fecparam(struct net_device *netdev, 1276 struct ethtool_fecparam *fec) 1277 { 1278 struct hnae3_handle *handle = hns3_get_handle(netdev); 1279 const struct hnae3_ae_ops *ops = handle->ae_algo->ops; 1280 u32 fec_mode; 1281 1282 if (handle->pdev->revision == 0x20) 1283 return -EOPNOTSUPP; 1284 1285 if (!ops->set_fec) 1286 return -EOPNOTSUPP; 1287 fec_mode = eth_to_loc_fec(fec->fec); 1288 return ops->set_fec(handle, fec_mode); 1289 } 1290 1291 static const struct ethtool_ops hns3vf_ethtool_ops = { 1292 .get_drvinfo = hns3_get_drvinfo, 1293 .get_ringparam = hns3_get_ringparam, 1294 .set_ringparam = hns3_set_ringparam, 1295 .get_strings = hns3_get_strings, 1296 .get_ethtool_stats = hns3_get_stats, 1297 .get_sset_count = hns3_get_sset_count, 1298 .get_rxnfc = hns3_get_rxnfc, 1299 .set_rxnfc = hns3_set_rxnfc, 1300 .get_rxfh_key_size = hns3_get_rss_key_size, 1301 .get_rxfh_indir_size = hns3_get_rss_indir_size, 1302 .get_rxfh = hns3_get_rss, 1303 .set_rxfh = hns3_set_rss, 
1304 .get_link_ksettings = hns3_get_link_ksettings, 1305 .get_channels = hns3_get_channels, 1306 .get_coalesce = hns3_get_coalesce, 1307 .set_coalesce = hns3_set_coalesce, 1308 .get_regs_len = hns3_get_regs_len, 1309 .get_regs = hns3_get_regs, 1310 .get_link = hns3_get_link, 1311 .get_msglevel = hns3_get_msglevel, 1312 .set_msglevel = hns3_set_msglevel, 1313 }; 1314 1315 static const struct ethtool_ops hns3_ethtool_ops = { 1316 .self_test = hns3_self_test, 1317 .get_drvinfo = hns3_get_drvinfo, 1318 .get_link = hns3_get_link, 1319 .get_ringparam = hns3_get_ringparam, 1320 .set_ringparam = hns3_set_ringparam, 1321 .get_pauseparam = hns3_get_pauseparam, 1322 .set_pauseparam = hns3_set_pauseparam, 1323 .get_strings = hns3_get_strings, 1324 .get_ethtool_stats = hns3_get_stats, 1325 .get_sset_count = hns3_get_sset_count, 1326 .get_rxnfc = hns3_get_rxnfc, 1327 .set_rxnfc = hns3_set_rxnfc, 1328 .get_rxfh_key_size = hns3_get_rss_key_size, 1329 .get_rxfh_indir_size = hns3_get_rss_indir_size, 1330 .get_rxfh = hns3_get_rss, 1331 .set_rxfh = hns3_set_rss, 1332 .get_link_ksettings = hns3_get_link_ksettings, 1333 .set_link_ksettings = hns3_set_link_ksettings, 1334 .nway_reset = hns3_nway_reset, 1335 .get_channels = hns3_get_channels, 1336 .set_channels = hns3_set_channels, 1337 .get_coalesce = hns3_get_coalesce, 1338 .set_coalesce = hns3_set_coalesce, 1339 .get_regs_len = hns3_get_regs_len, 1340 .get_regs = hns3_get_regs, 1341 .set_phys_id = hns3_set_phys_id, 1342 .get_msglevel = hns3_get_msglevel, 1343 .set_msglevel = hns3_set_msglevel, 1344 .get_fecparam = hns3_get_fecparam, 1345 .set_fecparam = hns3_set_fecparam, 1346 }; 1347 1348 void hns3_ethtool_set_ops(struct net_device *netdev) 1349 { 1350 struct hnae3_handle *h = hns3_get_handle(netdev); 1351 1352 if (h->flags & HNAE3_SUPPORT_VF) 1353 netdev->ethtool_ops = &hns3vf_ethtool_ops; 1354 else 1355 netdev->ethtool_ops = &hns3_ethtool_ops; 1356 } 1357