1 // SPDX-License-Identifier: GPL-2.0+ 2 // Copyright (c) 2016-2017 Hisilicon Limited. 3 4 #include <linux/etherdevice.h> 5 #include <linux/string.h> 6 #include <linux/phy.h> 7 8 #include "hns3_enet.h" 9 10 struct hns3_stats { 11 char stats_string[ETH_GSTRING_LEN]; 12 int stats_offset; 13 }; 14 15 /* tqp related stats */ 16 #define HNS3_TQP_STAT(_string, _member) { \ 17 .stats_string = _string, \ 18 .stats_offset = offsetof(struct hns3_enet_ring, stats) +\ 19 offsetof(struct ring_stats, _member), \ 20 } 21 22 static const struct hns3_stats hns3_txq_stats[] = { 23 /* Tx per-queue statistics */ 24 HNS3_TQP_STAT("io_err_cnt", io_err_cnt), 25 HNS3_TQP_STAT("dropped", sw_err_cnt), 26 HNS3_TQP_STAT("seg_pkt_cnt", seg_pkt_cnt), 27 HNS3_TQP_STAT("packets", tx_pkts), 28 HNS3_TQP_STAT("bytes", tx_bytes), 29 HNS3_TQP_STAT("errors", tx_err_cnt), 30 HNS3_TQP_STAT("wake", restart_queue), 31 HNS3_TQP_STAT("busy", tx_busy), 32 }; 33 34 #define HNS3_TXQ_STATS_COUNT ARRAY_SIZE(hns3_txq_stats) 35 36 static const struct hns3_stats hns3_rxq_stats[] = { 37 /* Rx per-queue statistics */ 38 HNS3_TQP_STAT("io_err_cnt", io_err_cnt), 39 HNS3_TQP_STAT("dropped", sw_err_cnt), 40 HNS3_TQP_STAT("seg_pkt_cnt", seg_pkt_cnt), 41 HNS3_TQP_STAT("packets", rx_pkts), 42 HNS3_TQP_STAT("bytes", rx_bytes), 43 HNS3_TQP_STAT("errors", rx_err_cnt), 44 HNS3_TQP_STAT("reuse_pg_cnt", reuse_pg_cnt), 45 HNS3_TQP_STAT("err_pkt_len", err_pkt_len), 46 HNS3_TQP_STAT("non_vld_descs", non_vld_descs), 47 HNS3_TQP_STAT("err_bd_num", err_bd_num), 48 HNS3_TQP_STAT("l2_err", l2_err), 49 HNS3_TQP_STAT("l3l4_csum_err", l3l4_csum_err), 50 HNS3_TQP_STAT("multicast", rx_multicast), 51 }; 52 53 #define HNS3_RXQ_STATS_COUNT ARRAY_SIZE(hns3_rxq_stats) 54 55 #define HNS3_TQP_STATS_COUNT (HNS3_TXQ_STATS_COUNT + HNS3_RXQ_STATS_COUNT) 56 57 #define HNS3_SELF_TEST_TYPE_NUM 3 58 #define HNS3_NIC_LB_TEST_PKT_NUM 1 59 #define HNS3_NIC_LB_TEST_RING_ID 0 60 #define HNS3_NIC_LB_TEST_PACKET_SIZE 128 61 62 /* Nic loopback test err */ 63 #define 
HNS3_NIC_LB_TEST_NO_MEM_ERR 1 64 #define HNS3_NIC_LB_TEST_TX_CNT_ERR 2 65 #define HNS3_NIC_LB_TEST_RX_CNT_ERR 3 66 67 struct hns3_link_mode_mapping { 68 u32 hns3_link_mode; 69 u32 ethtool_link_mode; 70 }; 71 72 static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en) 73 { 74 struct hnae3_handle *h = hns3_get_handle(ndev); 75 bool vlan_filter_enable; 76 int ret; 77 78 if (!h->ae_algo->ops->set_loopback || 79 !h->ae_algo->ops->set_promisc_mode) 80 return -EOPNOTSUPP; 81 82 switch (loop) { 83 case HNAE3_LOOP_SERIAL_SERDES: 84 case HNAE3_LOOP_PARALLEL_SERDES: 85 case HNAE3_LOOP_APP: 86 ret = h->ae_algo->ops->set_loopback(h, loop, en); 87 break; 88 default: 89 ret = -ENOTSUPP; 90 break; 91 } 92 93 if (ret) 94 return ret; 95 96 if (en) { 97 h->ae_algo->ops->set_promisc_mode(h, true, true); 98 } else { 99 /* recover promisc mode before loopback test */ 100 hns3_update_promisc_mode(ndev, h->netdev_flags); 101 vlan_filter_enable = ndev->flags & IFF_PROMISC ? false : true; 102 hns3_enable_vlan_filter(ndev, vlan_filter_enable); 103 } 104 105 return ret; 106 } 107 108 static int hns3_lp_up(struct net_device *ndev, enum hnae3_loop loop_mode) 109 { 110 struct hnae3_handle *h = hns3_get_handle(ndev); 111 int ret; 112 113 ret = hns3_nic_reset_all_ring(h); 114 if (ret) 115 return ret; 116 117 ret = hns3_lp_setup(ndev, loop_mode, true); 118 usleep_range(10000, 20000); 119 120 return ret; 121 } 122 123 static int hns3_lp_down(struct net_device *ndev, enum hnae3_loop loop_mode) 124 { 125 int ret; 126 127 ret = hns3_lp_setup(ndev, loop_mode, false); 128 if (ret) { 129 netdev_err(ndev, "lb_setup return error: %d\n", ret); 130 return ret; 131 } 132 133 usleep_range(10000, 20000); 134 135 return 0; 136 } 137 138 static void hns3_lp_setup_skb(struct sk_buff *skb) 139 { 140 struct net_device *ndev = skb->dev; 141 unsigned char *packet; 142 struct ethhdr *ethh; 143 unsigned int i; 144 145 skb_reserve(skb, NET_IP_ALIGN); 146 ethh = skb_put(skb, sizeof(struct ethhdr)); 
	packet = skb_put(skb, HNS3_NIC_LB_TEST_PACKET_SIZE);

	/* start from the port's own MAC, then perturb the last byte so
	 * the destination differs from our own address
	 */
	memcpy(ethh->h_dest, ndev->dev_addr, ETH_ALEN);
	ethh->h_dest[5] += 0x1f;
	eth_zero_addr(ethh->h_source);
	ethh->h_proto = htons(ETH_P_ARP);
	skb_reset_mac_header(skb);

	/* payload bytes are i & 0xff; the Rx side checks this pattern */
	for (i = 0; i < HNS3_NIC_LB_TEST_PACKET_SIZE; i++)
		packet[i] = (unsigned char)(i & 0xff);
}

/* hns3_lb_check_skb_data - verify that a looped-back frame still
 * carries the pattern written by hns3_lp_setup_skb; count it as good
 * in the vector's rx_group when intact, dump it otherwise.
 */
static void hns3_lb_check_skb_data(struct hns3_enet_ring *ring,
				   struct sk_buff *skb)
{
	struct hns3_enet_tqp_vector *tqp_vector = ring->tqp_vector;
	unsigned char *packet = skb->data;
	u32 i;

	for (i = 0; i < skb->len; i++)
		if (packet[i] != (unsigned char)(i & 0xff))
			break;

	/* The packet is correctly received */
	if (i == skb->len)
		tqp_vector->rx_group.total_packets++;
	else
		print_hex_dump(KERN_ERR, "selftest:", DUMP_PREFIX_OFFSET, 16, 1,
			       skb->data, skb->len, true);

	dev_kfree_skb_any(skb);
}

/* hns3_lb_check_rx_ring - drain every Rx ring and return how many
 * intact test packets were received; the rx_group packet counter is
 * restored afterwards so the test does not skew normal statistics.
 */
static u32 hns3_lb_check_rx_ring(struct hns3_nic_priv *priv, u32 budget)
{
	struct hnae3_handle *h = priv->ae_handle;
	struct hnae3_knic_private_info *kinfo;
	u32 i, rcv_good_pkt_total = 0;

	kinfo = &h->kinfo;
	/* Rx rings occupy the second half of ring_data */
	for (i = kinfo->num_tqps; i < kinfo->num_tqps * 2; i++) {
		struct hns3_enet_ring *ring = priv->ring_data[i].ring;
		struct hns3_enet_ring_group *rx_group;
		u64 pre_rx_pkt;

		rx_group = &ring->tqp_vector->rx_group;
		pre_rx_pkt = rx_group->total_packets;

		preempt_disable();
		hns3_clean_rx_ring(ring, budget, hns3_lb_check_skb_data);
		preempt_enable();

		rcv_good_pkt_total += (rx_group->total_packets - pre_rx_pkt);
		rx_group->total_packets = pre_rx_pkt;
	}
	return rcv_good_pkt_total;
}

/* hns3_lb_clear_tx_ring - reclaim the Tx descriptors used by the test */
static void hns3_lb_clear_tx_ring(struct hns3_nic_priv *priv, u32 start_ringid,
				  u32 end_ringid, u32 budget)
{
	u32 i;

	for (i = start_ringid; i <= end_ringid; i++) {
		struct hns3_enet_ring *ring = priv->ring_data[i].ring;

		hns3_clean_tx_ring(ring);
	}
}

/**
 * hns3_lp_run_test - run loopback test
 * @ndev: net device
 * @mode: loopback type
 *
 * Returns 0 on success or one of the HNS3_NIC_LB_TEST_*_ERR codes.
 */
static int hns3_lp_run_test(struct net_device *ndev, enum hnae3_loop mode)
{
	struct hns3_nic_priv *priv = netdev_priv(ndev);
	struct sk_buff *skb;
	u32 i, good_cnt;
	int ret_val = 0;

	skb = alloc_skb(HNS3_NIC_LB_TEST_PACKET_SIZE + ETH_HLEN + NET_IP_ALIGN,
			GFP_KERNEL);
	if (!skb)
		return HNS3_NIC_LB_TEST_NO_MEM_ERR;

	skb->dev = ndev;
	hns3_lp_setup_skb(skb);
	skb->queue_mapping = HNS3_NIC_LB_TEST_RING_ID;

	good_cnt = 0;
	for (i = 0; i < HNS3_NIC_LB_TEST_PKT_NUM; i++) {
		netdev_tx_t tx_ret;

		/* take an extra reference: the same skb is handed to
		 * the xmit path on each iteration and freed at "out"
		 */
		skb_get(skb);
		tx_ret = hns3_nic_net_xmit(skb, ndev);
		if (tx_ret == NETDEV_TX_OK)
			good_cnt++;
		else
			netdev_err(ndev, "hns3_lb_run_test xmit failed: %d\n",
				   tx_ret);
	}
	if (good_cnt != HNS3_NIC_LB_TEST_PKT_NUM) {
		ret_val = HNS3_NIC_LB_TEST_TX_CNT_ERR;
		netdev_err(ndev, "mode %d sent fail, cnt=0x%x, budget=0x%x\n",
			   mode, good_cnt, HNS3_NIC_LB_TEST_PKT_NUM);
		goto out;
	}

	/* Allow 200 milliseconds for packets to go from Tx to Rx */
	msleep(200);

	good_cnt = hns3_lb_check_rx_ring(priv, HNS3_NIC_LB_TEST_PKT_NUM);
	if (good_cnt != HNS3_NIC_LB_TEST_PKT_NUM) {
		ret_val = HNS3_NIC_LB_TEST_RX_CNT_ERR;
		netdev_err(ndev, "mode %d recv fail, cnt=0x%x, budget=0x%x\n",
			   mode, good_cnt, HNS3_NIC_LB_TEST_PKT_NUM);
	}

out:
	hns3_lb_clear_tx_ring(priv, HNS3_NIC_LB_TEST_RING_ID,
			      HNS3_NIC_LB_TEST_RING_ID,
			      HNS3_NIC_LB_TEST_PKT_NUM);

	kfree_skb(skb);
	return ret_val;
}

/**
 * hns3_self_test - run the offline ethtool self tests
 * @ndev: net device
 * @eth_test: test cmd
 * @data: test result
 */
static void hns3_self_test(struct net_device *ndev,
			   struct ethtool_test *eth_test, u64 *data)
{
	struct hns3_nic_priv *priv = netdev_priv(ndev);
	struct hnae3_handle *h = priv->ae_handle;
	int
st_param[HNS3_SELF_TEST_TYPE_NUM][2]; 288 bool if_running = netif_running(ndev); 289 #if IS_ENABLED(CONFIG_VLAN_8021Q) 290 bool dis_vlan_filter; 291 #endif 292 int test_index = 0; 293 u32 i; 294 295 if (hns3_nic_resetting(ndev)) { 296 netdev_err(ndev, "dev resetting!"); 297 return; 298 } 299 300 /* Only do offline selftest, or pass by default */ 301 if (eth_test->flags != ETH_TEST_FL_OFFLINE) 302 return; 303 304 st_param[HNAE3_LOOP_APP][0] = HNAE3_LOOP_APP; 305 st_param[HNAE3_LOOP_APP][1] = 306 h->flags & HNAE3_SUPPORT_APP_LOOPBACK; 307 308 st_param[HNAE3_LOOP_SERIAL_SERDES][0] = HNAE3_LOOP_SERIAL_SERDES; 309 st_param[HNAE3_LOOP_SERIAL_SERDES][1] = 310 h->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK; 311 312 st_param[HNAE3_LOOP_PARALLEL_SERDES][0] = 313 HNAE3_LOOP_PARALLEL_SERDES; 314 st_param[HNAE3_LOOP_PARALLEL_SERDES][1] = 315 h->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK; 316 317 if (if_running) 318 ndev->netdev_ops->ndo_stop(ndev); 319 320 #if IS_ENABLED(CONFIG_VLAN_8021Q) 321 /* Disable the vlan filter for selftest does not support it */ 322 dis_vlan_filter = (ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) && 323 h->ae_algo->ops->enable_vlan_filter; 324 if (dis_vlan_filter) 325 h->ae_algo->ops->enable_vlan_filter(h, false); 326 #endif 327 328 set_bit(HNS3_NIC_STATE_TESTING, &priv->state); 329 330 for (i = 0; i < HNS3_SELF_TEST_TYPE_NUM; i++) { 331 enum hnae3_loop loop_type = (enum hnae3_loop)st_param[i][0]; 332 333 if (!st_param[i][1]) 334 continue; 335 336 data[test_index] = hns3_lp_up(ndev, loop_type); 337 if (!data[test_index]) 338 data[test_index] = hns3_lp_run_test(ndev, loop_type); 339 340 hns3_lp_down(ndev, loop_type); 341 342 if (data[test_index]) 343 eth_test->flags |= ETH_TEST_FL_FAILED; 344 345 test_index++; 346 } 347 348 clear_bit(HNS3_NIC_STATE_TESTING, &priv->state); 349 350 #if IS_ENABLED(CONFIG_VLAN_8021Q) 351 if (dis_vlan_filter) 352 h->ae_algo->ops->enable_vlan_filter(h, true); 353 #endif 354 355 if (if_running) 356 
ndev->netdev_ops->ndo_open(ndev); 357 } 358 359 static int hns3_get_sset_count(struct net_device *netdev, int stringset) 360 { 361 struct hnae3_handle *h = hns3_get_handle(netdev); 362 const struct hnae3_ae_ops *ops = h->ae_algo->ops; 363 364 if (!ops->get_sset_count) 365 return -EOPNOTSUPP; 366 367 switch (stringset) { 368 case ETH_SS_STATS: 369 return ((HNS3_TQP_STATS_COUNT * h->kinfo.num_tqps) + 370 ops->get_sset_count(h, stringset)); 371 372 case ETH_SS_TEST: 373 return ops->get_sset_count(h, stringset); 374 375 default: 376 return -EOPNOTSUPP; 377 } 378 } 379 380 static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats, 381 u32 stat_count, u32 num_tqps, const char *prefix) 382 { 383 #define MAX_PREFIX_SIZE (6 + 4) 384 u32 size_left; 385 u32 i, j; 386 u32 n1; 387 388 for (i = 0; i < num_tqps; i++) { 389 for (j = 0; j < stat_count; j++) { 390 data[ETH_GSTRING_LEN - 1] = '\0'; 391 392 /* first, prepend the prefix string */ 393 n1 = snprintf(data, MAX_PREFIX_SIZE, "%s%d_", 394 prefix, i); 395 n1 = min_t(uint, n1, MAX_PREFIX_SIZE - 1); 396 size_left = (ETH_GSTRING_LEN - 1) - n1; 397 398 /* now, concatenate the stats string to it */ 399 strncat(data, stats[j].stats_string, size_left); 400 data += ETH_GSTRING_LEN; 401 } 402 } 403 404 return data; 405 } 406 407 static u8 *hns3_get_strings_tqps(struct hnae3_handle *handle, u8 *data) 408 { 409 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 410 const char tx_prefix[] = "txq"; 411 const char rx_prefix[] = "rxq"; 412 413 /* get strings for Tx */ 414 data = hns3_update_strings(data, hns3_txq_stats, HNS3_TXQ_STATS_COUNT, 415 kinfo->num_tqps, tx_prefix); 416 417 /* get strings for Rx */ 418 data = hns3_update_strings(data, hns3_rxq_stats, HNS3_RXQ_STATS_COUNT, 419 kinfo->num_tqps, rx_prefix); 420 421 return data; 422 } 423 424 static void hns3_get_strings(struct net_device *netdev, u32 stringset, u8 *data) 425 { 426 struct hnae3_handle *h = hns3_get_handle(netdev); 427 const struct hnae3_ae_ops *ops 
= h->ae_algo->ops;
	char *buff = (char *)data;

	if (!ops->get_strings)
		return;

	switch (stringset) {
	case ETH_SS_STATS:
		/* per-queue names first, then the ae layer's own names */
		buff = hns3_get_strings_tqps(h, buff);
		h->ae_algo->ops->get_strings(h, stringset, (u8 *)buff);
		break;
	case ETH_SS_TEST:
		ops->get_strings(h, stringset, data);
		break;
	default:
		break;
	}
}

/* hns3_get_stats_tqps - copy each ring's counters (located via the
 * precomputed stats_offset) into the output array; Tx rings first,
 * then Rx rings, matching hns3_get_strings_tqps order.
 */
static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data)
{
	struct hns3_nic_priv *nic_priv = (struct hns3_nic_priv *)handle->priv;
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hns3_enet_ring *ring;
	u8 *stat;
	int i, j;

	/* get stats for Tx */
	for (i = 0; i < kinfo->num_tqps; i++) {
		ring = nic_priv->ring_data[i].ring;
		for (j = 0; j < HNS3_TXQ_STATS_COUNT; j++) {
			stat = (u8 *)ring + hns3_txq_stats[j].stats_offset;
			*data++ = *(u64 *)stat;
		}
	}

	/* get stats for Rx */
	for (i = 0; i < kinfo->num_tqps; i++) {
		ring = nic_priv->ring_data[i + kinfo->num_tqps].ring;
		for (j = 0; j < HNS3_RXQ_STATS_COUNT; j++) {
			stat = (u8 *)ring + hns3_rxq_stats[j].stats_offset;
			*data++ = *(u64 *)stat;
		}
	}

	return data;
}

/* hns3_get_stats - get detail statistics.
 * @netdev: net device
 * @stats: statistics info.
 * @data: statistics data.
 */
static void hns3_get_stats(struct net_device *netdev,
			   struct ethtool_stats *stats, u64 *data)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	u64 *p = data;

	if (!h->ae_algo->ops->get_stats || !h->ae_algo->ops->update_stats) {
		netdev_err(netdev, "could not get any statistics\n");
		return;
	}

	h->ae_algo->ops->update_stats(h, &netdev->stats);

	/* get per-queue stats */
	p = hns3_get_stats_tqps(h, p);

	/* get MAC & other misc hardware stats */
	h->ae_algo->ops->get_stats(h, p);
}

/* hns3_get_drvinfo - fill the ethtool driver info strings.
 * NOTE(review): strncpy does not guarantee NUL termination, hence the
 * explicit terminator after each copy; strscpy would be simpler.
 */
static void hns3_get_drvinfo(struct net_device *netdev,
			     struct ethtool_drvinfo *drvinfo)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;

	strncpy(drvinfo->version, hns3_driver_version,
		sizeof(drvinfo->version));
	drvinfo->version[sizeof(drvinfo->version) - 1] = '\0';

	strncpy(drvinfo->driver, h->pdev->driver->name,
		sizeof(drvinfo->driver));
	drvinfo->driver[sizeof(drvinfo->driver) - 1] = '\0';

	strncpy(drvinfo->bus_info, pci_name(h->pdev),
		sizeof(drvinfo->bus_info));
	drvinfo->bus_info[ETHTOOL_BUSINFO_LEN - 1] = '\0';

	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "0x%08x",
		 priv->ae_handle->ae_algo->ops->get_fw_version(h));
}

/* hns3_get_link - report link status via the ae layer, 0 if unknown */
static u32 hns3_get_link(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo && h->ae_algo->ops && h->ae_algo->ops->get_status)
		return h->ae_algo->ops->get_status(h);
	else
		return 0;
}

/* hns3_get_ringparam - report current and maximum ring depths */
static void hns3_get_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *param)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;
	int queue_num = h->kinfo.num_tqps;

	if (hns3_nic_resetting(netdev)) {
		netdev_err(netdev, "dev resetting!");
		return;
	}

	param->tx_max_pending =
HNS3_RING_MAX_PENDING;
	param->rx_max_pending = HNS3_RING_MAX_PENDING;

	/* Tx rings are ring_data[0..queue_num), Rx rings follow them */
	param->tx_pending = priv->ring_data[0].ring->desc_num;
	param->rx_pending = priv->ring_data[queue_num].ring->desc_num;
}

/* hns3_get_pauseparam - report flow control settings via the ae ops */
static void hns3_get_pauseparam(struct net_device *netdev,
				struct ethtool_pauseparam *param)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo && h->ae_algo->ops && h->ae_algo->ops->get_pauseparam)
		h->ae_algo->ops->get_pauseparam(h, &param->autoneg,
			&param->rx_pause, &param->tx_pause);
}

/* hns3_set_pauseparam - apply flow control settings via the ae ops */
static int hns3_set_pauseparam(struct net_device *netdev,
			       struct ethtool_pauseparam *param)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->set_pauseparam)
		return h->ae_algo->ops->set_pauseparam(h, param->autoneg,
						       param->rx_pause,
						       param->tx_pause);
	return -EOPNOTSUPP;
}

/* hns3_get_ksettings - fill link ksettings from the ae layer's
 * autoneg result, link modes and MDI-X state, where supported.
 */
static void hns3_get_ksettings(struct hnae3_handle *h,
			       struct ethtool_link_ksettings *cmd)
{
	const struct hnae3_ae_ops *ops = h->ae_algo->ops;

	/* 1.auto_neg & speed & duplex from cmd */
	if (ops->get_ksettings_an_result)
		ops->get_ksettings_an_result(h,
					     &cmd->base.autoneg,
					     &cmd->base.speed,
					     &cmd->base.duplex);

	/* 2.get link mode*/
	if (ops->get_link_mode)
		ops->get_link_mode(h,
				   cmd->link_modes.supported,
				   cmd->link_modes.advertising);

	/* 3.mdix_ctrl&mdix get from phy reg */
	if (ops->get_mdix_mode)
		ops->get_mdix_mode(h, &cmd->base.eth_tp_mdix_ctrl,
				   &cmd->base.eth_tp_mdix);
}

/* hns3_get_link_ksettings - ethtool get_link_ksettings handler;
 * chooses the port type from the media type and prefers the attached
 * PHY's view for copper ports.
 */
static int hns3_get_link_ksettings(struct net_device *netdev,
				   struct ethtool_link_ksettings *cmd)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	const struct hnae3_ae_ops *ops;
	u8 media_type;
	u8 link_stat;

	if (!h->ae_algo || !h->ae_algo->ops)
		return -EOPNOTSUPP;

	ops = h->ae_algo->ops;
	if (ops->get_media_type)
		ops->get_media_type(h, &media_type);
	else
		return -EOPNOTSUPP;

	switch (media_type) {
	case HNAE3_MEDIA_TYPE_NONE:
		cmd->base.port = PORT_NONE;
		hns3_get_ksettings(h, cmd);
		break;
	case HNAE3_MEDIA_TYPE_FIBER:
		cmd->base.port = PORT_FIBRE;
		hns3_get_ksettings(h, cmd);
		break;
	case HNAE3_MEDIA_TYPE_COPPER:
		cmd->base.port = PORT_TP;
		if (!netdev->phydev)
			hns3_get_ksettings(h, cmd);
		else
			phy_ethtool_ksettings_get(netdev->phydev, cmd);
		break;
	default:

		netdev_warn(netdev, "Unknown media type");
		return 0;
	}

	/* mdio_support */
	cmd->base.mdio_support = ETH_MDIO_SUPPORTS_C22;

	/* without link, speed and duplex are meaningless */
	link_stat = hns3_get_link(netdev);
	if (!link_stat) {
		cmd->base.speed = SPEED_UNKNOWN;
		cmd->base.duplex = DUPLEX_UNKNOWN;
	}

	return 0;
}

static int hns3_set_link_ksettings(struct net_device *netdev,
				   const struct ethtool_link_ksettings *cmd)
{
	/* Only support ksettings_set for netdev with phy attached for now */
	if (netdev->phydev)
		return phy_ethtool_ksettings_set(netdev->phydev, cmd);

	return -EOPNOTSUPP;
}

/* hns3_get_rss_key_size - RSS hash key size in bytes, 0 if unsupported */
static u32 hns3_get_rss_key_size(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (!h->ae_algo || !h->ae_algo->ops ||
	    !h->ae_algo->ops->get_rss_key_size)
		return 0;

	return h->ae_algo->ops->get_rss_key_size(h);
}

/* hns3_get_rss_indir_size - RSS indirection table size, 0 if unsupported */
static u32 hns3_get_rss_indir_size(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (!h->ae_algo || !h->ae_algo->ops ||
	    !h->ae_algo->ops->get_rss_indir_size)
		return 0;

	return h->ae_algo->ops->get_rss_indir_size(h);
}

static int hns3_get_rss(struct net_device *netdev, u32 *indir, u8 *key,
			u8 *hfunc)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->get_rss)
		return -EOPNOTSUPP;

	return h->ae_algo->ops->get_rss(h, indir, key, hfunc);
}

/* hns3_set_rss - validate the requested hash function (revision 0x20
 * hardware only accepts Toeplitz) and forward to the ae layer.
 */
static int hns3_set_rss(struct net_device *netdev, const u32 *indir,
			const u8 *key, const u8 hfunc)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->set_rss)
		return -EOPNOTSUPP;

	if ((h->pdev->revision == 0x20 &&
	     hfunc != ETH_RSS_HASH_TOP) || (hfunc != ETH_RSS_HASH_NO_CHANGE &&
	     hfunc != ETH_RSS_HASH_TOP && hfunc != ETH_RSS_HASH_XOR)) {
		netdev_err(netdev, "hash func not supported\n");
		return -EOPNOTSUPP;
	}

	if (!indir) {
		netdev_err(netdev,
			   "set rss failed for indir is empty\n");
		return -EOPNOTSUPP;
	}

	return h->ae_algo->ops->set_rss(h, indir, key, hfunc);
}

/* hns3_get_rxnfc - dispatch ethtool Rx flow classification queries to
 * the matching ae op, if the backend implements it.
 */
static int hns3_get_rxnfc(struct net_device *netdev,
			  struct ethtool_rxnfc *cmd,
			  u32 *rule_locs)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (!h->ae_algo || !h->ae_algo->ops)
		return -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = h->kinfo.num_tqps;
		return 0;
	case ETHTOOL_GRXFH:
		if (h->ae_algo->ops->get_rss_tuple)
			return h->ae_algo->ops->get_rss_tuple(h, cmd);
		return -EOPNOTSUPP;
	case ETHTOOL_GRXCLSRLCNT:
		if (h->ae_algo->ops->get_fd_rule_cnt)
			return h->ae_algo->ops->get_fd_rule_cnt(h, cmd);
		return -EOPNOTSUPP;
	case ETHTOOL_GRXCLSRULE:
		if (h->ae_algo->ops->get_fd_rule_info)
			return h->ae_algo->ops->get_fd_rule_info(h, cmd);
		return -EOPNOTSUPP;
	case ETHTOOL_GRXCLSRLALL:
		if (h->ae_algo->ops->get_fd_all_rules)
			return h->ae_algo->ops->get_fd_all_rules(h, cmd,
								 rule_locs);
		return -EOPNOTSUPP;
	default:
		return -EOPNOTSUPP;
	}
}

/* hns3_change_all_ring_bd_num - set new Tx/Rx descriptor counts on
 * every ring and re-initialize all rings with the new depths.
 */
static int hns3_change_all_ring_bd_num(struct hns3_nic_priv *priv,
				       u32 tx_desc_num, u32 rx_desc_num)
{
	struct hnae3_handle *h = priv->ae_handle;
	int i;

	h->kinfo.num_tx_desc = tx_desc_num;
	h->kinfo.num_rx_desc = rx_desc_num;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		priv->ring_data[i].ring->desc_num = tx_desc_num;
		priv->ring_data[i + h->kinfo.num_tqps].ring->desc_num =
			rx_desc_num;
	}

	return hns3_init_all_ring(priv);
}

/* hns3_set_ringparam - change ring depths: validate the request,
 * tear down the rings, apply the new depths and, on failure, try to
 * roll back to the previous depths.
 */
static int hns3_set_ringparam(struct net_device *ndev,
			      struct ethtool_ringparam *param)
{
	struct hns3_nic_priv *priv = netdev_priv(ndev);
	struct hnae3_handle *h = priv->ae_handle;
	bool if_running = netif_running(ndev);
	u32 old_tx_desc_num, new_tx_desc_num;
	u32 old_rx_desc_num, new_rx_desc_num;
	int queue_num = h->kinfo.num_tqps;
	int ret;

	if (hns3_nic_resetting(ndev))
		return -EBUSY;

	if (param->rx_mini_pending || param->rx_jumbo_pending)
		return -EINVAL;

	if (param->tx_pending > HNS3_RING_MAX_PENDING ||
	    param->tx_pending < HNS3_RING_MIN_PENDING ||
	    param->rx_pending > HNS3_RING_MAX_PENDING ||
	    param->rx_pending < HNS3_RING_MIN_PENDING) {
		netdev_err(ndev, "Queue depth out of range [%d-%d]\n",
			   HNS3_RING_MIN_PENDING, HNS3_RING_MAX_PENDING);
		return -EINVAL;
	}

	/* Hardware requires that its descriptors must be multiple of eight */
	new_tx_desc_num = ALIGN(param->tx_pending, HNS3_RING_BD_MULTIPLE);
	new_rx_desc_num = ALIGN(param->rx_pending, HNS3_RING_BD_MULTIPLE);
	old_tx_desc_num = priv->ring_data[0].ring->desc_num;
	old_rx_desc_num = priv->ring_data[queue_num].ring->desc_num;
	if (old_tx_desc_num == new_tx_desc_num &&
	    old_rx_desc_num == new_rx_desc_num)
		return 0;

	netdev_info(ndev,
		    "Changing Tx/Rx ring depth from %d/%d to %d/%d\n",
		    old_tx_desc_num, old_rx_desc_num,
		    new_tx_desc_num, new_rx_desc_num);

	if (if_running)
		ndev->netdev_ops->ndo_stop(ndev);

	ret = hns3_uninit_all_ring(priv);
	if (ret)
		return ret;

	ret = hns3_change_all_ring_bd_num(priv, new_tx_desc_num,
					  new_rx_desc_num);
	if (ret) {
		/* applying the new depths failed: revert to old ones */
		ret = hns3_change_all_ring_bd_num(priv, old_tx_desc_num,
						  old_rx_desc_num);
		if (ret) {
			netdev_err(ndev,
				   "Revert to old bd num fail, ret=%d.\n", ret);
			return ret;
		}
	}

	if (if_running)
		ret = ndev->netdev_ops->ndo_open(ndev);

	return ret;
}

/* hns3_set_rxnfc - dispatch ethtool Rx flow configuration commands to
 * the matching ae op, if the backend implements it.
 */
static int hns3_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (!h->ae_algo || !h->ae_algo->ops)
		return -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_SRXFH:
		if (h->ae_algo->ops->set_rss_tuple)
			return h->ae_algo->ops->set_rss_tuple(h, cmd);
		return -EOPNOTSUPP;
	case ETHTOOL_SRXCLSRLINS:
		if (h->ae_algo->ops->add_fd_entry)
			return h->ae_algo->ops->add_fd_entry(h, cmd);
		return -EOPNOTSUPP;
	case ETHTOOL_SRXCLSRLDEL:
		if (h->ae_algo->ops->del_fd_entry)
			return h->ae_algo->ops->del_fd_entry(h, cmd);
		return -EOPNOTSUPP;
	default:
		return -EOPNOTSUPP;
	}
}

/* hns3_nway_reset - restart autonegotiation on the attached PHY */
static int hns3_nway_reset(struct net_device *netdev)
{
	struct phy_device *phy = netdev->phydev;

	if (!netif_running(netdev))
		return 0;

	/* Only support nway_reset for netdev with phy attached for now */
	if (!phy)
		return -EOPNOTSUPP;

	if (phy->autoneg != AUTONEG_ENABLE)
		return -EINVAL;

	return genphy_restart_aneg(phy);
}

static void hns3_get_channels(struct net_device *netdev,
			      struct ethtool_channels *ch)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->get_channels)
		h->ae_algo->ops->get_channels(h, ch);
}

/* hns3_get_coalesce_per_queue - report the interrupt coalescing
 * settings of one queue's Tx and Rx vectors.
 */
static int hns3_get_coalesce_per_queue(struct net_device *netdev, u32 queue,
				       struct ethtool_coalesce *cmd)
{
	struct hns3_enet_tqp_vector *tx_vector, *rx_vector;
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;
	u16 queue_num = h->kinfo.num_tqps;

	if (hns3_nic_resetting(netdev))
		return -EBUSY;

	if (queue >= queue_num) {
		netdev_err(netdev,
			   "Invalid queue value %d! Queue max id=%d\n",
			   queue, queue_num - 1);
		return -EINVAL;
	}

	/* Rx vectors sit queue_num entries after the Tx vectors */
	tx_vector = priv->ring_data[queue].ring->tqp_vector;
	rx_vector = priv->ring_data[queue_num + queue].ring->tqp_vector;

	cmd->use_adaptive_tx_coalesce =
			tx_vector->tx_group.coal.gl_adapt_enable;
	cmd->use_adaptive_rx_coalesce =
			rx_vector->rx_group.coal.gl_adapt_enable;

	cmd->tx_coalesce_usecs = tx_vector->tx_group.coal.int_gl;
	cmd->rx_coalesce_usecs = rx_vector->rx_group.coal.int_gl;

	/* the RL (usecs_high) setting is shared device-wide */
	cmd->tx_coalesce_usecs_high = h->kinfo.int_rl_setting;
	cmd->rx_coalesce_usecs_high = h->kinfo.int_rl_setting;

	return 0;
}

static int hns3_get_coalesce(struct net_device *netdev,
			     struct ethtool_coalesce *cmd)
{
	/* queue 0's settings stand in for the whole device */
	return hns3_get_coalesce_per_queue(netdev, 0, cmd);
}

/* hns3_check_gl_coalesce_para - range-check the GL usec values and
 * warn when they will be rounded down to a multiple of 2.
 */
static int hns3_check_gl_coalesce_para(struct net_device *netdev,
				       struct ethtool_coalesce *cmd)
{
	u32 rx_gl, tx_gl;

	if (cmd->rx_coalesce_usecs > HNS3_INT_GL_MAX) {
		netdev_err(netdev,
			   "Invalid rx-usecs value, rx-usecs range is 0-%d\n",
			   HNS3_INT_GL_MAX);
		return -EINVAL;
	}

	if (cmd->tx_coalesce_usecs > HNS3_INT_GL_MAX) {
		netdev_err(netdev,
			   "Invalid tx-usecs value, tx-usecs range is 0-%d\n",
			   HNS3_INT_GL_MAX);
		return -EINVAL;
	}

	rx_gl = hns3_gl_round_down(cmd->rx_coalesce_usecs);
	if (rx_gl != cmd->rx_coalesce_usecs) {
		netdev_info(netdev,
			    "rx_usecs(%d) rounded down to %d, because it must be multiple of 2.\n",
			    cmd->rx_coalesce_usecs, rx_gl);
	}

	tx_gl = hns3_gl_round_down(cmd->tx_coalesce_usecs);
	if (tx_gl != cmd->tx_coalesce_usecs) {
		netdev_info(netdev,
			    "tx_usecs(%d) rounded down to %d, because it must be multiple of 2.\n",
			    cmd->tx_coalesce_usecs, tx_gl);
	}

	return 0;
}

/* hns3_check_rl_coalesce_para - validate the usecs_high (RL) values:
 * Tx and Rx must match since the setting is shared device-wide.
 */
static int hns3_check_rl_coalesce_para(struct net_device *netdev,
				       struct ethtool_coalesce *cmd)
{
	u32 rl;

	if
 (cmd->tx_coalesce_usecs_high != cmd->rx_coalesce_usecs_high) {
		netdev_err(netdev,
			   "tx_usecs_high must be same as rx_usecs_high.\n");
		return -EINVAL;
	}

	if (cmd->rx_coalesce_usecs_high > HNS3_INT_RL_MAX) {
		netdev_err(netdev,
			   "Invalid usecs_high value, usecs_high range is 0-%d\n",
			   HNS3_INT_RL_MAX);
		return -EINVAL;
	}

	rl = hns3_rl_round_down(cmd->rx_coalesce_usecs_high);
	if (rl != cmd->rx_coalesce_usecs_high) {
		netdev_info(netdev,
			    "usecs_high(%d) rounded down to %d, because it must be multiple of 4.\n",
			    cmd->rx_coalesce_usecs_high, rl);
	}

	return 0;
}

/* hns3_check_coalesce_para - run all coalesce parameter checks and
 * warn when adaptive coalescing will override the static usec values.
 */
static int hns3_check_coalesce_para(struct net_device *netdev,
				    struct ethtool_coalesce *cmd)
{
	int ret;

	ret = hns3_check_gl_coalesce_para(netdev, cmd);
	if (ret) {
		netdev_err(netdev,
			   "Check gl coalesce param fail. ret = %d\n", ret);
		return ret;
	}

	ret = hns3_check_rl_coalesce_para(netdev, cmd);
	if (ret) {
		netdev_err(netdev,
			   "Check rl coalesce param fail. ret = %d\n", ret);
		return ret;
	}

	if (cmd->use_adaptive_tx_coalesce == 1 ||
	    cmd->use_adaptive_rx_coalesce == 1) {
		netdev_info(netdev,
			    "adaptive-tx=%d and adaptive-rx=%d, tx_usecs or rx_usecs will changed dynamically.\n",
			    cmd->use_adaptive_tx_coalesce,
			    cmd->use_adaptive_rx_coalesce);
	}

	return 0;
}

/* hns3_set_coalesce_per_queue - program the coalescing settings into
 * one queue's Tx and Rx vectors and write them to hardware.
 */
static void hns3_set_coalesce_per_queue(struct net_device *netdev,
					struct ethtool_coalesce *cmd,
					u32 queue)
{
	struct hns3_enet_tqp_vector *tx_vector, *rx_vector;
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;
	int queue_num = h->kinfo.num_tqps;

	tx_vector = priv->ring_data[queue].ring->tqp_vector;
	rx_vector = priv->ring_data[queue_num + queue].ring->tqp_vector;

	tx_vector->tx_group.coal.gl_adapt_enable =
				cmd->use_adaptive_tx_coalesce;
	rx_vector->rx_group.coal.gl_adapt_enable =
				cmd->use_adaptive_rx_coalesce;

	tx_vector->tx_group.coal.int_gl = cmd->tx_coalesce_usecs;
	rx_vector->rx_group.coal.int_gl = cmd->rx_coalesce_usecs;

	hns3_set_vector_coalesce_tx_gl(tx_vector,
				       tx_vector->tx_group.coal.int_gl);
	hns3_set_vector_coalesce_rx_gl(rx_vector,
				       rx_vector->rx_group.coal.int_gl);

	hns3_set_vector_coalesce_rl(tx_vector, h->kinfo.int_rl_setting);
	hns3_set_vector_coalesce_rl(rx_vector, h->kinfo.int_rl_setting);
}

/* hns3_set_coalesce - validate and apply coalescing to every queue */
static int hns3_set_coalesce(struct net_device *netdev,
			     struct ethtool_coalesce *cmd)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	u16 queue_num = h->kinfo.num_tqps;
	int ret;
	int i;

	if (hns3_nic_resetting(netdev))
		return -EBUSY;

	ret = hns3_check_coalesce_para(netdev, cmd);
	if (ret)
		return ret;

	/* the RL setting is device-wide, stored once in kinfo */
	h->kinfo.int_rl_setting =
		hns3_rl_round_down(cmd->rx_coalesce_usecs_high);

	for (i = 0; i < queue_num; i++)
		hns3_set_coalesce_per_queue(netdev, cmd, i);

	return 0;
}

static int hns3_get_regs_len(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (!h->ae_algo->ops->get_regs_len)
		return -EOPNOTSUPP;

	return h->ae_algo->ops->get_regs_len(h);
}

static void hns3_get_regs(struct net_device *netdev,
			  struct ethtool_regs *cmd, void *data)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (!h->ae_algo->ops->get_regs)
		return;

	h->ae_algo->ops->get_regs(h, &cmd->version, data);
}

/* hns3_set_phys_id - forward the port-identify LED request to the ae
 * layer's set_led_id op.
 */
static int hns3_set_phys_id(struct net_device *netdev,
			    enum ethtool_phys_id_state state)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->set_led_id)
		return -EOPNOTSUPP;

	return h->ae_algo->ops->set_led_id(h, state);
}

/* ethtool ops used by VF handles: no self test, pauseparam,
 * link-settings write, nway reset or LED control.
 */
static const struct ethtool_ops hns3vf_ethtool_ops = {
	.get_drvinfo = hns3_get_drvinfo,
	.get_ringparam = hns3_get_ringparam,
	.set_ringparam = hns3_set_ringparam,
	.get_strings = hns3_get_strings,
	.get_ethtool_stats = hns3_get_stats,
	.get_sset_count = hns3_get_sset_count,
	.get_rxnfc = hns3_get_rxnfc,
	.set_rxnfc = hns3_set_rxnfc,
	.get_rxfh_key_size = hns3_get_rss_key_size,
	.get_rxfh_indir_size = hns3_get_rss_indir_size,
	.get_rxfh = hns3_get_rss,
	.set_rxfh = hns3_set_rss,
	.get_link_ksettings = hns3_get_link_ksettings,
	.get_channels = hns3_get_channels,
	.get_coalesce = hns3_get_coalesce,
	.set_coalesce = hns3_set_coalesce,
	.get_regs_len = hns3_get_regs_len,
	.get_regs = hns3_get_regs,
	.get_link = hns3_get_link,
};

/* full ethtool ops used by PF handles */
static const struct ethtool_ops hns3_ethtool_ops = {
	.self_test = hns3_self_test,
	.get_drvinfo = hns3_get_drvinfo,
	.get_link = hns3_get_link,
	.get_ringparam = hns3_get_ringparam,
	.set_ringparam = hns3_set_ringparam,
	.get_pauseparam = hns3_get_pauseparam,
	.set_pauseparam = hns3_set_pauseparam,
	.get_strings = hns3_get_strings,
	.get_ethtool_stats = hns3_get_stats,
	.get_sset_count = hns3_get_sset_count,
	.get_rxnfc = hns3_get_rxnfc,
	.set_rxnfc = hns3_set_rxnfc,
	.get_rxfh_key_size = hns3_get_rss_key_size,
	.get_rxfh_indir_size = hns3_get_rss_indir_size,
	.get_rxfh = hns3_get_rss,
	.set_rxfh = hns3_set_rss,
	.get_link_ksettings = hns3_get_link_ksettings,
	.set_link_ksettings = hns3_set_link_ksettings,
	.nway_reset = hns3_nway_reset,
	.get_channels = hns3_get_channels,
	.set_channels = hns3_set_channels,
	.get_coalesce = hns3_get_coalesce,
	.set_coalesce = hns3_set_coalesce,
	.get_regs_len = hns3_get_regs_len,
	.get_regs = hns3_get_regs,
	.set_phys_id = hns3_set_phys_id,
};

/* hns3_ethtool_set_ops - attach the PF or VF ethtool ops table */
void hns3_ethtool_set_ops(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->flags & HNAE3_SUPPORT_VF)
		netdev->ethtool_ops = &hns3vf_ethtool_ops;
	else
		netdev->ethtool_ops = &hns3_ethtool_ops;
}