1 // SPDX-License-Identifier: GPL-2.0+ 2 // Copyright (c) 2016-2017 Hisilicon Limited. 3 4 #include <linux/etherdevice.h> 5 #include <linux/string.h> 6 #include <linux/phy.h> 7 8 #include "hns3_enet.h" 9 10 struct hns3_stats { 11 char stats_string[ETH_GSTRING_LEN]; 12 int stats_offset; 13 }; 14 15 /* tqp related stats */ 16 #define HNS3_TQP_STAT(_string, _member) { \ 17 .stats_string = _string, \ 18 .stats_offset = offsetof(struct hns3_enet_ring, stats) +\ 19 offsetof(struct ring_stats, _member), \ 20 } 21 22 static const struct hns3_stats hns3_txq_stats[] = { 23 /* Tx per-queue statistics */ 24 HNS3_TQP_STAT("io_err_cnt", io_err_cnt), 25 HNS3_TQP_STAT("tx_dropped", sw_err_cnt), 26 HNS3_TQP_STAT("seg_pkt_cnt", seg_pkt_cnt), 27 HNS3_TQP_STAT("packets", tx_pkts), 28 HNS3_TQP_STAT("bytes", tx_bytes), 29 HNS3_TQP_STAT("errors", tx_err_cnt), 30 HNS3_TQP_STAT("tx_wake", restart_queue), 31 HNS3_TQP_STAT("tx_busy", tx_busy), 32 }; 33 34 #define HNS3_TXQ_STATS_COUNT ARRAY_SIZE(hns3_txq_stats) 35 36 static const struct hns3_stats hns3_rxq_stats[] = { 37 /* Rx per-queue statistics */ 38 HNS3_TQP_STAT("io_err_cnt", io_err_cnt), 39 HNS3_TQP_STAT("rx_dropped", sw_err_cnt), 40 HNS3_TQP_STAT("seg_pkt_cnt", seg_pkt_cnt), 41 HNS3_TQP_STAT("packets", rx_pkts), 42 HNS3_TQP_STAT("bytes", rx_bytes), 43 HNS3_TQP_STAT("errors", rx_err_cnt), 44 HNS3_TQP_STAT("reuse_pg_cnt", reuse_pg_cnt), 45 HNS3_TQP_STAT("err_pkt_len", err_pkt_len), 46 HNS3_TQP_STAT("non_vld_descs", non_vld_descs), 47 HNS3_TQP_STAT("err_bd_num", err_bd_num), 48 HNS3_TQP_STAT("l2_err", l2_err), 49 HNS3_TQP_STAT("l3l4_csum_err", l3l4_csum_err), 50 }; 51 52 #define HNS3_RXQ_STATS_COUNT ARRAY_SIZE(hns3_rxq_stats) 53 54 #define HNS3_TQP_STATS_COUNT (HNS3_TXQ_STATS_COUNT + HNS3_RXQ_STATS_COUNT) 55 56 #define HNS3_SELF_TEST_TYPE_NUM 2 57 #define HNS3_NIC_LB_TEST_PKT_NUM 1 58 #define HNS3_NIC_LB_TEST_RING_ID 0 59 #define HNS3_NIC_LB_TEST_PACKET_SIZE 128 60 61 /* Nic loopback test err */ 62 #define HNS3_NIC_LB_TEST_NO_MEM_ERR 1 63 #define HNS3_NIC_LB_TEST_TX_CNT_ERR 2 64 #define HNS3_NIC_LB_TEST_RX_CNT_ERR 3 65 66 struct hns3_link_mode_mapping { 67 u32 hns3_link_mode; 68 u32 ethtool_link_mode; 69 }; 70 71 static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en) 72 { 73 struct hnae3_handle *h = hns3_get_handle(ndev); 74 int ret; 75 76 if (!h->ae_algo->ops->set_loopback || 77 !h->ae_algo->ops->set_promisc_mode) 78 return -EOPNOTSUPP; 79 80 switch (loop) { 81 case HNAE3_MAC_INTER_LOOP_SERDES: 82 case HNAE3_MAC_INTER_LOOP_MAC: 83 ret = h->ae_algo->ops->set_loopback(h, loop, en); 84 break; 85 default: 86 ret = -ENOTSUPP; 87 break; 88 } 89 90 if (ret) 91 return ret; 92 93 h->ae_algo->ops->set_promisc_mode(h, en, en); 94 95 return ret; 96 } 97 98 static int hns3_lp_up(struct net_device *ndev, enum hnae3_loop loop_mode) 99 { 100 struct hnae3_handle *h = hns3_get_handle(ndev); 101 int ret; 102 103 ret = hns3_nic_reset_all_ring(h); 104 if (ret) 105 return ret; 106 107 ret = hns3_lp_setup(ndev, loop_mode, true); 108 usleep_range(10000, 20000); 109 110 return 0; 111 } 112 113 static int hns3_lp_down(struct net_device *ndev, enum hnae3_loop loop_mode) 114 { 115 int ret; 116 117 ret = hns3_lp_setup(ndev, loop_mode, false); 118 if (ret) { 119 netdev_err(ndev, "lb_setup return error: %d\n", ret); 120 return ret; 121 } 122 123 usleep_range(10000, 20000); 124 125 return 0; 126 } 127 128 static void hns3_lp_setup_skb(struct sk_buff *skb) 129 { 130 struct net_device *ndev = skb->dev; 131 unsigned char *packet; 132 struct ethhdr *ethh; 133 unsigned int i; 134 135 skb_reserve(skb, NET_IP_ALIGN); 136 ethh = skb_put(skb, sizeof(struct ethhdr)); 137 packet = skb_put(skb, HNS3_NIC_LB_TEST_PACKET_SIZE); 138 139 memcpy(ethh->h_dest, ndev->dev_addr, ETH_ALEN); 140 ethh->h_dest[5] += 0x1f; 141 eth_zero_addr(ethh->h_source); 142 ethh->h_proto = htons(ETH_P_ARP); 143 skb_reset_mac_header(skb); 144 145 for (i = 0; i < HNS3_NIC_LB_TEST_PACKET_SIZE; i++) 146 packet[i] = (unsigned char)(i & 0xff); 147 } 148 149 static void hns3_lb_check_skb_data(struct hns3_enet_ring *ring, 150 struct sk_buff *skb) 151 { 152 struct hns3_enet_tqp_vector *tqp_vector = ring->tqp_vector; 153 unsigned char *packet = skb->data; 154 u32 i; 155 156 for (i = 0; i < skb->len; i++) 157 if (packet[i] != (unsigned char)(i & 0xff)) 158 break; 159 160 /* The packet is correctly received */ 161 if (i == skb->len) 162 tqp_vector->rx_group.total_packets++; 163 else 164 print_hex_dump(KERN_ERR, "selftest:", DUMP_PREFIX_OFFSET, 16, 1, 165 skb->data, skb->len, true); 166 167 dev_kfree_skb_any(skb); 168 } 169 170 static u32 hns3_lb_check_rx_ring(struct hns3_nic_priv *priv, u32 budget) 171 { 172 struct hnae3_handle *h = priv->ae_handle; 173 struct hnae3_knic_private_info *kinfo; 174 u32 i, rcv_good_pkt_total = 0; 175 176 kinfo = &h->kinfo; 177 for (i = kinfo->num_tqps; i < kinfo->num_tqps * 2; i++) { 178 struct hns3_enet_ring *ring = priv->ring_data[i].ring; 179 struct hns3_enet_ring_group *rx_group; 180 u64 pre_rx_pkt; 181 182 rx_group = &ring->tqp_vector->rx_group; 183 pre_rx_pkt = rx_group->total_packets; 184 185 preempt_disable(); 186 hns3_clean_rx_ring(ring, budget, hns3_lb_check_skb_data); 187 preempt_enable(); 188 189 rcv_good_pkt_total += (rx_group->total_packets - pre_rx_pkt); 190 rx_group->total_packets = pre_rx_pkt; 191 } 192 return rcv_good_pkt_total; 193 } 194 195 static void hns3_lb_clear_tx_ring(struct hns3_nic_priv *priv, u32 start_ringid, 196 u32 end_ringid, u32 budget) 197 { 198 u32 i; 199 200 for (i = start_ringid; i <= end_ringid; i++) { 201 struct hns3_enet_ring *ring = priv->ring_data[i].ring; 202 203 hns3_clean_tx_ring(ring, budget); 204 } 205 } 206 207 /** 208 * hns3_lp_run_test - run loopback test 209 * @ndev: net device 210 * @mode: loopback type 211 */ 212 static int hns3_lp_run_test(struct net_device *ndev, enum hnae3_loop mode) 213 { 214 struct hns3_nic_priv *priv = netdev_priv(ndev); 215 struct sk_buff *skb; 216 u32 i, good_cnt; 217 int ret_val = 0; 218 219 skb = alloc_skb(HNS3_NIC_LB_TEST_PACKET_SIZE + ETH_HLEN + NET_IP_ALIGN, 220 GFP_KERNEL); 221 if (!skb) 222 return HNS3_NIC_LB_TEST_NO_MEM_ERR; 223 224 skb->dev = ndev; 225 hns3_lp_setup_skb(skb); 226 skb->queue_mapping = HNS3_NIC_LB_TEST_RING_ID; 227 228 good_cnt = 0; 229 for (i = 0; i < HNS3_NIC_LB_TEST_PKT_NUM; i++) { 230 netdev_tx_t tx_ret; 231 232 skb_get(skb); 233 tx_ret = hns3_nic_net_xmit(skb, ndev); 234 if (tx_ret == NETDEV_TX_OK) 235 good_cnt++; 236 else 237 netdev_err(ndev, "hns3_lb_run_test xmit failed: %d\n", 238 tx_ret); 239 } 240 if (good_cnt != HNS3_NIC_LB_TEST_PKT_NUM) { 241 ret_val = HNS3_NIC_LB_TEST_TX_CNT_ERR; 242 netdev_err(ndev, "mode %d sent fail, cnt=0x%x, budget=0x%x\n", 243 mode, good_cnt, HNS3_NIC_LB_TEST_PKT_NUM); 244 goto out; 245 } 246 247 /* Allow 200 milliseconds for packets to go from Tx to Rx */ 248 msleep(200); 249 250 good_cnt = hns3_lb_check_rx_ring(priv, HNS3_NIC_LB_TEST_PKT_NUM); 251 if (good_cnt != HNS3_NIC_LB_TEST_PKT_NUM) { 252 ret_val = HNS3_NIC_LB_TEST_RX_CNT_ERR; 253 netdev_err(ndev, "mode %d recv fail, cnt=0x%x, budget=0x%x\n", 254 mode, good_cnt, HNS3_NIC_LB_TEST_PKT_NUM); 255 } 256 257 out: 258 hns3_lb_clear_tx_ring(priv, HNS3_NIC_LB_TEST_RING_ID, 259 HNS3_NIC_LB_TEST_RING_ID, 260 HNS3_NIC_LB_TEST_PKT_NUM); 261 262 kfree_skb(skb); 263 return ret_val; 264 } 265 266 /** 267 * hns3_nic_self_test - self test 268 * @ndev: net device 269 * @eth_test: test cmd 270 * @data: test result 271 */ 272 static void hns3_self_test(struct net_device *ndev, 273 struct ethtool_test *eth_test, u64 *data) 274 { 275 struct hns3_nic_priv *priv = netdev_priv(ndev); 276 struct hnae3_handle *h = priv->ae_handle; 277 int st_param[HNS3_SELF_TEST_TYPE_NUM][2]; 278 bool if_running = netif_running(ndev); 279 #if IS_ENABLED(CONFIG_VLAN_8021Q) 280 bool dis_vlan_filter; 281 #endif 282 int test_index = 0; 283 u32 i; 284 285 /* Only do offline selftest, or pass by default */ 286 if (eth_test->flags != ETH_TEST_FL_OFFLINE) 287 return; 288 289 st_param[HNAE3_MAC_INTER_LOOP_MAC][0] = HNAE3_MAC_INTER_LOOP_MAC; 290 st_param[HNAE3_MAC_INTER_LOOP_MAC][1] = 291 h->flags & HNAE3_SUPPORT_MAC_LOOPBACK; 292 293 st_param[HNAE3_MAC_INTER_LOOP_SERDES][0] = HNAE3_MAC_INTER_LOOP_SERDES; 294 st_param[HNAE3_MAC_INTER_LOOP_SERDES][1] = 295 h->flags & HNAE3_SUPPORT_SERDES_LOOPBACK; 296 297 if (if_running) 298 ndev->netdev_ops->ndo_stop(ndev); 299 300 #if IS_ENABLED(CONFIG_VLAN_8021Q) 301 /* Disable the vlan filter for selftest does not support it */ 302 dis_vlan_filter = (ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) && 303 h->ae_algo->ops->enable_vlan_filter; 304 if (dis_vlan_filter) 305 h->ae_algo->ops->enable_vlan_filter(h, false); 306 #endif 307 308 set_bit(HNS3_NIC_STATE_TESTING, &priv->state); 309 310 for (i = 0; i < HNS3_SELF_TEST_TYPE_NUM; i++) { 311 enum hnae3_loop loop_type = (enum hnae3_loop)st_param[i][0]; 312 313 if (!st_param[i][1]) 314 continue; 315 316 data[test_index] = hns3_lp_up(ndev, loop_type); 317 if (!data[test_index]) { 318 data[test_index] = hns3_lp_run_test(ndev, loop_type); 319 hns3_lp_down(ndev, loop_type); 320 } 321 322 if (data[test_index]) 323 eth_test->flags |= ETH_TEST_FL_FAILED; 324 325 test_index++; 326 } 327 328 clear_bit(HNS3_NIC_STATE_TESTING, &priv->state); 329 330 #if IS_ENABLED(CONFIG_VLAN_8021Q) 331 if (dis_vlan_filter) 332 h->ae_algo->ops->enable_vlan_filter(h, true); 333 #endif 334 335 if (if_running) 336 ndev->netdev_ops->ndo_open(ndev); 337 } 338 339 static int hns3_get_sset_count(struct net_device *netdev, int stringset) 340 { 341 struct hnae3_handle *h = hns3_get_handle(netdev); 342 const struct hnae3_ae_ops *ops = h->ae_algo->ops; 343 344 if (!ops->get_sset_count) 345 return -EOPNOTSUPP; 346 347 switch (stringset) { 348 case ETH_SS_STATS: 349 return ((HNS3_TQP_STATS_COUNT * h->kinfo.num_tqps) + 350 ops->get_sset_count(h, stringset)); 351 352 case ETH_SS_TEST: 353 return ops->get_sset_count(h, stringset); 354 } 355 356 return 0; 357 } 358 359 static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats, 360 u32 stat_count, u32 num_tqps, const char *prefix) 361 { 362 #define MAX_PREFIX_SIZE (6 + 4) 363 u32 size_left; 364 u32 i, j; 365 u32 n1; 366 367 for (i = 0; i < num_tqps; i++) { 368 for (j = 0; j < stat_count; j++) { 369 data[ETH_GSTRING_LEN - 1] = '\0'; 370 371 /* first, prepend the prefix string */ 372 n1 = snprintf(data, MAX_PREFIX_SIZE, "%s#%d_", 373 prefix, i); 374 n1 = min_t(uint, n1, MAX_PREFIX_SIZE - 1); 375 size_left = (ETH_GSTRING_LEN - 1) - n1; 376 377 /* now, concatenate the stats string to it */ 378 strncat(data, stats[j].stats_string, size_left); 379 data += ETH_GSTRING_LEN; 380 } 381 } 382 383 return data; 384 } 385 386 static u8 *hns3_get_strings_tqps(struct hnae3_handle *handle, u8 *data) 387 { 388 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 389 const char tx_prefix[] = "txq"; 390 const char rx_prefix[] = "rxq"; 391 392 /* get strings for Tx */ 393 data = hns3_update_strings(data, hns3_txq_stats, HNS3_TXQ_STATS_COUNT, 394 kinfo->num_tqps, tx_prefix); 395 396 /* get strings for Rx */ 397 data = hns3_update_strings(data, hns3_rxq_stats, HNS3_RXQ_STATS_COUNT, 398 kinfo->num_tqps, rx_prefix); 399 400 return data; 401 } 402 403 static void hns3_get_strings(struct net_device *netdev, u32 stringset, u8 *data) 404 { 405 struct hnae3_handle *h = hns3_get_handle(netdev); 406 const struct hnae3_ae_ops *ops = h->ae_algo->ops; 407 char *buff = (char *)data; 408 409 if (!ops->get_strings) 410 return; 411 412 switch (stringset) { 413 case ETH_SS_STATS: 414 buff = hns3_get_strings_tqps(h, buff); 415 h->ae_algo->ops->get_strings(h, stringset, (u8 *)buff); 416 break; 417 case ETH_SS_TEST: 418 ops->get_strings(h, stringset, data); 419 break; 420 } 421 } 422 423 static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data) 424 { 425 struct hns3_nic_priv *nic_priv = (struct hns3_nic_priv *)handle->priv; 426 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 427 struct hns3_enet_ring *ring; 428 u8 *stat; 429 int i, j; 430 431 /* get stats for Tx */ 432 for (i = 0; i < kinfo->num_tqps; i++) { 433 ring = nic_priv->ring_data[i].ring; 434 for (j = 0; j < HNS3_TXQ_STATS_COUNT; j++) { 435 stat = (u8 *)ring + hns3_txq_stats[j].stats_offset; 436 *data++ = *(u64 *)stat; 437 } 438 } 439 440 /* get stats for Rx */ 441 for (i = 0; i < kinfo->num_tqps; i++) { 442 ring = nic_priv->ring_data[i + kinfo->num_tqps].ring; 443 for (j = 0; j < HNS3_RXQ_STATS_COUNT; j++) { 444 stat = (u8 *)ring + hns3_rxq_stats[j].stats_offset; 445 *data++ = *(u64 *)stat; 446 } 447 } 448 449 return data; 450 } 451 452 /* hns3_get_stats - get detail statistics. 453 * @netdev: net device 454 * @stats: statistics info. 455 * @data: statistics data. 456 */ 457 static void hns3_get_stats(struct net_device *netdev, 458 struct ethtool_stats *stats, u64 *data) 459 { 460 struct hnae3_handle *h = hns3_get_handle(netdev); 461 u64 *p = data; 462 463 if (!h->ae_algo->ops->get_stats || !h->ae_algo->ops->update_stats) { 464 netdev_err(netdev, "could not get any statistics\n"); 465 return; 466 } 467 468 h->ae_algo->ops->update_stats(h, &netdev->stats); 469 470 /* get per-queue stats */ 471 p = hns3_get_stats_tqps(h, p); 472 473 /* get MAC & other misc hardware stats */ 474 h->ae_algo->ops->get_stats(h, p); 475 } 476 477 static void hns3_get_drvinfo(struct net_device *netdev, 478 struct ethtool_drvinfo *drvinfo) 479 { 480 struct hns3_nic_priv *priv = netdev_priv(netdev); 481 struct hnae3_handle *h = priv->ae_handle; 482 483 strncpy(drvinfo->version, hns3_driver_version, 484 sizeof(drvinfo->version)); 485 drvinfo->version[sizeof(drvinfo->version) - 1] = '\0'; 486 487 strncpy(drvinfo->driver, h->pdev->driver->name, 488 sizeof(drvinfo->driver)); 489 drvinfo->driver[sizeof(drvinfo->driver) - 1] = '\0'; 490 491 strncpy(drvinfo->bus_info, pci_name(h->pdev), 492 sizeof(drvinfo->bus_info)); 493 drvinfo->bus_info[ETHTOOL_BUSINFO_LEN - 1] = '\0'; 494 495 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "0x%08x", 496 priv->ae_handle->ae_algo->ops->get_fw_version(h)); 497 } 498 499 static u32 hns3_get_link(struct net_device *netdev) 500 { 501 struct hnae3_handle *h = hns3_get_handle(netdev); 502 503 if (h->ae_algo && h->ae_algo->ops && h->ae_algo->ops->get_status) 504 return h->ae_algo->ops->get_status(h); 505 else 506 return 0; 507 } 508 509 static void hns3_get_ringparam(struct net_device *netdev, 510 struct ethtool_ringparam *param) 511 { 512 struct hns3_nic_priv *priv = netdev_priv(netdev); 513 struct hnae3_handle *h = priv->ae_handle; 514 int queue_num = h->kinfo.num_tqps; 515 516 param->tx_max_pending = HNS3_RING_MAX_PENDING; 517 param->rx_max_pending = HNS3_RING_MAX_PENDING; 518 519 param->tx_pending = priv->ring_data[0].ring->desc_num; 520 param->rx_pending = priv->ring_data[queue_num].ring->desc_num; 521 } 522 523 static void hns3_get_pauseparam(struct net_device *netdev, 524 struct ethtool_pauseparam *param) 525 { 526 struct hnae3_handle *h = hns3_get_handle(netdev); 527 528 if (h->ae_algo && h->ae_algo->ops && h->ae_algo->ops->get_pauseparam) 529 h->ae_algo->ops->get_pauseparam(h, ¶m->autoneg, 530 ¶m->rx_pause, ¶m->tx_pause); 531 } 532 533 static int hns3_set_pauseparam(struct net_device *netdev, 534 struct ethtool_pauseparam *param) 535 { 536 struct hnae3_handle *h = hns3_get_handle(netdev); 537 538 if (h->ae_algo->ops->set_pauseparam) 539 return h->ae_algo->ops->set_pauseparam(h, param->autoneg, 540 param->rx_pause, 541 param->tx_pause); 542 return -EOPNOTSUPP; 543 } 544 545 static int hns3_get_link_ksettings(struct net_device *netdev, 546 struct ethtool_link_ksettings *cmd) 547 { 548 struct hnae3_handle *h = hns3_get_handle(netdev); 549 const struct hnae3_ae_ops *ops; 550 u8 link_stat; 551 552 if (!h->ae_algo || !h->ae_algo->ops) 553 return -EOPNOTSUPP; 554 555 ops = h->ae_algo->ops; 556 if (ops->get_port_type) 557 ops->get_port_type(h, &cmd->base.port); 558 else 559 return -EOPNOTSUPP; 560 561 switch (cmd->base.port) { 562 case PORT_FIBRE: 563 /* 1.auto_neg & speed & duplex from cmd */ 564 if (ops->get_ksettings_an_result) 565 ops->get_ksettings_an_result(h, 566 &cmd->base.autoneg, 567 &cmd->base.speed, 568 &cmd->base.duplex); 569 else 570 return -EOPNOTSUPP; 571 572 /* 2.get link mode*/ 573 if (ops->get_link_mode) 574 ops->get_link_mode(h, 575 cmd->link_modes.supported, 576 cmd->link_modes.advertising); 577 578 /* 3.mdix_ctrl&mdix get from phy reg */ 579 if (ops->get_mdix_mode) 580 ops->get_mdix_mode(h, &cmd->base.eth_tp_mdix_ctrl, 581 &cmd->base.eth_tp_mdix); 582 583 break; 584 case PORT_TP: 585 if (!netdev->phydev) 586 return -EOPNOTSUPP; 587 588 phy_ethtool_ksettings_get(netdev->phydev, cmd); 589 590 break; 591 default: 592 netdev_warn(netdev, 593 "Unknown port type, neither Fibre/Copper detected"); 594 return 0; 595 } 596 597 /* mdio_support */ 598 cmd->base.mdio_support = ETH_MDIO_SUPPORTS_C22; 599 600 link_stat = hns3_get_link(netdev); 601 if (!link_stat) { 602 cmd->base.speed = SPEED_UNKNOWN; 603 cmd->base.duplex = DUPLEX_UNKNOWN; 604 } 605 606 return 0; 607 } 608 609 static int hns3_set_link_ksettings(struct net_device *netdev, 610 const struct ethtool_link_ksettings *cmd) 611 { 612 /* Only support ksettings_set for netdev with phy attached for now */ 613 if (netdev->phydev) 614 return phy_ethtool_ksettings_set(netdev->phydev, cmd); 615 616 return -EOPNOTSUPP; 617 } 618 619 static u32 hns3_get_rss_key_size(struct net_device *netdev) 620 { 621 struct hnae3_handle *h = hns3_get_handle(netdev); 622 623 if (!h->ae_algo || !h->ae_algo->ops || 624 !h->ae_algo->ops->get_rss_key_size) 625 return 0; 626 627 return h->ae_algo->ops->get_rss_key_size(h); 628 } 629 630 static u32 hns3_get_rss_indir_size(struct net_device *netdev) 631 { 632 struct hnae3_handle *h = hns3_get_handle(netdev); 633 634 if (!h->ae_algo || !h->ae_algo->ops || 635 !h->ae_algo->ops->get_rss_indir_size) 636 return 0; 637 638 return h->ae_algo->ops->get_rss_indir_size(h); 639 } 640 641 static int hns3_get_rss(struct net_device *netdev, u32 *indir, u8 *key, 642 u8 *hfunc) 643 { 644 struct hnae3_handle *h = hns3_get_handle(netdev); 645 646 if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->get_rss) 647 return -EOPNOTSUPP; 648 649 return h->ae_algo->ops->get_rss(h, indir, key, hfunc); 650 } 651 652 static int hns3_set_rss(struct net_device *netdev, const u32 *indir, 653 const u8 *key, const u8 hfunc) 654 { 655 struct hnae3_handle *h = hns3_get_handle(netdev); 656 657 if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->set_rss) 658 return -EOPNOTSUPP; 659 660 /* currently we only support Toeplitz hash */ 661 if ((hfunc != ETH_RSS_HASH_NO_CHANGE) && (hfunc != ETH_RSS_HASH_TOP)) { 662 netdev_err(netdev, 663 "hash func not supported (only Toeplitz hash)\n"); 664 return -EOPNOTSUPP; 665 } 666 if (!indir) { 667 netdev_err(netdev, 668 "set rss failed for indir is empty\n"); 669 return -EOPNOTSUPP; 670 } 671 672 return h->ae_algo->ops->set_rss(h, indir, key, hfunc); 673 } 674 675 static int hns3_get_rxnfc(struct net_device *netdev, 676 struct ethtool_rxnfc *cmd, 677 u32 *rule_locs) 678 { 679 struct hnae3_handle *h = hns3_get_handle(netdev); 680 681 if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->get_rss_tuple) 682 return -EOPNOTSUPP; 683 684 switch (cmd->cmd) { 685 case ETHTOOL_GRXRINGS: 686 cmd->data = h->kinfo.rss_size; 687 break; 688 case ETHTOOL_GRXFH: 689 return h->ae_algo->ops->get_rss_tuple(h, cmd); 690 default: 691 return -EOPNOTSUPP; 692 } 693 694 return 0; 695 } 696 697 static int hns3_change_all_ring_bd_num(struct hns3_nic_priv *priv, 698 u32 new_desc_num) 699 { 700 struct hnae3_handle *h = priv->ae_handle; 701 int i; 702 703 h->kinfo.num_desc = new_desc_num; 704 705 for (i = 0; i < h->kinfo.num_tqps * 2; i++) 706 priv->ring_data[i].ring->desc_num = new_desc_num; 707 708 return hns3_init_all_ring(priv); 709 } 710 711 static int hns3_set_ringparam(struct net_device *ndev, 712 struct ethtool_ringparam *param) 713 { 714 struct hns3_nic_priv *priv = netdev_priv(ndev); 715 struct hnae3_handle *h = priv->ae_handle; 716 bool if_running = netif_running(ndev); 717 u32 old_desc_num, new_desc_num; 718 int ret; 719 720 if (param->rx_mini_pending || param->rx_jumbo_pending) 721 return -EINVAL; 722 723 if (param->tx_pending != param->rx_pending) { 724 netdev_err(ndev, 725 "Descriptors of tx and rx must be equal"); 726 return -EINVAL; 727 } 728 729 if (param->tx_pending > HNS3_RING_MAX_PENDING || 730 param->tx_pending < HNS3_RING_MIN_PENDING) { 731 netdev_err(ndev, 732 "Descriptors requested (Tx/Rx: %d) out of range [%d-%d]\n", 733 param->tx_pending, HNS3_RING_MIN_PENDING, 734 HNS3_RING_MAX_PENDING); 735 return -EINVAL; 736 } 737 738 new_desc_num = param->tx_pending; 739 740 /* Hardware requires that its descriptors must be multiple of eight */ 741 new_desc_num = ALIGN(new_desc_num, HNS3_RING_BD_MULTIPLE); 742 old_desc_num = h->kinfo.num_desc; 743 if (old_desc_num == new_desc_num) 744 return 0; 745 746 netdev_info(ndev, 747 "Changing descriptor count from %d to %d.\n", 748 old_desc_num, new_desc_num); 749 750 if (if_running) 751 dev_close(ndev); 752 753 ret = hns3_uninit_all_ring(priv); 754 if (ret) 755 return ret; 756 757 ret = hns3_change_all_ring_bd_num(priv, new_desc_num); 758 if (ret) { 759 ret = hns3_change_all_ring_bd_num(priv, old_desc_num); 760 if (ret) { 761 netdev_err(ndev, 762 "Revert to old bd num fail, ret=%d.\n", ret); 763 return ret; 764 } 765 } 766 767 if (if_running) 768 ret = dev_open(ndev); 769 770 return ret; 771 } 772 773 static int hns3_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) 774 { 775 struct hnae3_handle *h = hns3_get_handle(netdev); 776 777 if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->set_rss_tuple) 778 return -EOPNOTSUPP; 779 780 switch (cmd->cmd) { 781 case ETHTOOL_SRXFH: 782 return h->ae_algo->ops->set_rss_tuple(h, cmd); 783 default: 784 return -EOPNOTSUPP; 785 } 786 } 787 788 static int hns3_nway_reset(struct net_device *netdev) 789 { 790 struct phy_device *phy = netdev->phydev; 791 792 if (!netif_running(netdev)) 793 return 0; 794 795 /* Only support nway_reset for netdev with phy attached for now */ 796 if (!phy) 797 return -EOPNOTSUPP; 798 799 if (phy->autoneg != AUTONEG_ENABLE) 800 return -EINVAL; 801 802 return genphy_restart_aneg(phy); 803 } 804 805 static void hns3_get_channels(struct net_device *netdev, 806 struct ethtool_channels *ch) 807 { 808 struct hnae3_handle *h = hns3_get_handle(netdev); 809 810 if (h->ae_algo->ops->get_channels) 811 h->ae_algo->ops->get_channels(h, ch); 812 } 813 814 static int hns3_get_coalesce_per_queue(struct net_device *netdev, u32 queue, 815 struct ethtool_coalesce *cmd) 816 { 817 struct hns3_enet_tqp_vector *tx_vector, *rx_vector; 818 struct hns3_nic_priv *priv = netdev_priv(netdev); 819 struct hnae3_handle *h = priv->ae_handle; 820 u16 queue_num = h->kinfo.num_tqps; 821 822 if (queue >= queue_num) { 823 netdev_err(netdev, 824 "Invalid queue value %d! Queue max id=%d\n", 825 queue, queue_num - 1); 826 return -EINVAL; 827 } 828 829 tx_vector = priv->ring_data[queue].ring->tqp_vector; 830 rx_vector = priv->ring_data[queue_num + queue].ring->tqp_vector; 831 832 cmd->use_adaptive_tx_coalesce = 833 tx_vector->tx_group.coal.gl_adapt_enable; 834 cmd->use_adaptive_rx_coalesce = 835 rx_vector->rx_group.coal.gl_adapt_enable; 836 837 cmd->tx_coalesce_usecs = tx_vector->tx_group.coal.int_gl; 838 cmd->rx_coalesce_usecs = rx_vector->rx_group.coal.int_gl; 839 840 cmd->tx_coalesce_usecs_high = h->kinfo.int_rl_setting; 841 cmd->rx_coalesce_usecs_high = h->kinfo.int_rl_setting; 842 843 return 0; 844 } 845 846 static int hns3_get_coalesce(struct net_device *netdev, 847 struct ethtool_coalesce *cmd) 848 { 849 return hns3_get_coalesce_per_queue(netdev, 0, cmd); 850 } 851 852 static int hns3_check_gl_coalesce_para(struct net_device *netdev, 853 struct ethtool_coalesce *cmd) 854 { 855 u32 rx_gl, tx_gl; 856 857 if (cmd->rx_coalesce_usecs > HNS3_INT_GL_MAX) { 858 netdev_err(netdev, 859 "Invalid rx-usecs value, rx-usecs range is 0-%d\n", 860 HNS3_INT_GL_MAX); 861 return -EINVAL; 862 } 863 864 if (cmd->tx_coalesce_usecs > HNS3_INT_GL_MAX) { 865 netdev_err(netdev, 866 "Invalid tx-usecs value, tx-usecs range is 0-%d\n", 867 HNS3_INT_GL_MAX); 868 return -EINVAL; 869 } 870 871 rx_gl = hns3_gl_round_down(cmd->rx_coalesce_usecs); 872 if (rx_gl != cmd->rx_coalesce_usecs) { 873 netdev_info(netdev, 874 "rx_usecs(%d) rounded down to %d, because it must be multiple of 2.\n", 875 cmd->rx_coalesce_usecs, rx_gl); 876 } 877 878 tx_gl = hns3_gl_round_down(cmd->tx_coalesce_usecs); 879 if (tx_gl != cmd->tx_coalesce_usecs) { 880 netdev_info(netdev, 881 "tx_usecs(%d) rounded down to %d, because it must be multiple of 2.\n", 882 cmd->tx_coalesce_usecs, tx_gl); 883 } 884 885 return 0; 886 } 887 888 static int hns3_check_rl_coalesce_para(struct net_device *netdev, 889 struct ethtool_coalesce *cmd) 890 { 891 u32 rl; 892 893 if (cmd->tx_coalesce_usecs_high != cmd->rx_coalesce_usecs_high) { 894 netdev_err(netdev, 895 "tx_usecs_high must be same as rx_usecs_high.\n"); 896 return -EINVAL; 897 } 898 899 if (cmd->rx_coalesce_usecs_high > HNS3_INT_RL_MAX) { 900 netdev_err(netdev, 901 "Invalid usecs_high value, usecs_high range is 0-%d\n", 902 HNS3_INT_RL_MAX); 903 return -EINVAL; 904 } 905 906 rl = hns3_rl_round_down(cmd->rx_coalesce_usecs_high); 907 if (rl != cmd->rx_coalesce_usecs_high) { 908 netdev_info(netdev, 909 "usecs_high(%d) rounded down to %d, because it must be multiple of 4.\n", 910 cmd->rx_coalesce_usecs_high, rl); 911 } 912 913 return 0; 914 } 915 916 static int hns3_check_coalesce_para(struct net_device *netdev, 917 struct ethtool_coalesce *cmd) 918 { 919 int ret; 920 921 ret = hns3_check_gl_coalesce_para(netdev, cmd); 922 if (ret) { 923 netdev_err(netdev, 924 "Check gl coalesce param fail. ret = %d\n", ret); 925 return ret; 926 } 927 928 ret = hns3_check_rl_coalesce_para(netdev, cmd); 929 if (ret) { 930 netdev_err(netdev, 931 "Check rl coalesce param fail. ret = %d\n", ret); 932 return ret; 933 } 934 935 if (cmd->use_adaptive_tx_coalesce == 1 || 936 cmd->use_adaptive_rx_coalesce == 1) { 937 netdev_info(netdev, 938 "adaptive-tx=%d and adaptive-rx=%d, tx_usecs or rx_usecs will changed dynamically.\n", 939 cmd->use_adaptive_tx_coalesce, 940 cmd->use_adaptive_rx_coalesce); 941 } 942 943 return 0; 944 } 945 946 static void hns3_set_coalesce_per_queue(struct net_device *netdev, 947 struct ethtool_coalesce *cmd, 948 u32 queue) 949 { 950 struct hns3_enet_tqp_vector *tx_vector, *rx_vector; 951 struct hns3_nic_priv *priv = netdev_priv(netdev); 952 struct hnae3_handle *h = priv->ae_handle; 953 int queue_num = h->kinfo.num_tqps; 954 955 tx_vector = priv->ring_data[queue].ring->tqp_vector; 956 rx_vector = priv->ring_data[queue_num + queue].ring->tqp_vector; 957 958 tx_vector->tx_group.coal.gl_adapt_enable = 959 cmd->use_adaptive_tx_coalesce; 960 rx_vector->rx_group.coal.gl_adapt_enable = 961 cmd->use_adaptive_rx_coalesce; 962 963 tx_vector->tx_group.coal.int_gl = cmd->tx_coalesce_usecs; 964 rx_vector->rx_group.coal.int_gl = cmd->rx_coalesce_usecs; 965 966 hns3_set_vector_coalesce_tx_gl(tx_vector, 967 tx_vector->tx_group.coal.int_gl); 968 hns3_set_vector_coalesce_rx_gl(rx_vector, 969 rx_vector->rx_group.coal.int_gl); 970 971 hns3_set_vector_coalesce_rl(tx_vector, h->kinfo.int_rl_setting); 972 hns3_set_vector_coalesce_rl(rx_vector, h->kinfo.int_rl_setting); 973 } 974 975 static int hns3_set_coalesce(struct net_device *netdev, 976 struct ethtool_coalesce *cmd) 977 { 978 struct hnae3_handle *h = hns3_get_handle(netdev); 979 u16 queue_num = h->kinfo.num_tqps; 980 int ret; 981 int i; 982 983 ret = hns3_check_coalesce_para(netdev, cmd); 984 if (ret) 985 return ret; 986 987 h->kinfo.int_rl_setting = 988 hns3_rl_round_down(cmd->rx_coalesce_usecs_high); 989 990 for (i = 0; i < queue_num; i++) 991 hns3_set_coalesce_per_queue(netdev, cmd, i); 992 993 return 0; 994 } 995 996 static int hns3_get_regs_len(struct net_device *netdev) 997 { 998 struct hnae3_handle *h = hns3_get_handle(netdev); 999 1000 if (!h->ae_algo->ops->get_regs_len) 1001 return -EOPNOTSUPP; 1002 1003 return h->ae_algo->ops->get_regs_len(h); 1004 } 1005 1006 static void hns3_get_regs(struct net_device *netdev, 1007 struct ethtool_regs *cmd, void *data) 1008 { 1009 struct hnae3_handle *h = hns3_get_handle(netdev); 1010 1011 if (!h->ae_algo->ops->get_regs) 1012 return; 1013 1014 h->ae_algo->ops->get_regs(h, &cmd->version, data); 1015 } 1016 1017 static int hns3_set_phys_id(struct net_device *netdev, 1018 enum ethtool_phys_id_state state) 1019 { 1020 struct hnae3_handle *h = hns3_get_handle(netdev); 1021 1022 if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->set_led_id) 1023 return -EOPNOTSUPP; 1024 1025 return h->ae_algo->ops->set_led_id(h, state); 1026 } 1027 1028 static const struct ethtool_ops hns3vf_ethtool_ops = { 1029 .get_drvinfo = hns3_get_drvinfo, 1030 .get_ringparam = hns3_get_ringparam, 1031 .set_ringparam = hns3_set_ringparam, 1032 .get_strings = hns3_get_strings, 1033 .get_ethtool_stats = hns3_get_stats, 1034 .get_sset_count = hns3_get_sset_count, 1035 .get_rxnfc = hns3_get_rxnfc, 1036 .get_rxfh_key_size = hns3_get_rss_key_size, 1037 .get_rxfh_indir_size = hns3_get_rss_indir_size, 1038 .get_rxfh = hns3_get_rss, 1039 .set_rxfh = hns3_set_rss, 1040 .get_link_ksettings = hns3_get_link_ksettings, 1041 .get_channels = hns3_get_channels, 1042 .get_coalesce = hns3_get_coalesce, 1043 .set_coalesce = hns3_set_coalesce, 1044 .get_link = hns3_get_link, 1045 }; 1046 1047 static const struct ethtool_ops hns3_ethtool_ops = { 1048 .self_test = hns3_self_test, 1049 .get_drvinfo = hns3_get_drvinfo, 1050 .get_link = hns3_get_link, 1051 .get_ringparam = hns3_get_ringparam, 1052 .set_ringparam = hns3_set_ringparam, 1053 .get_pauseparam = hns3_get_pauseparam, 1054 .set_pauseparam = hns3_set_pauseparam, 1055 .get_strings = hns3_get_strings, 1056 .get_ethtool_stats = hns3_get_stats, 1057 .get_sset_count = hns3_get_sset_count, 1058 .get_rxnfc = hns3_get_rxnfc, 1059 .set_rxnfc = hns3_set_rxnfc, 1060 .get_rxfh_key_size = hns3_get_rss_key_size, 1061 .get_rxfh_indir_size = hns3_get_rss_indir_size, 1062 .get_rxfh = hns3_get_rss, 1063 .set_rxfh = hns3_set_rss, 1064 .get_link_ksettings = hns3_get_link_ksettings, 1065 .set_link_ksettings = hns3_set_link_ksettings, 1066 .nway_reset = hns3_nway_reset, 1067 .get_channels = hns3_get_channels, 1068 .set_channels = hns3_set_channels, 1069 .get_coalesce = hns3_get_coalesce, 1070 .set_coalesce = hns3_set_coalesce, 1071 .get_regs_len = hns3_get_regs_len, 1072 .get_regs = hns3_get_regs, 1073 .set_phys_id = hns3_set_phys_id, 1074 }; 1075 1076 void hns3_ethtool_set_ops(struct net_device *netdev) 1077 { 1078 struct hnae3_handle *h = hns3_get_handle(netdev); 1079 1080 if (h->flags & HNAE3_SUPPORT_VF) 1081 netdev->ethtool_ops = &hns3vf_ethtool_ops; 1082 else 1083 netdev->ethtool_ops = &hns3_ethtool_ops; 1084 } 1085