/*
 * Copyright (c) 2016~2017 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/phy.h>

#include "hns3_enet.h"

struct hns3_stats {
	char stats_string[ETH_GSTRING_LEN];
	int stats_offset;
};

/* tqp related stats */
#define HNS3_TQP_STAT(_string, _member)	{			\
	.stats_string = _string,				\
	.stats_offset = offsetof(struct hns3_enet_ring, stats) +\
			offsetof(struct ring_stats, _member),	\
}

static const struct hns3_stats hns3_txq_stats[] = {
	/* Tx per-queue statistics */
	HNS3_TQP_STAT("io_err_cnt", io_err_cnt),
	HNS3_TQP_STAT("tx_dropped", sw_err_cnt),
	HNS3_TQP_STAT("seg_pkt_cnt", seg_pkt_cnt),
	HNS3_TQP_STAT("packets", tx_pkts),
	HNS3_TQP_STAT("bytes", tx_bytes),
	HNS3_TQP_STAT("errors", tx_err_cnt),
	HNS3_TQP_STAT("tx_wake", restart_queue),
	HNS3_TQP_STAT("tx_busy", tx_busy),
};

#define HNS3_TXQ_STATS_COUNT ARRAY_SIZE(hns3_txq_stats)

static const struct hns3_stats hns3_rxq_stats[] = {
	/* Rx per-queue statistics */
	HNS3_TQP_STAT("io_err_cnt", io_err_cnt),
	HNS3_TQP_STAT("rx_dropped", sw_err_cnt),
	HNS3_TQP_STAT("seg_pkt_cnt", seg_pkt_cnt),
	HNS3_TQP_STAT("packets", rx_pkts),
	HNS3_TQP_STAT("bytes", rx_bytes),
	HNS3_TQP_STAT("errors", rx_err_cnt),
	HNS3_TQP_STAT("reuse_pg_cnt", reuse_pg_cnt),
	HNS3_TQP_STAT("err_pkt_len", err_pkt_len),
	HNS3_TQP_STAT("non_vld_descs", non_vld_descs),
	HNS3_TQP_STAT("err_bd_num", err_bd_num),
	HNS3_TQP_STAT("l2_err", l2_err),
	HNS3_TQP_STAT("l3l4_csum_err", l3l4_csum_err),
};

#define HNS3_RXQ_STATS_COUNT ARRAY_SIZE(hns3_rxq_stats)

#define HNS3_TQP_STATS_COUNT (HNS3_TXQ_STATS_COUNT + HNS3_RXQ_STATS_COUNT)

#define HNS3_SELF_TEST_TYPE_NUM		1
#define HNS3_NIC_LB_TEST_PKT_NUM	1
#define HNS3_NIC_LB_TEST_RING_ID	0
#define HNS3_NIC_LB_TEST_PACKET_SIZE	128

/* Nic loopback test err */
#define HNS3_NIC_LB_TEST_NO_MEM_ERR	1
#define HNS3_NIC_LB_TEST_TX_CNT_ERR	2
#define HNS3_NIC_LB_TEST_RX_CNT_ERR	3

struct hns3_link_mode_mapping {
	u32 hns3_link_mode;
	u32 ethtool_link_mode;
};

/* Enable or disable MAC-internal loopback; promiscuous mode is toggled
 * along with it.
 */
static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en)
{
	struct hnae3_handle *h = hns3_get_handle(ndev);
	int ret;

	if (!h->ae_algo->ops->set_loopback ||
	    !h->ae_algo->ops->set_promisc_mode)
		return -EOPNOTSUPP;

	switch (loop) {
	case HNAE3_MAC_INTER_LOOP_MAC:
		ret = h->ae_algo->ops->set_loopback(h, loop, en);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	if (ret)
		return ret;

	h->ae_algo->ops->set_promisc_mode(h, en);

	return ret;
}

static int hns3_lp_up(struct net_device *ndev, enum hnae3_loop loop_mode)
{
	struct hnae3_handle *h = hns3_get_handle(ndev);
	int ret;

	if (!h->ae_algo->ops->start)
		return -EOPNOTSUPP;

	ret = h->ae_algo->ops->start(h);
	if (ret) {
		netdev_err(ndev,
			   "hns3_lb_up ae start return error: %d\n", ret);
		return ret;
	}

	ret = hns3_lp_setup(ndev, loop_mode, true);
	usleep_range(10000, 20000);

	return ret;
}

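/* Undo hns3_lp_up(): disable the loopback again and stop the ae handle */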
static int hns3_lp_down(struct net_device *ndev, enum hnae3_loop loop_mode)
{
	struct hnae3_handle *h = hns3_get_handle(ndev);
	int ret;

	if (!h->ae_algo->ops->stop)
		return -EOPNOTSUPP;

	ret = hns3_lp_setup(ndev, loop_mode, false);
	if (ret) {
		netdev_err(ndev, "lb_setup return error: %d\n", ret);
		return ret;
	}

	h->ae_algo->ops->stop(h);
	usleep_range(10000, 20000);

	return 0;
}

static void hns3_lp_setup_skb(struct sk_buff *skb)
{
	struct net_device *ndev = skb->dev;
	unsigned char *packet;
	struct ethhdr *ethh;
	unsigned int i;

	skb_reserve(skb, NET_IP_ALIGN);
	ethh = skb_put(skb, sizeof(struct ethhdr));
	packet = skb_put(skb, HNS3_NIC_LB_TEST_PACKET_SIZE);

	memcpy(ethh->h_dest, ndev->dev_addr, ETH_ALEN);
	eth_zero_addr(ethh->h_source);
	ethh->h_proto = htons(ETH_P_ARP);
	skb_reset_mac_header(skb);

	for (i = 0; i < HNS3_NIC_LB_TEST_PACKET_SIZE; i++)
		packet[i] = (unsigned char)(i & 0xff);
}

static void hns3_lb_check_skb_data(struct hns3_enet_ring *ring,
				   struct sk_buff *skb)
{
	struct hns3_enet_tqp_vector *tqp_vector = ring->tqp_vector;
	unsigned char *packet = skb->data;
	u32 i;

	for (i = 0; i < skb->len; i++)
		if (packet[i] != (unsigned char)(i & 0xff))
			break;

	/* The packet is correctly received */
	if (i == skb->len)
		tqp_vector->rx_group.total_packets++;
	else
		print_hex_dump(KERN_ERR, "selftest:", DUMP_PREFIX_OFFSET, 16, 1,
			       skb->data, skb->len, true);

	dev_kfree_skb_any(skb);
}

static u32 hns3_lb_check_rx_ring(struct hns3_nic_priv *priv, u32 budget)
{
	struct hnae3_handle *h = priv->ae_handle;
	struct hnae3_knic_private_info *kinfo;
	u32 i, rcv_good_pkt_total = 0;

	kinfo = &h->kinfo;
	for (i = kinfo->num_tqps; i < kinfo->num_tqps * 2; i++) {
		struct hns3_enet_ring *ring = priv->ring_data[i].ring;
		struct hns3_enet_ring_group *rx_group;
		u64 pre_rx_pkt;

		rx_group = &ring->tqp_vector->rx_group;
		pre_rx_pkt = rx_group->total_packets;

		hns3_clean_rx_ring(ring, budget, hns3_lb_check_skb_data);

		rcv_good_pkt_total += (rx_group->total_packets - pre_rx_pkt);
		rx_group->total_packets = pre_rx_pkt;
	}
	return rcv_good_pkt_total;
}

static void hns3_lb_clear_tx_ring(struct hns3_nic_priv *priv, u32 start_ringid,
				  u32 end_ringid, u32 budget)
{
	u32 i;

	for (i = start_ringid; i <= end_ringid; i++) {
		struct hns3_enet_ring *ring = priv->ring_data[i].ring;

		hns3_clean_tx_ring(ring, budget);
	}
}

/**
 * hns3_lp_run_test - run loopback test
 * @ndev: net device
 * @mode: loopback type
 */
static int hns3_lp_run_test(struct net_device *ndev, enum hnae3_loop mode)
{
	struct hns3_nic_priv *priv = netdev_priv(ndev);
	struct sk_buff *skb;
	u32 i, good_cnt;
	int ret_val = 0;

	skb = alloc_skb(HNS3_NIC_LB_TEST_PACKET_SIZE + ETH_HLEN + NET_IP_ALIGN,
			GFP_KERNEL);
	if (!skb)
		return HNS3_NIC_LB_TEST_NO_MEM_ERR;

	skb->dev = ndev;
	hns3_lp_setup_skb(skb);
	skb->queue_mapping = HNS3_NIC_LB_TEST_RING_ID;

	good_cnt = 0;
	for (i = 0; i < HNS3_NIC_LB_TEST_PKT_NUM; i++) {
		netdev_tx_t tx_ret;

		skb_get(skb);
		tx_ret = hns3_nic_net_xmit(skb, ndev);
		if (tx_ret == NETDEV_TX_OK)
			good_cnt++;
		else
			netdev_err(ndev, "hns3_lb_run_test xmit failed: %d\n",
				   tx_ret);
	}
	if (good_cnt != HNS3_NIC_LB_TEST_PKT_NUM) {
		ret_val = HNS3_NIC_LB_TEST_TX_CNT_ERR;
		netdev_err(ndev, "mode %d sent fail, cnt=0x%x, budget=0x%x\n",
			   mode, good_cnt, HNS3_NIC_LB_TEST_PKT_NUM);
		goto out;
	}

	/* Allow 200 milliseconds for packets to go from Tx to Rx */
	msleep(200);

	good_cnt = hns3_lb_check_rx_ring(priv, HNS3_NIC_LB_TEST_PKT_NUM);
	if (good_cnt != HNS3_NIC_LB_TEST_PKT_NUM) {
		ret_val = HNS3_NIC_LB_TEST_RX_CNT_ERR;
		netdev_err(ndev, "mode %d recv fail, cnt=0x%x, budget=0x%x\n",
			   mode, good_cnt, HNS3_NIC_LB_TEST_PKT_NUM);
	}

out:
	hns3_lb_clear_tx_ring(priv, HNS3_NIC_LB_TEST_RING_ID,
			      HNS3_NIC_LB_TEST_RING_ID,
			      HNS3_NIC_LB_TEST_PKT_NUM);

	kfree_skb(skb);
	return ret_val;
}

/**
 * hns3_self_test - self test
 * @ndev: net device
 * @eth_test: test cmd
 * @data: test result
 */
static void hns3_self_test(struct net_device *ndev,
			   struct ethtool_test *eth_test, u64 *data)
{
	struct hns3_nic_priv *priv = netdev_priv(ndev);
	struct hnae3_handle *h = priv->ae_handle;
	int st_param[HNS3_SELF_TEST_TYPE_NUM][2];
	bool if_running = netif_running(ndev);
#if IS_ENABLED(CONFIG_VLAN_8021Q)
	bool dis_vlan_filter;
#endif
	int test_index = 0;
	u32 i;

	/* Only do offline selftest, or pass by default */
	if (eth_test->flags != ETH_TEST_FL_OFFLINE)
		return;

	st_param[HNAE3_MAC_INTER_LOOP_MAC][0] = HNAE3_MAC_INTER_LOOP_MAC;
	st_param[HNAE3_MAC_INTER_LOOP_MAC][1] =
			h->flags & HNAE3_SUPPORT_MAC_LOOPBACK;

	if (if_running)
		dev_close(ndev);

#if IS_ENABLED(CONFIG_VLAN_8021Q)
	/* Disable the vlan filter because the selftest does not support it */
	dis_vlan_filter = (ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
				h->ae_algo->ops->enable_vlan_filter;
	if (dis_vlan_filter)
		h->ae_algo->ops->enable_vlan_filter(h, false);
#endif

	set_bit(HNS3_NIC_STATE_TESTING, &priv->state);

	for (i = 0; i < HNS3_SELF_TEST_TYPE_NUM; i++) {
		enum hnae3_loop loop_type = (enum hnae3_loop)st_param[i][0];

		if (!st_param[i][1])
			continue;

		data[test_index] = hns3_lp_up(ndev, loop_type);
		if (!data[test_index]) {
			data[test_index] = hns3_lp_run_test(ndev, loop_type);
			hns3_lp_down(ndev, loop_type);
		}

		if (data[test_index])
			eth_test->flags |= ETH_TEST_FL_FAILED;

		test_index++;
	}

	clear_bit(HNS3_NIC_STATE_TESTING, &priv->state);

#if IS_ENABLED(CONFIG_VLAN_8021Q)
	if (dis_vlan_filter)
		h->ae_algo->ops->enable_vlan_filter(h, true);
#endif

	if (if_running)
		dev_open(ndev);
}

static int hns3_get_sset_count(struct net_device *netdev, int stringset)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	const struct hnae3_ae_ops *ops = h->ae_algo->ops;

	if (!ops->get_sset_count)
		return -EOPNOTSUPP;

	switch (stringset) {
	case ETH_SS_STATS:
		return ((HNS3_TQP_STATS_COUNT * h->kinfo.num_tqps) +
			ops->get_sset_count(h, stringset));

	case ETH_SS_TEST:
		return ops->get_sset_count(h, stringset);
	}

	return 0;
}

static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats,
				 u32 stat_count, u32 num_tqps, const char *prefix)
{
#define MAX_PREFIX_SIZE (6 + 4)
	u32 size_left;
	u32 i, j;
	u32 n1;

	for (i = 0; i < num_tqps; i++) {
		for (j = 0; j < stat_count; j++) {
			data[ETH_GSTRING_LEN - 1] = '\0';

			/* first, prepend the prefix string */
			n1 = snprintf(data, MAX_PREFIX_SIZE, "%s#%d_",
				      prefix, i);
			n1 = min_t(uint, n1, MAX_PREFIX_SIZE - 1);
			size_left = (ETH_GSTRING_LEN - 1) - n1;

			/* now, concatenate the stats string to it */
			strncat(data, stats[j].stats_string, size_left);
			data += ETH_GSTRING_LEN;
		}
	}

	return data;
}

static u8 *hns3_get_strings_tqps(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	const char tx_prefix[] = "txq";
	const char rx_prefix[] = "rxq";

	/* get strings for Tx */
	data = hns3_update_strings(data, hns3_txq_stats, HNS3_TXQ_STATS_COUNT,
				   kinfo->num_tqps, tx_prefix);

	/* get strings for Rx */
	data = hns3_update_strings(data, hns3_rxq_stats, HNS3_RXQ_STATS_COUNT,
				   kinfo->num_tqps, rx_prefix);

	return data;
}

static void hns3_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	const struct hnae3_ae_ops *ops = h->ae_algo->ops;
	char *buff = (char *)data;

	if (!ops->get_strings)
		return;

	switch (stringset) {
	case ETH_SS_STATS:
		buff = hns3_get_strings_tqps(h, buff);
		h->ae_algo->ops->get_strings(h, stringset, (u8 *)buff);
		break;
	case ETH_SS_TEST:
		ops->get_strings(h, stringset, data);
		break;
	}
}

static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data)
{
	struct hns3_nic_priv *nic_priv = (struct hns3_nic_priv *)handle->priv;
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hns3_enet_ring *ring;
	u8 *stat;
	int i, j;

	/* get stats for Tx */
	for (i = 0; i < kinfo->num_tqps; i++) {
		ring = nic_priv->ring_data[i].ring;
		for (j = 0; j < HNS3_TXQ_STATS_COUNT; j++) {
			stat = (u8 *)ring + hns3_txq_stats[j].stats_offset;
			*data++ = *(u64 *)stat;
		}
	}

	/* get stats for Rx */
	for (i = 0; i < kinfo->num_tqps; i++) {
		ring = nic_priv->ring_data[i + kinfo->num_tqps].ring;
		for (j = 0; j < HNS3_RXQ_STATS_COUNT; j++) {
			stat = (u8 *)ring + hns3_rxq_stats[j].stats_offset;
			*data++ = *(u64 *)stat;
		}
	}

	return data;
}

/* hns3_get_stats - get detailed statistics.
 * @netdev: net device
 * @stats: statistics info.
 * @data: statistics data.
 */
static void hns3_get_stats(struct net_device *netdev,
			   struct ethtool_stats *stats, u64 *data)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	u64 *p = data;

	if (!h->ae_algo->ops->get_stats || !h->ae_algo->ops->update_stats) {
		netdev_err(netdev, "could not get any statistics\n");
		return;
	}

	h->ae_algo->ops->update_stats(h, &netdev->stats);

	/* get per-queue stats */
	p = hns3_get_stats_tqps(h, p);

	/* get MAC & other misc hardware stats */
	h->ae_algo->ops->get_stats(h, p);
}

static void hns3_get_drvinfo(struct net_device *netdev,
			     struct ethtool_drvinfo *drvinfo)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;

	strncpy(drvinfo->version, hns3_driver_version,
		sizeof(drvinfo->version));
	drvinfo->version[sizeof(drvinfo->version) - 1] = '\0';

	strncpy(drvinfo->driver, h->pdev->driver->name,
		sizeof(drvinfo->driver));
	drvinfo->driver[sizeof(drvinfo->driver) - 1] = '\0';

	strncpy(drvinfo->bus_info, pci_name(h->pdev),
		sizeof(drvinfo->bus_info));
	drvinfo->bus_info[ETHTOOL_BUSINFO_LEN - 1] = '\0';

	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "0x%08x",
		 priv->ae_handle->ae_algo->ops->get_fw_version(h));
}

static u32 hns3_get_link(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo && h->ae_algo->ops && h->ae_algo->ops->get_status)
		return h->ae_algo->ops->get_status(h);
	else
		return 0;
}

static void hns3_get_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *param)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;
	int queue_num = h->kinfo.num_tqps;

	param->tx_max_pending = HNS3_RING_MAX_PENDING;
	param->rx_max_pending = HNS3_RING_MAX_PENDING;

	param->tx_pending = priv->ring_data[0].ring->desc_num;
	param->rx_pending = priv->ring_data[queue_num].ring->desc_num;
}

static void hns3_get_pauseparam(struct net_device *netdev,
				struct ethtool_pauseparam *param)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo && h->ae_algo->ops && h->ae_algo->ops->get_pauseparam)
		h->ae_algo->ops->get_pauseparam(h, &param->autoneg,
						&param->rx_pause,
						&param->tx_pause);
}

static int hns3_set_pauseparam(struct net_device *netdev,
			       struct ethtool_pauseparam *param)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->set_pauseparam)
		return h->ae_algo->ops->set_pauseparam(h, param->autoneg,
						       param->rx_pause,
						       param->tx_pause);
	return -EOPNOTSUPP;
}

static int hns3_get_link_ksettings(struct net_device *netdev,
				   struct ethtool_link_ksettings *cmd)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	u32 flowctrl_adv = 0;
	u8 link_stat;

	if (!h->ae_algo || !h->ae_algo->ops)
		return -EOPNOTSUPP;

	/* 1.auto_neg & speed & duplex from cmd */
	if (netdev->phydev) {
		phy_ethtool_ksettings_get(netdev->phydev, cmd);

		return 0;
	}

	if (h->ae_algo->ops->get_ksettings_an_result)
		h->ae_algo->ops->get_ksettings_an_result(h,
							 &cmd->base.autoneg,
							 &cmd->base.speed,
							 &cmd->base.duplex);
	else
		return -EOPNOTSUPP;

	link_stat = hns3_get_link(netdev);
	if (!link_stat) {
		cmd->base.speed = SPEED_UNKNOWN;
		cmd->base.duplex = DUPLEX_UNKNOWN;
	}

	/* 2.get link mode and port type */
	if (h->ae_algo->ops->get_link_mode)
		h->ae_algo->ops->get_link_mode(h,
					       cmd->link_modes.supported,
					       cmd->link_modes.advertising);

	cmd->base.port = PORT_NONE;
	if (h->ae_algo->ops->get_port_type)
		h->ae_algo->ops->get_port_type(h,
					       &cmd->base.port);

	/* 3.mdix_ctrl&mdix get from phy reg */
	if (h->ae_algo->ops->get_mdix_mode)
		h->ae_algo->ops->get_mdix_mode(h, &cmd->base.eth_tp_mdix_ctrl,
					       &cmd->base.eth_tp_mdix);
	/* 4.mdio_support */
	cmd->base.mdio_support = ETH_MDIO_SUPPORTS_C22;

	/* 5.get flow control settings */
	if (h->ae_algo->ops->get_flowctrl_adv)
		h->ae_algo->ops->get_flowctrl_adv(h, &flowctrl_adv);

	if (flowctrl_adv & ADVERTISED_Pause)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     Pause);

	if (flowctrl_adv & ADVERTISED_Asym_Pause)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     Asym_Pause);

	return 0;
}

static int hns3_set_link_ksettings(struct net_device *netdev,
				   const struct ethtool_link_ksettings *cmd)
{
	/* Only support ksettings_set for netdev with phy attached for now */
	if (netdev->phydev)
		return phy_ethtool_ksettings_set(netdev->phydev, cmd);

	return -EOPNOTSUPP;
}

static u32 hns3_get_rss_key_size(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (!h->ae_algo || !h->ae_algo->ops ||
	    !h->ae_algo->ops->get_rss_key_size)
		return 0;

	return h->ae_algo->ops->get_rss_key_size(h);
}

static u32 hns3_get_rss_indir_size(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (!h->ae_algo || !h->ae_algo->ops ||
	    !h->ae_algo->ops->get_rss_indir_size)
		return 0;

	return h->ae_algo->ops->get_rss_indir_size(h);
}

static int hns3_get_rss(struct net_device *netdev, u32 *indir, u8 *key,
			u8 *hfunc)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->get_rss)
		return -EOPNOTSUPP;

	return h->ae_algo->ops->get_rss(h, indir, key, hfunc);
}

static int hns3_set_rss(struct net_device *netdev, const u32 *indir,
			const u8 *key, const u8 hfunc)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->set_rss)
		return -EOPNOTSUPP;

	/* currently we only support Toeplitz hash */
	if ((hfunc != ETH_RSS_HASH_NO_CHANGE) && (hfunc != ETH_RSS_HASH_TOP)) {
		netdev_err(netdev,
			   "hash func not supported (only Toeplitz hash)\n");
		return -EOPNOTSUPP;
	}
	if (!indir) {
		netdev_err(netdev,
			   "set rss failed because indir is empty\n");
		return -EOPNOTSUPP;
	}

	return h->ae_algo->ops->set_rss(h, indir, key, hfunc);
}

static int hns3_get_rxnfc(struct net_device *netdev,
			  struct ethtool_rxnfc *cmd,
			  u32 *rule_locs)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->get_rss_tuple)
		return -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = h->kinfo.rss_size;
		break;
	case ETHTOOL_GRXFH:
		return h->ae_algo->ops->get_rss_tuple(h, cmd);
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

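/* Apply a new descriptor count to every Tx and Rx ring and reinitialize them */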
static int hns3_change_all_ring_bd_num(struct hns3_nic_priv *priv,
				       u32 new_desc_num)
{
	struct hnae3_handle *h = priv->ae_handle;
	int i;

	h->kinfo.num_desc = new_desc_num;

	for (i = 0; i < h->kinfo.num_tqps * 2; i++)
		priv->ring_data[i].ring->desc_num = new_desc_num;

	return hns3_init_all_ring(priv);
}

static int hns3_set_ringparam(struct net_device *ndev,
			      struct ethtool_ringparam *param)
{
	struct hns3_nic_priv *priv = netdev_priv(ndev);
	struct hnae3_handle *h = priv->ae_handle;
	bool if_running = netif_running(ndev);
	u32 old_desc_num, new_desc_num;
	int ret;

	if (param->rx_mini_pending || param->rx_jumbo_pending)
		return -EINVAL;

	if (param->tx_pending != param->rx_pending) {
		netdev_err(ndev,
			   "Descriptors of tx and rx must be equal\n");
		return -EINVAL;
	}

	if (param->tx_pending > HNS3_RING_MAX_PENDING ||
	    param->tx_pending < HNS3_RING_MIN_PENDING) {
		netdev_err(ndev,
			   "Descriptors requested (Tx/Rx: %d) out of range [%d-%d]\n",
			   param->tx_pending, HNS3_RING_MIN_PENDING,
			   HNS3_RING_MAX_PENDING);
		return -EINVAL;
	}

	new_desc_num = param->tx_pending;

	/* Hardware requires that the descriptor count be a multiple of eight */
	new_desc_num = ALIGN(new_desc_num, HNS3_RING_BD_MULTIPLE);
	old_desc_num = h->kinfo.num_desc;
	if (old_desc_num == new_desc_num)
		return 0;

	netdev_info(ndev,
		    "Changing descriptor count from %d to %d.\n",
		    old_desc_num, new_desc_num);

	if (if_running)
		dev_close(ndev);

	ret = hns3_uninit_all_ring(priv);
	if (ret)
		return ret;

	ret = hns3_change_all_ring_bd_num(priv, new_desc_num);
	if (ret) {
		ret = hns3_change_all_ring_bd_num(priv, old_desc_num);
		if (ret) {
			netdev_err(ndev,
				   "Revert to old bd num fail, ret=%d.\n", ret);
			return ret;
		}
	}

	if (if_running)
		ret = dev_open(ndev);

	return ret;
}

static int hns3_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->set_rss_tuple)
		return -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_SRXFH:
		return h->ae_algo->ops->set_rss_tuple(h, cmd);
	default:
		return -EOPNOTSUPP;
	}
}

static int hns3_nway_reset(struct net_device *netdev)
{
	struct phy_device *phy = netdev->phydev;

	if (!netif_running(netdev))
		return 0;

	/* Only support nway_reset for netdev with phy attached for now */
	if (!phy)
		return -EOPNOTSUPP;

	if (phy->autoneg != AUTONEG_ENABLE)
		return -EINVAL;

	return genphy_restart_aneg(phy);
}

static void hns3_get_channels(struct net_device *netdev,
			      struct ethtool_channels *ch)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->get_channels)
		h->ae_algo->ops->get_channels(h, ch);
}

static int hns3_get_coalesce_per_queue(struct net_device *netdev, u32 queue,
				       struct ethtool_coalesce *cmd)
{
	struct hns3_enet_tqp_vector *tx_vector, *rx_vector;
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;
	u16 queue_num = h->kinfo.num_tqps;

	if (queue >= queue_num) {
		netdev_err(netdev,
			   "Invalid queue value %d! Queue max id=%d\n",
			   queue, queue_num - 1);
		return -EINVAL;
	}

	tx_vector = priv->ring_data[queue].ring->tqp_vector;
	rx_vector = priv->ring_data[queue_num + queue].ring->tqp_vector;

	cmd->use_adaptive_tx_coalesce =
			tx_vector->tx_group.coal.gl_adapt_enable;
	cmd->use_adaptive_rx_coalesce =
			rx_vector->rx_group.coal.gl_adapt_enable;

	cmd->tx_coalesce_usecs = tx_vector->tx_group.coal.int_gl;
	cmd->rx_coalesce_usecs = rx_vector->rx_group.coal.int_gl;

	cmd->tx_coalesce_usecs_high = h->kinfo.int_rl_setting;
	cmd->rx_coalesce_usecs_high = h->kinfo.int_rl_setting;

	return 0;
}

static int hns3_get_coalesce(struct net_device *netdev,
			     struct ethtool_coalesce *cmd)
{
	return hns3_get_coalesce_per_queue(netdev, 0, cmd);
}

static int hns3_check_gl_coalesce_para(struct net_device *netdev,
				       struct ethtool_coalesce *cmd)
{
	u32 rx_gl, tx_gl;

	if (cmd->rx_coalesce_usecs > HNS3_INT_GL_MAX) {
		netdev_err(netdev,
			   "Invalid rx-usecs value, rx-usecs range is 0-%d\n",
			   HNS3_INT_GL_MAX);
		return -EINVAL;
	}

	if (cmd->tx_coalesce_usecs > HNS3_INT_GL_MAX) {
		netdev_err(netdev,
			   "Invalid tx-usecs value, tx-usecs range is 0-%d\n",
			   HNS3_INT_GL_MAX);
		return -EINVAL;
	}

	rx_gl = hns3_gl_round_down(cmd->rx_coalesce_usecs);
	if (rx_gl != cmd->rx_coalesce_usecs) {
		netdev_info(netdev,
			    "rx_usecs(%d) rounded down to %d, because it must be a multiple of 2.\n",
			    cmd->rx_coalesce_usecs, rx_gl);
	}

	tx_gl = hns3_gl_round_down(cmd->tx_coalesce_usecs);
	if (tx_gl != cmd->tx_coalesce_usecs) {
		netdev_info(netdev,
			    "tx_usecs(%d) rounded down to %d, because it must be a multiple of 2.\n",
			    cmd->tx_coalesce_usecs, tx_gl);
	}

	return 0;
}

static int hns3_check_rl_coalesce_para(struct net_device *netdev,
				       struct ethtool_coalesce *cmd)
{
	u32 rl;

	if (cmd->tx_coalesce_usecs_high != cmd->rx_coalesce_usecs_high) {
		netdev_err(netdev,
			   "tx_usecs_high must be the same as rx_usecs_high.\n");
		return -EINVAL;
	}

	if (cmd->rx_coalesce_usecs_high > HNS3_INT_RL_MAX) {
		netdev_err(netdev,
			   "Invalid usecs_high value, usecs_high range is 0-%d\n",
			   HNS3_INT_RL_MAX);
		return -EINVAL;
	}

	rl = hns3_rl_round_down(cmd->rx_coalesce_usecs_high);
	if (rl != cmd->rx_coalesce_usecs_high) {
		netdev_info(netdev,
			    "usecs_high(%d) rounded down to %d, because it must be a multiple of 4.\n",
			    cmd->rx_coalesce_usecs_high, rl);
	}

	return 0;
}

static int hns3_check_coalesce_para(struct net_device *netdev,
				    struct ethtool_coalesce *cmd)
{
	int ret;

	ret = hns3_check_gl_coalesce_para(netdev, cmd);
	if (ret) {
		netdev_err(netdev,
			   "Check gl coalesce param fail. ret = %d\n", ret);
		return ret;
	}

	ret = hns3_check_rl_coalesce_para(netdev, cmd);
	if (ret) {
		netdev_err(netdev,
			   "Check rl coalesce param fail. ret = %d\n", ret);
		return ret;
	}

	if (cmd->use_adaptive_tx_coalesce == 1 ||
	    cmd->use_adaptive_rx_coalesce == 1) {
		netdev_info(netdev,
			    "adaptive-tx=%d and adaptive-rx=%d, tx_usecs or rx_usecs will be changed dynamically.\n",
			    cmd->use_adaptive_tx_coalesce,
			    cmd->use_adaptive_rx_coalesce);
	}

	return 0;
}

static void hns3_set_coalesce_per_queue(struct net_device *netdev,
					struct ethtool_coalesce *cmd,
					u32 queue)
{
	struct hns3_enet_tqp_vector *tx_vector, *rx_vector;
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;
	int queue_num = h->kinfo.num_tqps;

	tx_vector = priv->ring_data[queue].ring->tqp_vector;
	rx_vector = priv->ring_data[queue_num + queue].ring->tqp_vector;

	tx_vector->tx_group.coal.gl_adapt_enable =
				cmd->use_adaptive_tx_coalesce;
	rx_vector->rx_group.coal.gl_adapt_enable =
				cmd->use_adaptive_rx_coalesce;

	tx_vector->tx_group.coal.int_gl = cmd->tx_coalesce_usecs;
	rx_vector->rx_group.coal.int_gl = cmd->rx_coalesce_usecs;

	hns3_set_vector_coalesce_tx_gl(tx_vector,
				       tx_vector->tx_group.coal.int_gl);
	hns3_set_vector_coalesce_rx_gl(rx_vector,
				       rx_vector->rx_group.coal.int_gl);

	hns3_set_vector_coalesce_rl(tx_vector, h->kinfo.int_rl_setting);
	hns3_set_vector_coalesce_rl(rx_vector, h->kinfo.int_rl_setting);
}

static int hns3_set_coalesce(struct net_device *netdev,
			     struct ethtool_coalesce *cmd)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	u16 queue_num = h->kinfo.num_tqps;
	int ret;
	int i;

	ret = hns3_check_coalesce_para(netdev, cmd);
	if (ret)
		return ret;

	h->kinfo.int_rl_setting =
		hns3_rl_round_down(cmd->rx_coalesce_usecs_high);

	for (i = 0; i < queue_num; i++)
		hns3_set_coalesce_per_queue(netdev, cmd, i);

	return 0;
}

static int hns3_get_regs_len(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (!h->ae_algo->ops->get_regs_len)
		return -EOPNOTSUPP;

	return h->ae_algo->ops->get_regs_len(h);
}

static void hns3_get_regs(struct net_device *netdev,
			  struct ethtool_regs *cmd, void *data)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (!h->ae_algo->ops->get_regs)
		return;

	h->ae_algo->ops->get_regs(h, &cmd->version, data);
}

static int hns3_set_phys_id(struct net_device *netdev,
			    enum ethtool_phys_id_state state)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->set_led_id)
		return -EOPNOTSUPP;

	return h->ae_algo->ops->set_led_id(h, state);
}

/* Reduced ethtool operation set used for VF devices */
static const struct ethtool_ops hns3vf_ethtool_ops = {
	.get_drvinfo = hns3_get_drvinfo,
	.get_ringparam = hns3_get_ringparam,
	.set_ringparam = hns3_set_ringparam,
	.get_strings = hns3_get_strings,
	.get_ethtool_stats = hns3_get_stats,
	.get_sset_count = hns3_get_sset_count,
	.get_rxnfc = hns3_get_rxnfc,
	.get_rxfh_key_size = hns3_get_rss_key_size,
	.get_rxfh_indir_size = hns3_get_rss_indir_size,
	.get_rxfh = hns3_get_rss,
	.set_rxfh = hns3_set_rss,
	.get_link_ksettings = hns3_get_link_ksettings,
	.get_channels = hns3_get_channels,
	.get_coalesce = hns3_get_coalesce,
	.set_coalesce = hns3_set_coalesce,
	.get_link = hns3_get_link,
};

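/* Full ethtool operation set used for PF devices */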
static const struct ethtool_ops hns3_ethtool_ops = {
	.self_test = hns3_self_test,
	.get_drvinfo = hns3_get_drvinfo,
	.get_link = hns3_get_link,
	.get_ringparam = hns3_get_ringparam,
	.set_ringparam = hns3_set_ringparam,
	.get_pauseparam = hns3_get_pauseparam,
	.set_pauseparam = hns3_set_pauseparam,
	.get_strings = hns3_get_strings,
	.get_ethtool_stats = hns3_get_stats,
	.get_sset_count = hns3_get_sset_count,
	.get_rxnfc = hns3_get_rxnfc,
	.set_rxnfc = hns3_set_rxnfc,
	.get_rxfh_key_size = hns3_get_rss_key_size,
	.get_rxfh_indir_size = hns3_get_rss_indir_size,
	.get_rxfh = hns3_get_rss,
	.set_rxfh = hns3_set_rss,
	.get_link_ksettings = hns3_get_link_ksettings,
	.set_link_ksettings = hns3_set_link_ksettings,
	.nway_reset = hns3_nway_reset,
	.get_channels = hns3_get_channels,
	.set_channels = hns3_set_channels,
	.get_coalesce = hns3_get_coalesce,
	.set_coalesce = hns3_set_coalesce,
	.get_regs_len = hns3_get_regs_len,
	.get_regs = hns3_get_regs,
	.set_phys_id = hns3_set_phys_id,
};

void hns3_ethtool_set_ops(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->flags & HNAE3_SUPPORT_VF)
		netdev->ethtool_ops = &hns3vf_ethtool_ops;
	else
		netdev->ethtool_ops = &hns3_ethtool_ops;
}