// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Ethernet driver
 *
 * Copyright (C) 2020 Marvell.
 *
 */

#include <linux/pci.h>
#include <linux/ethtool.h>
#include <linux/stddef.h>
#include <linux/etherdevice.h>
#include <linux/log2.h>
#include <linux/net_tstamp.h>
#include <linux/linkmode.h>

#include "otx2_common.h"
#include "otx2_ptp.h"

#define DRV_NAME	"octeontx2-nicpf"
#define DRV_VF_NAME	"octeontx2-nicvf"

struct otx2_stat {
	char name[ETH_GSTRING_LEN];
	unsigned int index;
};

/* HW device stats */
#define OTX2_DEV_STAT(stat) { \
	.name = #stat, \
	.index = offsetof(struct otx2_dev_stats, stat) / sizeof(u64), \
}

enum link_mode {
	OTX2_MODE_SUPPORTED,
	OTX2_MODE_ADVERTISED
};

static const struct otx2_stat otx2_dev_stats[] = {
	OTX2_DEV_STAT(rx_ucast_frames),
	OTX2_DEV_STAT(rx_bcast_frames),
	OTX2_DEV_STAT(rx_mcast_frames),

	OTX2_DEV_STAT(tx_ucast_frames),
	OTX2_DEV_STAT(tx_bcast_frames),
	OTX2_DEV_STAT(tx_mcast_frames),
};

/* Driver level stats */
#define OTX2_DRV_STAT(stat) { \
	.name = #stat, \
	.index = offsetof(struct otx2_drv_stats, stat) / sizeof(atomic_t), \
}

static const struct otx2_stat otx2_drv_stats[] = {
	OTX2_DRV_STAT(rx_fcs_errs),
	OTX2_DRV_STAT(rx_oversize_errs),
	OTX2_DRV_STAT(rx_undersize_errs),
	OTX2_DRV_STAT(rx_csum_errs),
	OTX2_DRV_STAT(rx_len_errs),
	OTX2_DRV_STAT(rx_other_errs),
};

static const struct otx2_stat otx2_queue_stats[] = {
	{ "bytes", 0 },
	{ "frames", 1 },
};

static const unsigned int otx2_n_dev_stats = ARRAY_SIZE(otx2_dev_stats);
static const unsigned int otx2_n_drv_stats = ARRAY_SIZE(otx2_drv_stats);
static const unsigned int otx2_n_queue_stats = ARRAY_SIZE(otx2_queue_stats);

static struct cgx_fw_data *otx2_get_fwdata(struct otx2_nic *pfvf);

static void otx2_get_drvinfo(struct net_device *netdev,
			     struct ethtool_drvinfo *info)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->bus_info, pci_name(pfvf->pdev), sizeof(info->bus_info));
}

static void otx2_get_qset_strings(struct otx2_nic *pfvf, u8 **data, int qset)
{
	int start_qidx = qset * pfvf->hw.rx_queues;
	int qidx, stats;

	for (qidx = 0; qidx < pfvf->hw.rx_queues; qidx++) {
		for (stats = 0; stats < otx2_n_queue_stats; stats++) {
			sprintf(*data, "rxq%d: %s", qidx + start_qidx,
				otx2_queue_stats[stats].name);
			*data += ETH_GSTRING_LEN;
		}
	}
	for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) {
		for (stats = 0; stats < otx2_n_queue_stats; stats++) {
			sprintf(*data, "txq%d: %s", qidx + start_qidx,
				otx2_queue_stats[stats].name);
			*data += ETH_GSTRING_LEN;
		}
	}
}

static void otx2_get_strings(struct net_device *netdev, u32 sset, u8 *data)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	int stats;

	if (sset != ETH_SS_STATS)
		return;

	for (stats = 0; stats < otx2_n_dev_stats; stats++) {
		memcpy(data, otx2_dev_stats[stats].name, ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}

	for (stats = 0; stats < otx2_n_drv_stats; stats++) {
		memcpy(data, otx2_drv_stats[stats].name, ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}

	otx2_get_qset_strings(pfvf, &data, 0);

	for (stats = 0; stats < CGX_RX_STATS_COUNT; stats++) {
		sprintf(data, "cgx_rxstat%d: ", stats);
		data += ETH_GSTRING_LEN;
	}
	for (stats = 0; stats < CGX_TX_STATS_COUNT; stats++) {
		sprintf(data, "cgx_txstat%d: ", stats);
		data += ETH_GSTRING_LEN;
	}

	strcpy(data, "reset_count");
	data += ETH_GSTRING_LEN;
	sprintf(data, "Fec Corrected Errors: ");
	data += ETH_GSTRING_LEN;
	sprintf(data, "Fec Uncorrected Errors: ");
	data += ETH_GSTRING_LEN;
}

static void otx2_get_qset_stats(struct otx2_nic *pfvf,
				struct ethtool_stats *stats, u64 **data)
{
	int stat, qidx;

	if (!pfvf)
		return;
	for (qidx = 0; qidx < pfvf->hw.rx_queues; qidx++) {
		if (!otx2_update_rq_stats(pfvf, qidx)) {
			for (stat = 0; stat < otx2_n_queue_stats; stat++)
				*((*data)++) = 0;
			continue;
		}
		for (stat = 0; stat < otx2_n_queue_stats; stat++)
			*((*data)++) = ((u64 *)&pfvf->qset.rq[qidx].stats)
				[otx2_queue_stats[stat].index];
	}

	for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) {
		if (!otx2_update_sq_stats(pfvf, qidx)) {
			for (stat = 0; stat < otx2_n_queue_stats; stat++)
				*((*data)++) = 0;
			continue;
		}
		for (stat = 0; stat < otx2_n_queue_stats; stat++)
			*((*data)++) = ((u64 *)&pfvf->qset.sq[qidx].stats)
				[otx2_queue_stats[stat].index];
	}
}

static int otx2_get_phy_fec_stats(struct otx2_nic *pfvf)
{
	struct msg_req *req;
	int rc = -ENOMEM;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_cgx_get_phy_fec_stats(&pfvf->mbox);
	if (!req)
		goto end;

	if (!otx2_sync_mbox_msg(&pfvf->mbox))
		rc = 0;
end:
	mutex_unlock(&pfvf->mbox.lock);
	return rc;
}

/* Get device and per queue statistics */
static void otx2_get_ethtool_stats(struct net_device *netdev,
				   struct ethtool_stats *stats, u64 *data)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	u64 fec_corr_blks, fec_uncorr_blks;
	struct cgx_fw_data *rsp;
	int stat;

	otx2_get_dev_stats(pfvf);
	for (stat = 0; stat < otx2_n_dev_stats; stat++)
		*(data++) = ((u64 *)&pfvf->hw.dev_stats)
				[otx2_dev_stats[stat].index];

	for (stat = 0; stat < otx2_n_drv_stats; stat++)
		*(data++) = atomic_read(&((atomic_t *)&pfvf->hw.drv_stats)
						[otx2_drv_stats[stat].index]);

	otx2_get_qset_stats(pfvf, stats, &data);
	otx2_update_lmac_stats(pfvf);
	for (stat = 0; stat < CGX_RX_STATS_COUNT; stat++)
		*(data++) = pfvf->hw.cgx_rx_stats[stat];
	for (stat = 0; stat < CGX_TX_STATS_COUNT; stat++)
		*(data++) = pfvf->hw.cgx_tx_stats[stat];
	*(data++) = pfvf->reset_count;

	fec_corr_blks = pfvf->hw.cgx_fec_corr_blks;
	fec_uncorr_blks = pfvf->hw.cgx_fec_uncorr_blks;

	rsp = otx2_get_fwdata(pfvf);
	if (!IS_ERR(rsp) && rsp->fwdata.phy.misc.has_fec_stats &&
	    !otx2_get_phy_fec_stats(pfvf)) {
		/* Fetch fwdata again because it's been recently populated with
		 * latest PHY FEC stats.
		 */
		rsp = otx2_get_fwdata(pfvf);
		if (!IS_ERR(rsp)) {
			struct fec_stats_s *p = &rsp->fwdata.phy.fec_stats;

			if (pfvf->linfo.fec == OTX2_FEC_BASER) {
				fec_corr_blks = p->brfec_corr_blks;
				fec_uncorr_blks = p->brfec_uncorr_blks;
			} else {
				fec_corr_blks = p->rsfec_corr_cws;
				fec_uncorr_blks = p->rsfec_uncorr_cws;
			}
		}
	}

	*(data++) = fec_corr_blks;
	*(data++) = fec_uncorr_blks;
}
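/* Usage sketch (hypothetical interface name eth0): the values filled in
 * above are read with "ethtool -S eth0" and appear in the same order that
 * otx2_get_strings() emits the names: device stats, driver stats, per-queue
 * stats, CGX MAC Rx/Tx stats, reset_count and finally the two FEC block
 * counters.
 */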
static int otx2_get_sset_count(struct net_device *netdev, int sset)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	int qstats_count;

	if (sset != ETH_SS_STATS)
		return -EINVAL;

	qstats_count = otx2_n_queue_stats *
		       (pfvf->hw.rx_queues + pfvf->hw.tx_queues);
	otx2_update_lmac_fec_stats(pfvf);

	return otx2_n_dev_stats + otx2_n_drv_stats + qstats_count +
	       CGX_RX_STATS_COUNT + CGX_TX_STATS_COUNT + OTX2_FEC_STATS_CNT
	       + 1;
}

/* Get the number of queues the device supports and the current queue count */
static void otx2_get_channels(struct net_device *dev,
			      struct ethtool_channels *channel)
{
	struct otx2_nic *pfvf = netdev_priv(dev);

	channel->max_rx = pfvf->hw.max_queues;
	channel->max_tx = pfvf->hw.max_queues;

	channel->rx_count = pfvf->hw.rx_queues;
	channel->tx_count = pfvf->hw.tx_queues;
}

/* Set the number of Tx and Rx queues to be used */
static int otx2_set_channels(struct net_device *dev,
			     struct ethtool_channels *channel)
{
	struct otx2_nic *pfvf = netdev_priv(dev);
	bool if_up = netif_running(dev);
	int err = 0;

	if (!channel->rx_count || !channel->tx_count)
		return -EINVAL;

	if (bitmap_weight(&pfvf->rq_bmap, pfvf->hw.rx_queues) > 1) {
		netdev_err(dev,
			   "Receive queues are in use by TC police action\n");
		return -EINVAL;
	}

	if (if_up)
		dev->netdev_ops->ndo_stop(dev);

	err = otx2_set_real_num_queues(dev, channel->tx_count,
				       channel->rx_count);
	if (err)
		return err;

	pfvf->hw.rx_queues = channel->rx_count;
	pfvf->hw.tx_queues = channel->tx_count;
	pfvf->qset.cq_cnt = pfvf->hw.tx_queues + pfvf->hw.rx_queues;

	if (if_up)
		err = dev->netdev_ops->ndo_open(dev);

	netdev_info(dev, "Setting num Tx rings to %d, Rx rings to %d success\n",
		    pfvf->hw.tx_queues, pfvf->hw.rx_queues);

	return err;
}

static void otx2_get_pauseparam(struct net_device *netdev,
				struct ethtool_pauseparam *pause)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	struct cgx_pause_frm_cfg *req, *rsp;

	if (is_otx2_lbkvf(pfvf->pdev))
		return;

	req = otx2_mbox_alloc_msg_cgx_cfg_pause_frm(&pfvf->mbox);
	if (!req)
		return;

	if (!otx2_sync_mbox_msg(&pfvf->mbox)) {
		rsp = (struct cgx_pause_frm_cfg *)
		      otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
		pause->rx_pause = rsp->rx_pause;
		pause->tx_pause = rsp->tx_pause;
	}
}

static int otx2_set_pauseparam(struct net_device *netdev,
			       struct ethtool_pauseparam *pause)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);

	if (pause->autoneg)
		return -EOPNOTSUPP;

	if (is_otx2_lbkvf(pfvf->pdev))
		return -EOPNOTSUPP;

	if (pause->rx_pause)
		pfvf->flags |= OTX2_FLAG_RX_PAUSE_ENABLED;
	else
		pfvf->flags &= ~OTX2_FLAG_RX_PAUSE_ENABLED;

	if (pause->tx_pause)
		pfvf->flags |= OTX2_FLAG_TX_PAUSE_ENABLED;
	else
		pfvf->flags &= ~OTX2_FLAG_TX_PAUSE_ENABLED;

	return otx2_config_pause_frm(pfvf);
}

static void otx2_get_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *ring)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	struct otx2_qset *qs = &pfvf->qset;

	ring->rx_max_pending = Q_COUNT(Q_SIZE_MAX);
	ring->rx_pending = qs->rqe_cnt ? qs->rqe_cnt : Q_COUNT(Q_SIZE_256);
	ring->tx_max_pending = Q_COUNT(Q_SIZE_MAX);
	ring->tx_pending = qs->sqe_cnt ? qs->sqe_cnt : Q_COUNT(Q_SIZE_4K);
}

static int otx2_set_ringparam(struct net_device *netdev,
			      struct ethtool_ringparam *ring)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	bool if_up = netif_running(netdev);
	struct otx2_qset *qs = &pfvf->qset;
	u32 rx_count, tx_count;

	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
		return -EINVAL;

	/* Permitted lengths are 16, 64, 256, 1K, 4K, 16K, 64K, 256K, 1M */
	rx_count = ring->rx_pending;
	/* On some silicon variants a skid or reserved CQEs are
	 * needed to avoid CQ overflow.
	 */
	if (rx_count < pfvf->hw.rq_skid)
		rx_count = pfvf->hw.rq_skid;
	rx_count = Q_COUNT(Q_SIZE(rx_count, 3));

	/* Due to pipelining, a minimum of 2000 unused SQ CQEs must be
	 * maintained to avoid CQ overflow; hence the 4K minimum size.
	 */
	tx_count = clamp_t(u32, ring->tx_pending,
			   Q_COUNT(Q_SIZE_4K), Q_COUNT(Q_SIZE_MAX));
	tx_count = Q_COUNT(Q_SIZE(tx_count, 3));

	if (tx_count == qs->sqe_cnt && rx_count == qs->rqe_cnt)
		return 0;

	if (if_up)
		netdev->netdev_ops->ndo_stop(netdev);

	/* Assigned to the nearest possible exponent. */
	qs->sqe_cnt = tx_count;
	qs->rqe_cnt = rx_count;

	if (if_up)
		return netdev->netdev_ops->ndo_open(netdev);

	return 0;
}
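/* Usage sketch (hypothetical interface name eth0): ring sizes requested via
 * "ethtool -G eth0 rx 1000 tx 5000" are rounded by Q_SIZE() to one of the
 * permitted lengths above, and Tx is clamped to the 4K minimum, so the
 * values reported back by "ethtool -g eth0" may differ from those requested.
 */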
static int otx2_get_coalesce(struct net_device *netdev,
			     struct ethtool_coalesce *cmd,
			     struct kernel_ethtool_coalesce *kernel_coal,
			     struct netlink_ext_ack *extack)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	struct otx2_hw *hw = &pfvf->hw;

	cmd->rx_coalesce_usecs = hw->cq_time_wait;
	cmd->rx_max_coalesced_frames = hw->cq_ecount_wait;
	cmd->tx_coalesce_usecs = hw->cq_time_wait;
	cmd->tx_max_coalesced_frames = hw->cq_ecount_wait;

	return 0;
}

static int otx2_set_coalesce(struct net_device *netdev,
			     struct ethtool_coalesce *ec,
			     struct kernel_ethtool_coalesce *kernel_coal,
			     struct netlink_ext_ack *extack)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	struct otx2_hw *hw = &pfvf->hw;
	int qidx;

	if (!ec->rx_max_coalesced_frames || !ec->tx_max_coalesced_frames)
		return 0;

	/* 'cq_time_wait' is an 8-bit value in multiples of 100ns,
	 * so clamp the user-given value to the range of 1 to 25 usec.
	 */
	ec->rx_coalesce_usecs = clamp_t(u32, ec->rx_coalesce_usecs,
					1, CQ_TIMER_THRESH_MAX);
	ec->tx_coalesce_usecs = clamp_t(u32, ec->tx_coalesce_usecs,
					1, CQ_TIMER_THRESH_MAX);

	/* Rx and Tx are mapped to the same CQ; check which one
	 * changed, and if both did, choose the min.
	 */
	if (hw->cq_time_wait == ec->rx_coalesce_usecs)
		hw->cq_time_wait = ec->tx_coalesce_usecs;
	else if (hw->cq_time_wait == ec->tx_coalesce_usecs)
		hw->cq_time_wait = ec->rx_coalesce_usecs;
	else
		hw->cq_time_wait = min_t(u8, ec->rx_coalesce_usecs,
					 ec->tx_coalesce_usecs);

	/* Max ecount_wait supported is a 16-bit value,
	 * so clamp the user-given value to the range of 1 to 64K.
	 */
	ec->rx_max_coalesced_frames = clamp_t(u32, ec->rx_max_coalesced_frames,
					      1, U16_MAX);
	ec->tx_max_coalesced_frames = clamp_t(u32, ec->tx_max_coalesced_frames,
					      1, U16_MAX);

	/* Rx and Tx are mapped to the same CQ; check which one
	 * changed, and if both did, choose the min.
	 */
	if (hw->cq_ecount_wait == ec->rx_max_coalesced_frames)
		hw->cq_ecount_wait = ec->tx_max_coalesced_frames;
	else if (hw->cq_ecount_wait == ec->tx_max_coalesced_frames)
		hw->cq_ecount_wait = ec->rx_max_coalesced_frames;
	else
		hw->cq_ecount_wait = min_t(u16, ec->rx_max_coalesced_frames,
					   ec->tx_max_coalesced_frames);

	if (netif_running(netdev)) {
		for (qidx = 0; qidx < pfvf->hw.cint_cnt; qidx++)
			otx2_config_irq_coalescing(pfvf, qidx);
	}

	return 0;
}
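/* Usage sketch (hypothetical interface name eth0): because Rx and Tx share
 * a CQ, requesting different values for both directions picks the smaller
 * one when neither matches the current setting. For example,
 *   ethtool -C eth0 rx-usecs 10 tx-usecs 4
 * leaves cq_time_wait at 4 usec, and both rx-usecs and tx-usecs then read
 * back as 4 via "ethtool -c eth0".
 */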
static int otx2_get_rss_hash_opts(struct otx2_nic *pfvf,
				  struct ethtool_rxnfc *nfc)
{
	struct otx2_rss_info *rss = &pfvf->hw.rss_info;

	if (!(rss->flowkey_cfg &
	    (NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6)))
		return 0;

	/* Minimum is IPv4 and IPv6, SIP/DIP */
	nfc->data = RXH_IP_SRC | RXH_IP_DST;
	if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_VLAN)
		nfc->data |= RXH_VLAN;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_TCP)
			nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
	case UDP_V4_FLOW:
	case UDP_V6_FLOW:
		if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_UDP)
			nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
	case SCTP_V4_FLOW:
	case SCTP_V6_FLOW:
		if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_SCTP)
			nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
	case AH_ESP_V4_FLOW:
	case AH_ESP_V6_FLOW:
		if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_ESP)
			nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case IPV4_FLOW:
		break;
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IPV6_FLOW:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int otx2_set_rss_hash_opts(struct otx2_nic *pfvf,
				  struct ethtool_rxnfc *nfc)
{
	struct otx2_rss_info *rss = &pfvf->hw.rss_info;
	u32 rxh_l4 = RXH_L4_B_0_1 | RXH_L4_B_2_3;
	u32 rss_cfg = rss->flowkey_cfg;

	if (!rss->enable) {
		netdev_err(pfvf->netdev,
			   "RSS is disabled, cannot change settings\n");
		return -EIO;
	}

	/* Minimum is IPv4 and IPv6, SIP/DIP */
	if (!(nfc->data & RXH_IP_SRC) || !(nfc->data & RXH_IP_DST))
		return -EINVAL;

	if (nfc->data & RXH_VLAN)
		rss_cfg |= NIX_FLOW_KEY_TYPE_VLAN;
	else
		rss_cfg &= ~NIX_FLOW_KEY_TYPE_VLAN;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		/* Different config for v4 and v6 is not supported.
		 * Both of them have to be either 4-tuple or 2-tuple.
		 */
		switch (nfc->data & rxh_l4) {
		case 0:
			rss_cfg &= ~NIX_FLOW_KEY_TYPE_TCP;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			rss_cfg |= NIX_FLOW_KEY_TYPE_TCP;
			break;
		default:
			return -EINVAL;
		}
		break;
	case UDP_V4_FLOW:
	case UDP_V6_FLOW:
		switch (nfc->data & rxh_l4) {
		case 0:
			rss_cfg &= ~NIX_FLOW_KEY_TYPE_UDP;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			rss_cfg |= NIX_FLOW_KEY_TYPE_UDP;
			break;
		default:
			return -EINVAL;
		}
		break;
	case SCTP_V4_FLOW:
	case SCTP_V6_FLOW:
		switch (nfc->data & rxh_l4) {
		case 0:
			rss_cfg &= ~NIX_FLOW_KEY_TYPE_SCTP;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			rss_cfg |= NIX_FLOW_KEY_TYPE_SCTP;
			break;
		default:
			return -EINVAL;
		}
		break;
	case AH_ESP_V4_FLOW:
	case AH_ESP_V6_FLOW:
		switch (nfc->data & rxh_l4) {
		case 0:
			rss_cfg &= ~(NIX_FLOW_KEY_TYPE_ESP |
				     NIX_FLOW_KEY_TYPE_AH);
			rss_cfg |= NIX_FLOW_KEY_TYPE_VLAN |
				   NIX_FLOW_KEY_TYPE_IPV4_PROTO;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			/* If VLAN hashing is also requested for ESP then do
			 * not allow it, because of the hardware's 40-byte
			 * flow key limit.
			 */
			if (rss_cfg & NIX_FLOW_KEY_TYPE_VLAN) {
				netdev_err(pfvf->netdev,
					   "RSS hash of ESP or AH with VLAN is not supported\n");
				return -EOPNOTSUPP;
			}

			rss_cfg |= NIX_FLOW_KEY_TYPE_ESP | NIX_FLOW_KEY_TYPE_AH;
			/* Disable IPv4 proto hashing since IPv6 SA+DA
			 * (32 bytes) and ESP SPI+sequence (8 bytes) use up
			 * the hardware's maximum 40-byte flow key limit.
			 */
			rss_cfg &= ~NIX_FLOW_KEY_TYPE_IPV4_PROTO;
			break;
		default:
			return -EINVAL;
		}
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		rss_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6;
		break;
	default:
		return -EINVAL;
	}

	rss->flowkey_cfg = rss_cfg;
	otx2_set_flowkey_cfg(pfvf);
	return 0;
}
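/* Usage sketch (hypothetical interface name eth0): 4-tuple hashing for TCP,
 * e.g. "ethtool -N eth0 rx-flow-hash tcp4 sdfn", sets NIX_FLOW_KEY_TYPE_TCP,
 * while "ethtool -N eth0 rx-flow-hash tcp4 sd" drops back to 2-tuple; as
 * noted above, v4 and v6 always track each other.
 */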
static int otx2_get_rxnfc(struct net_device *dev,
			  struct ethtool_rxnfc *nfc, u32 *rules)
{
	bool ntuple = !!(dev->features & NETIF_F_NTUPLE);
	struct otx2_nic *pfvf = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (nfc->cmd) {
	case ETHTOOL_GRXRINGS:
		nfc->data = pfvf->hw.rx_queues;
		ret = 0;
		break;
	case ETHTOOL_GRXCLSRLCNT:
		if (netif_running(dev) && ntuple) {
			nfc->rule_cnt = pfvf->flow_cfg->nr_flows;
			ret = 0;
		}
		break;
	case ETHTOOL_GRXCLSRULE:
		if (netif_running(dev) && ntuple)
			ret = otx2_get_flow(pfvf, nfc, nfc->fs.location);
		break;
	case ETHTOOL_GRXCLSRLALL:
		if (netif_running(dev) && ntuple)
			ret = otx2_get_all_flows(pfvf, nfc, rules);
		break;
	case ETHTOOL_GRXFH:
		return otx2_get_rss_hash_opts(pfvf, nfc);
	default:
		break;
	}
	return ret;
}

static int otx2_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *nfc)
{
	bool ntuple = !!(dev->features & NETIF_F_NTUPLE);
	struct otx2_nic *pfvf = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (nfc->cmd) {
	case ETHTOOL_SRXFH:
		ret = otx2_set_rss_hash_opts(pfvf, nfc);
		break;
	case ETHTOOL_SRXCLSRLINS:
		if (netif_running(dev) && ntuple)
			ret = otx2_add_flow(pfvf, nfc);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		if (netif_running(dev) && ntuple)
			ret = otx2_remove_flow(pfvf, nfc->fs.location);
		break;
	default:
		break;
	}

	return ret;
}
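/* Usage sketch (hypothetical interface name eth0): n-tuple filters are only
 * accepted while the interface is up and NETIF_F_NTUPLE is enabled, e.g.
 *   ethtool -K eth0 ntuple on
 *   ethtool -U eth0 flow-type tcp4 dst-port 80 action 2
 * steers TCP port-80 flows to Rx queue 2 via otx2_add_flow().
 */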
static u32 otx2_get_rxfh_key_size(struct net_device *netdev)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	struct otx2_rss_info *rss;

	rss = &pfvf->hw.rss_info;

	return sizeof(rss->key);
}

static u32 otx2_get_rxfh_indir_size(struct net_device *dev)
{
	return MAX_RSS_INDIR_TBL_SIZE;
}

static int otx2_rss_ctx_delete(struct otx2_nic *pfvf, int ctx_id)
{
	struct otx2_rss_info *rss = &pfvf->hw.rss_info;

	otx2_rss_ctx_flow_del(pfvf, ctx_id);
	kfree(rss->rss_ctx[ctx_id]);
	rss->rss_ctx[ctx_id] = NULL;

	return 0;
}

static int otx2_rss_ctx_create(struct otx2_nic *pfvf,
			       u32 *rss_context)
{
	struct otx2_rss_info *rss = &pfvf->hw.rss_info;
	u8 ctx;

	for (ctx = 0; ctx < MAX_RSS_GROUPS; ctx++) {
		if (!rss->rss_ctx[ctx])
			break;
	}
	if (ctx == MAX_RSS_GROUPS)
		return -EINVAL;

	rss->rss_ctx[ctx] = kzalloc(sizeof(*rss->rss_ctx[ctx]), GFP_KERNEL);
	if (!rss->rss_ctx[ctx])
		return -ENOMEM;
	*rss_context = ctx;

	return 0;
}

/* RSS context configuration */
static int otx2_set_rxfh_context(struct net_device *dev, const u32 *indir,
				 const u8 *hkey, const u8 hfunc,
				 u32 *rss_context, bool delete)
{
	struct otx2_nic *pfvf = netdev_priv(dev);
	struct otx2_rss_ctx *rss_ctx;
	struct otx2_rss_info *rss;
	int ret, idx;

	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	if (*rss_context != ETH_RXFH_CONTEXT_ALLOC &&
	    *rss_context >= MAX_RSS_GROUPS)
		return -EINVAL;

	rss = &pfvf->hw.rss_info;

	if (!rss->enable) {
		netdev_err(dev, "RSS is disabled, cannot change settings\n");
		return -EIO;
	}

	if (hkey) {
		memcpy(rss->key, hkey, sizeof(rss->key));
		otx2_set_rss_key(pfvf);
	}
	if (delete)
		return otx2_rss_ctx_delete(pfvf, *rss_context);

	if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
		ret = otx2_rss_ctx_create(pfvf, rss_context);
		if (ret)
			return ret;
	}
	if (indir) {
		rss_ctx = rss->rss_ctx[*rss_context];
		for (idx = 0; idx < rss->rss_size; idx++)
			rss_ctx->ind_tbl[idx] = indir[idx];
	}
	otx2_set_rss_table(pfvf, *rss_context);

	return 0;
}

static int otx2_get_rxfh_context(struct net_device *dev, u32 *indir,
				 u8 *hkey, u8 *hfunc, u32 rss_context)
{
	struct otx2_nic *pfvf = netdev_priv(dev);
	struct otx2_rss_ctx *rss_ctx;
	struct otx2_rss_info *rss;
	int idx, rx_queues;

	rss = &pfvf->hw.rss_info;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;

	if (!indir)
		return 0;

	if (!rss->enable && rss_context == DEFAULT_RSS_CONTEXT_GROUP) {
		rx_queues = pfvf->hw.rx_queues;
		for (idx = 0; idx < MAX_RSS_INDIR_TBL_SIZE; idx++)
			indir[idx] = ethtool_rxfh_indir_default(idx, rx_queues);
		return 0;
	}
	if (rss_context >= MAX_RSS_GROUPS)
		return -ENOENT;

	rss_ctx = rss->rss_ctx[rss_context];
	if (!rss_ctx)
		return -ENOENT;

	if (indir) {
		for (idx = 0; idx < rss->rss_size; idx++)
			indir[idx] = rss_ctx->ind_tbl[idx];
	}
	if (hkey)
		memcpy(hkey, rss->key, sizeof(rss->key));

	return 0;
}

/* Get RSS configuration */
static int otx2_get_rxfh(struct net_device *dev, u32 *indir,
			 u8 *hkey, u8 *hfunc)
{
	return otx2_get_rxfh_context(dev, indir, hkey, hfunc,
				     DEFAULT_RSS_CONTEXT_GROUP);
}

/* Configure RSS table and hash key */
static int otx2_set_rxfh(struct net_device *dev, const u32 *indir,
			 const u8 *hkey, const u8 hfunc)
{
	u32 rss_context = DEFAULT_RSS_CONTEXT_GROUP;

	return otx2_set_rxfh_context(dev, indir, hkey, hfunc, &rss_context, 0);
}
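/* Usage sketch (hypothetical interface name eth0): the default indirection
 * table and hash key are read with "ethtool -x eth0"; "ethtool -X eth0
 * equal 4" spreads flows over the first four Rx queues, and "ethtool -X
 * eth0 context new" allocates one of the MAX_RSS_GROUPS contexts above.
 */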
static u32 otx2_get_msglevel(struct net_device *netdev)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);

	return pfvf->msg_enable;
}

static void otx2_set_msglevel(struct net_device *netdev, u32 val)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);

	pfvf->msg_enable = val;
}

static u32 otx2_get_link(struct net_device *netdev)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);

	/* LBK link is internal and always UP */
	if (is_otx2_lbkvf(pfvf->pdev))
		return 1;
	return pfvf->linfo.link_up;
}

static int otx2_get_ts_info(struct net_device *netdev,
			    struct ethtool_ts_info *info)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);

	if (!pfvf->ptp)
		return ethtool_op_get_ts_info(netdev, info);

	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_RX_SOFTWARE |
				SOF_TIMESTAMPING_SOFTWARE |
				SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;

	info->phc_index = otx2_ptp_clock_index(pfvf);

	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);

	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			   (1 << HWTSTAMP_FILTER_ALL);

	return 0;
}

static struct cgx_fw_data *otx2_get_fwdata(struct otx2_nic *pfvf)
{
	struct cgx_fw_data *rsp = NULL;
	struct msg_req *req;
	int err = 0;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_cgx_get_aux_link_info(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return ERR_PTR(-ENOMEM);
	}

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (!err) {
		rsp = (struct cgx_fw_data *)
			otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
	} else {
		rsp = ERR_PTR(err);
	}

	mutex_unlock(&pfvf->mbox.lock);
	return rsp;
}

static int otx2_get_fecparam(struct net_device *netdev,
			     struct ethtool_fecparam *fecparam)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	struct cgx_fw_data *rsp;
	const int fec[] = {
		ETHTOOL_FEC_OFF,
		ETHTOOL_FEC_BASER,
		ETHTOOL_FEC_RS,
		ETHTOOL_FEC_BASER | ETHTOOL_FEC_RS};
#define FEC_MAX_INDEX 4
	if (pfvf->linfo.fec < FEC_MAX_INDEX)
		fecparam->active_fec = fec[pfvf->linfo.fec];

	rsp = otx2_get_fwdata(pfvf);
	if (IS_ERR(rsp))
		return PTR_ERR(rsp);

	if (rsp->fwdata.supported_fec < FEC_MAX_INDEX) {
		if (!rsp->fwdata.supported_fec)
			fecparam->fec = ETHTOOL_FEC_NONE;
		else
			fecparam->fec = fec[rsp->fwdata.supported_fec];
	}
	return 0;
}

static int otx2_set_fecparam(struct net_device *netdev,
			     struct ethtool_fecparam *fecparam)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	struct mbox *mbox = &pfvf->mbox;
	struct fec_mode *req, *rsp;
	int err = 0, fec = 0;

	switch (fecparam->fec) {
	/* Firmware does not support AUTO mode; treat it as FEC_OFF */
	case ETHTOOL_FEC_OFF:
	case ETHTOOL_FEC_AUTO:
		fec = OTX2_FEC_OFF;
		break;
	case ETHTOOL_FEC_RS:
		fec = OTX2_FEC_RS;
		break;
	case ETHTOOL_FEC_BASER:
		fec = OTX2_FEC_BASER;
		break;
	default:
		netdev_warn(pfvf->netdev, "Unsupported FEC mode: %d",
			    fecparam->fec);
		return -EINVAL;
	}

	if (fec == pfvf->linfo.fec)
		return 0;

	mutex_lock(&mbox->lock);
	req = otx2_mbox_alloc_msg_cgx_set_fec_param(&pfvf->mbox);
	if (!req) {
		err = -ENOMEM;
		goto end;
	}
	req->fec = fec;
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err)
		goto end;

	rsp = (struct fec_mode *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
						   0, &req->hdr);
	if (rsp->fec >= 0)
		pfvf->linfo.fec = rsp->fec;
	else
		err = rsp->fec;
end:
	mutex_unlock(&mbox->lock);
	return err;
}
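/* Usage sketch (hypothetical interface name eth0): "ethtool --set-fec eth0
 * encoding rs" maps to OTX2_FEC_RS, "encoding auto" is treated as "off" as
 * noted above, and the active and supported modes read back via
 * "ethtool --show-fec eth0".
 */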
static void otx2_get_fec_info(u64 index, int req_mode,
			      struct ethtool_link_ksettings *link_ksettings)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(otx2_fec_modes) = { 0, };

	switch (index) {
	case OTX2_FEC_NONE:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
				 otx2_fec_modes);
		break;
	case OTX2_FEC_BASER:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
				 otx2_fec_modes);
		break;
	case OTX2_FEC_RS:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
				 otx2_fec_modes);
		break;
	case OTX2_FEC_BASER | OTX2_FEC_RS:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
				 otx2_fec_modes);
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
				 otx2_fec_modes);
		break;
	}

	/* Add fec modes to existing modes */
	if (req_mode == OTX2_MODE_ADVERTISED)
		linkmode_or(link_ksettings->link_modes.advertising,
			    link_ksettings->link_modes.advertising,
			    otx2_fec_modes);
	else
		linkmode_or(link_ksettings->link_modes.supported,
			    link_ksettings->link_modes.supported,
			    otx2_fec_modes);
}

static void otx2_get_link_mode_info(u64 link_mode_bmap,
				    bool req_mode,
				    struct ethtool_link_ksettings
				    *link_ksettings)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(otx2_link_modes) = { 0, };
	const int otx2_sgmii_features[6] = {
		ETHTOOL_LINK_MODE_10baseT_Half_BIT,
		ETHTOOL_LINK_MODE_10baseT_Full_BIT,
		ETHTOOL_LINK_MODE_100baseT_Half_BIT,
		ETHTOOL_LINK_MODE_100baseT_Full_BIT,
		ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
		ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
	};
	/* CGX link modes to Ethtool link mode mapping */
	const int cgx_link_mode[27] = {
		0, /* SGMII Mode */
		ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
		ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
		ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
		ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
		ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
		0,
		ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
		0,
		0,
		ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
		ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
		ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
		ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
		ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
		ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
		0,
		ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
		0,
		ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
		ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
		ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
		0,
		ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
		ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
		ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
		ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT
	};
	u8 bit;

	for_each_set_bit(bit, (unsigned long *)&link_mode_bmap, 27) {
		/* SGMII mode is set */
		if (bit == 0)
			linkmode_set_bit_array(otx2_sgmii_features,
					       ARRAY_SIZE(otx2_sgmii_features),
					       otx2_link_modes);
		else
			linkmode_set_bit(cgx_link_mode[bit], otx2_link_modes);
	}

	if (req_mode == OTX2_MODE_ADVERTISED)
		linkmode_copy(link_ksettings->link_modes.advertising,
			      otx2_link_modes);
	else
		linkmode_copy(link_ksettings->link_modes.supported,
			      otx2_link_modes);
}
static int otx2_get_link_ksettings(struct net_device *netdev,
				   struct ethtool_link_ksettings *cmd)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	struct cgx_fw_data *rsp = NULL;

	cmd->base.duplex = pfvf->linfo.full_duplex;
	cmd->base.speed = pfvf->linfo.speed;
	cmd->base.autoneg = pfvf->linfo.an;

	rsp = otx2_get_fwdata(pfvf);
	if (IS_ERR(rsp))
		return PTR_ERR(rsp);

	if (rsp->fwdata.supported_an)
		ethtool_link_ksettings_add_link_mode(cmd,
						     supported,
						     Autoneg);

	otx2_get_link_mode_info(rsp->fwdata.advertised_link_modes,
				OTX2_MODE_ADVERTISED, cmd);
	otx2_get_fec_info(rsp->fwdata.advertised_fec,
			  OTX2_MODE_ADVERTISED, cmd);
	otx2_get_link_mode_info(rsp->fwdata.supported_link_modes,
				OTX2_MODE_SUPPORTED, cmd);
	otx2_get_fec_info(rsp->fwdata.supported_fec,
			  OTX2_MODE_SUPPORTED, cmd);
	return 0;
}

static void otx2_get_advertised_mode(const struct ethtool_link_ksettings *cmd,
				     u64 *mode)
{
	u32 bit_pos;

	/* Firmware does not support requesting multiple advertised modes;
	 * return the first set bit.
	 */
	bit_pos = find_first_bit(cmd->link_modes.advertising,
				 __ETHTOOL_LINK_MODE_MASK_NBITS);
	if (bit_pos != __ETHTOOL_LINK_MODE_MASK_NBITS)
		*mode = bit_pos;
}

static int otx2_set_link_ksettings(struct net_device *netdev,
				   const struct ethtool_link_ksettings *cmd)
{
	struct otx2_nic *pf = netdev_priv(netdev);
	struct ethtool_link_ksettings cur_ks;
	struct cgx_set_link_mode_req *req;
	struct mbox *mbox = &pf->mbox;
	int err = 0;

	memset(&cur_ks, 0, sizeof(struct ethtool_link_ksettings));

	if (!ethtool_validate_speed(cmd->base.speed) ||
	    !ethtool_validate_duplex(cmd->base.duplex))
		return -EINVAL;

	if (cmd->base.autoneg != AUTONEG_ENABLE &&
	    cmd->base.autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	otx2_get_link_ksettings(netdev, &cur_ks);

	/* Check requested modes against modes supported by hardware */
	if (!bitmap_subset(cmd->link_modes.advertising,
			   cur_ks.link_modes.supported,
			   __ETHTOOL_LINK_MODE_MASK_NBITS))
		return -EINVAL;

	mutex_lock(&mbox->lock);
	req = otx2_mbox_alloc_msg_cgx_set_link_mode(&pf->mbox);
	if (!req) {
		err = -ENOMEM;
		goto end;
	}

	req->args.speed = cmd->base.speed;
	/* Firmware expects 1 for half duplex and 0 for full duplex,
	 * hence the inversion.
	 */
	req->args.duplex = cmd->base.duplex ^ 0x1;
	req->args.an = cmd->base.autoneg;
	otx2_get_advertised_mode(cmd, &req->args.mode);

	err = otx2_sync_mbox_msg(&pf->mbox);
end:
	mutex_unlock(&mbox->lock);
	return err;
}

static const struct ethtool_ops otx2_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES,
	.get_link		= otx2_get_link,
	.get_drvinfo		= otx2_get_drvinfo,
	.get_strings		= otx2_get_strings,
	.get_ethtool_stats	= otx2_get_ethtool_stats,
	.get_sset_count		= otx2_get_sset_count,
	.set_channels		= otx2_set_channels,
	.get_channels		= otx2_get_channels,
	.get_ringparam		= otx2_get_ringparam,
	.set_ringparam		= otx2_set_ringparam,
	.get_coalesce		= otx2_get_coalesce,
	.set_coalesce		= otx2_set_coalesce,
	.get_rxnfc		= otx2_get_rxnfc,
	.set_rxnfc		= otx2_set_rxnfc,
	.get_rxfh_key_size	= otx2_get_rxfh_key_size,
	.get_rxfh_indir_size	= otx2_get_rxfh_indir_size,
	.get_rxfh		= otx2_get_rxfh,
	.set_rxfh		= otx2_set_rxfh,
	.get_rxfh_context	= otx2_get_rxfh_context,
	.set_rxfh_context	= otx2_set_rxfh_context,
	.get_msglevel		= otx2_get_msglevel,
	.set_msglevel		= otx2_set_msglevel,
	.get_pauseparam		= otx2_get_pauseparam,
	.set_pauseparam		= otx2_set_pauseparam,
	.get_ts_info		= otx2_get_ts_info,
	.get_fecparam		= otx2_get_fecparam,
	.set_fecparam		= otx2_set_fecparam,
	.get_link_ksettings	= otx2_get_link_ksettings,
	.set_link_ksettings	= otx2_set_link_ksettings,
};

void otx2_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &otx2_ethtool_ops;
}
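/* Usage sketch (hypothetical interface name eth0): "ethtool -s eth0 speed
 * 25000 duplex full autoneg off" goes through otx2_set_link_ksettings();
 * because the firmware accepts only one advertised mode, only the first set
 * bit of the requested advertising mask is forwarded in req->args.mode.
 */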
/* VF's ethtool APIs */
static void otx2vf_get_drvinfo(struct net_device *netdev,
			       struct ethtool_drvinfo *info)
{
	struct otx2_nic *vf = netdev_priv(netdev);

	strlcpy(info->driver, DRV_VF_NAME, sizeof(info->driver));
	strlcpy(info->bus_info, pci_name(vf->pdev), sizeof(info->bus_info));
}

static void otx2vf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
{
	struct otx2_nic *vf = netdev_priv(netdev);
	int stats;

	if (sset != ETH_SS_STATS)
		return;

	for (stats = 0; stats < otx2_n_dev_stats; stats++) {
		memcpy(data, otx2_dev_stats[stats].name, ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}

	for (stats = 0; stats < otx2_n_drv_stats; stats++) {
		memcpy(data, otx2_drv_stats[stats].name, ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}

	otx2_get_qset_strings(vf, &data, 0);

	strcpy(data, "reset_count");
	data += ETH_GSTRING_LEN;
}

static void otx2vf_get_ethtool_stats(struct net_device *netdev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct otx2_nic *vf = netdev_priv(netdev);
	int stat;

	otx2_get_dev_stats(vf);
	for (stat = 0; stat < otx2_n_dev_stats; stat++)
		*(data++) = ((u64 *)&vf->hw.dev_stats)
				[otx2_dev_stats[stat].index];

	for (stat = 0; stat < otx2_n_drv_stats; stat++)
		*(data++) = atomic_read(&((atomic_t *)&vf->hw.drv_stats)
						[otx2_drv_stats[stat].index]);

	otx2_get_qset_stats(vf, stats, &data);
	*(data++) = vf->reset_count;
}

static int otx2vf_get_sset_count(struct net_device *netdev, int sset)
{
	struct otx2_nic *vf = netdev_priv(netdev);
	int qstats_count;

	if (sset != ETH_SS_STATS)
		return -EINVAL;

	qstats_count = otx2_n_queue_stats *
		       (vf->hw.rx_queues + vf->hw.tx_queues);

	return otx2_n_dev_stats + otx2_n_drv_stats + qstats_count + 1;
}

static int otx2vf_get_link_ksettings(struct net_device *netdev,
				     struct ethtool_link_ksettings *cmd)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);

	if (is_otx2_lbkvf(pfvf->pdev)) {
		cmd->base.duplex = DUPLEX_FULL;
		cmd->base.speed = SPEED_100000;
	} else {
		return otx2_get_link_ksettings(netdev, cmd);
	}
	return 0;
}
static const struct ethtool_ops otx2vf_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES,
	.get_link		= otx2_get_link,
	.get_drvinfo		= otx2vf_get_drvinfo,
	.get_strings		= otx2vf_get_strings,
	.get_ethtool_stats	= otx2vf_get_ethtool_stats,
	.get_sset_count		= otx2vf_get_sset_count,
	.set_channels		= otx2_set_channels,
	.get_channels		= otx2_get_channels,
	.get_rxnfc		= otx2_get_rxnfc,
	.set_rxnfc		= otx2_set_rxnfc,
	.get_rxfh_key_size	= otx2_get_rxfh_key_size,
	.get_rxfh_indir_size	= otx2_get_rxfh_indir_size,
	.get_rxfh		= otx2_get_rxfh,
	.set_rxfh		= otx2_set_rxfh,
	.get_rxfh_context	= otx2_get_rxfh_context,
	.set_rxfh_context	= otx2_set_rxfh_context,
	.get_ringparam		= otx2_get_ringparam,
	.set_ringparam		= otx2_set_ringparam,
	.get_coalesce		= otx2_get_coalesce,
	.set_coalesce		= otx2_set_coalesce,
	.get_msglevel		= otx2_get_msglevel,
	.set_msglevel		= otx2_set_msglevel,
	.get_pauseparam		= otx2_get_pauseparam,
	.set_pauseparam		= otx2_set_pauseparam,
	.get_link_ksettings	= otx2vf_get_link_ksettings,
};

void otx2vf_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &otx2vf_ethtool_ops;
}
EXPORT_SYMBOL(otx2vf_set_ethtool_ops);