1 // SPDX-License-Identifier: GPL-2.0 2 /* Marvell OcteonTx2 RVU Ethernet driver 3 * 4 * Copyright (C) 2020 Marvell International Ltd. 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License version 2 as 8 * published by the Free Software Foundation. 9 */ 10 11 #include <linux/pci.h> 12 #include <linux/ethtool.h> 13 #include <linux/stddef.h> 14 #include <linux/etherdevice.h> 15 #include <linux/log2.h> 16 #include <linux/net_tstamp.h> 17 #include <linux/linkmode.h> 18 19 #include "otx2_common.h" 20 #include "otx2_ptp.h" 21 22 #define DRV_NAME "octeontx2-nicpf" 23 #define DRV_VF_NAME "octeontx2-nicvf" 24 25 struct otx2_stat { 26 char name[ETH_GSTRING_LEN]; 27 unsigned int index; 28 }; 29 30 /* HW device stats */ 31 #define OTX2_DEV_STAT(stat) { \ 32 .name = #stat, \ 33 .index = offsetof(struct otx2_dev_stats, stat) / sizeof(u64), \ 34 } 35 36 enum link_mode { 37 OTX2_MODE_SUPPORTED, 38 OTX2_MODE_ADVERTISED 39 }; 40 41 static const struct otx2_stat otx2_dev_stats[] = { 42 OTX2_DEV_STAT(rx_ucast_frames), 43 OTX2_DEV_STAT(rx_bcast_frames), 44 OTX2_DEV_STAT(rx_mcast_frames), 45 46 OTX2_DEV_STAT(tx_ucast_frames), 47 OTX2_DEV_STAT(tx_bcast_frames), 48 OTX2_DEV_STAT(tx_mcast_frames), 49 }; 50 51 /* Driver level stats */ 52 #define OTX2_DRV_STAT(stat) { \ 53 .name = #stat, \ 54 .index = offsetof(struct otx2_drv_stats, stat) / sizeof(atomic_t), \ 55 } 56 57 static const struct otx2_stat otx2_drv_stats[] = { 58 OTX2_DRV_STAT(rx_fcs_errs), 59 OTX2_DRV_STAT(rx_oversize_errs), 60 OTX2_DRV_STAT(rx_undersize_errs), 61 OTX2_DRV_STAT(rx_csum_errs), 62 OTX2_DRV_STAT(rx_len_errs), 63 OTX2_DRV_STAT(rx_other_errs), 64 }; 65 66 static const struct otx2_stat otx2_queue_stats[] = { 67 { "bytes", 0 }, 68 { "frames", 1 }, 69 }; 70 71 static const unsigned int otx2_n_dev_stats = ARRAY_SIZE(otx2_dev_stats); 72 static const unsigned int otx2_n_drv_stats = ARRAY_SIZE(otx2_drv_stats); 73 static const unsigned int 
otx2_n_queue_stats = ARRAY_SIZE(otx2_queue_stats); 74 75 static struct cgx_fw_data *otx2_get_fwdata(struct otx2_nic *pfvf); 76 77 static void otx2_get_drvinfo(struct net_device *netdev, 78 struct ethtool_drvinfo *info) 79 { 80 struct otx2_nic *pfvf = netdev_priv(netdev); 81 82 strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); 83 strlcpy(info->bus_info, pci_name(pfvf->pdev), sizeof(info->bus_info)); 84 } 85 86 static void otx2_get_qset_strings(struct otx2_nic *pfvf, u8 **data, int qset) 87 { 88 int start_qidx = qset * pfvf->hw.rx_queues; 89 int qidx, stats; 90 91 for (qidx = 0; qidx < pfvf->hw.rx_queues; qidx++) { 92 for (stats = 0; stats < otx2_n_queue_stats; stats++) { 93 sprintf(*data, "rxq%d: %s", qidx + start_qidx, 94 otx2_queue_stats[stats].name); 95 *data += ETH_GSTRING_LEN; 96 } 97 } 98 for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) { 99 for (stats = 0; stats < otx2_n_queue_stats; stats++) { 100 sprintf(*data, "txq%d: %s", qidx + start_qidx, 101 otx2_queue_stats[stats].name); 102 *data += ETH_GSTRING_LEN; 103 } 104 } 105 } 106 107 static void otx2_get_strings(struct net_device *netdev, u32 sset, u8 *data) 108 { 109 struct otx2_nic *pfvf = netdev_priv(netdev); 110 int stats; 111 112 if (sset != ETH_SS_STATS) 113 return; 114 115 for (stats = 0; stats < otx2_n_dev_stats; stats++) { 116 memcpy(data, otx2_dev_stats[stats].name, ETH_GSTRING_LEN); 117 data += ETH_GSTRING_LEN; 118 } 119 120 for (stats = 0; stats < otx2_n_drv_stats; stats++) { 121 memcpy(data, otx2_drv_stats[stats].name, ETH_GSTRING_LEN); 122 data += ETH_GSTRING_LEN; 123 } 124 125 otx2_get_qset_strings(pfvf, &data, 0); 126 127 for (stats = 0; stats < CGX_RX_STATS_COUNT; stats++) { 128 sprintf(data, "cgx_rxstat%d: ", stats); 129 data += ETH_GSTRING_LEN; 130 } 131 132 for (stats = 0; stats < CGX_TX_STATS_COUNT; stats++) { 133 sprintf(data, "cgx_txstat%d: ", stats); 134 data += ETH_GSTRING_LEN; 135 } 136 137 strcpy(data, "reset_count"); 138 data += ETH_GSTRING_LEN; 139 sprintf(data, "Fec 
Corrected Errors: "); 140 data += ETH_GSTRING_LEN; 141 sprintf(data, "Fec Uncorrected Errors: "); 142 data += ETH_GSTRING_LEN; 143 } 144 145 static void otx2_get_qset_stats(struct otx2_nic *pfvf, 146 struct ethtool_stats *stats, u64 **data) 147 { 148 int stat, qidx; 149 150 if (!pfvf) 151 return; 152 for (qidx = 0; qidx < pfvf->hw.rx_queues; qidx++) { 153 if (!otx2_update_rq_stats(pfvf, qidx)) { 154 for (stat = 0; stat < otx2_n_queue_stats; stat++) 155 *((*data)++) = 0; 156 continue; 157 } 158 for (stat = 0; stat < otx2_n_queue_stats; stat++) 159 *((*data)++) = ((u64 *)&pfvf->qset.rq[qidx].stats) 160 [otx2_queue_stats[stat].index]; 161 } 162 163 for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) { 164 if (!otx2_update_sq_stats(pfvf, qidx)) { 165 for (stat = 0; stat < otx2_n_queue_stats; stat++) 166 *((*data)++) = 0; 167 continue; 168 } 169 for (stat = 0; stat < otx2_n_queue_stats; stat++) 170 *((*data)++) = ((u64 *)&pfvf->qset.sq[qidx].stats) 171 [otx2_queue_stats[stat].index]; 172 } 173 } 174 175 static int otx2_get_phy_fec_stats(struct otx2_nic *pfvf) 176 { 177 struct msg_req *req; 178 int rc = -ENOMEM; 179 180 mutex_lock(&pfvf->mbox.lock); 181 req = otx2_mbox_alloc_msg_cgx_get_phy_fec_stats(&pfvf->mbox); 182 if (!req) 183 goto end; 184 185 if (!otx2_sync_mbox_msg(&pfvf->mbox)) 186 rc = 0; 187 end: 188 mutex_unlock(&pfvf->mbox.lock); 189 return rc; 190 } 191 192 /* Get device and per queue statistics */ 193 static void otx2_get_ethtool_stats(struct net_device *netdev, 194 struct ethtool_stats *stats, u64 *data) 195 { 196 struct otx2_nic *pfvf = netdev_priv(netdev); 197 u64 fec_corr_blks, fec_uncorr_blks; 198 struct cgx_fw_data *rsp; 199 int stat; 200 201 otx2_get_dev_stats(pfvf); 202 for (stat = 0; stat < otx2_n_dev_stats; stat++) 203 *(data++) = ((u64 *)&pfvf->hw.dev_stats) 204 [otx2_dev_stats[stat].index]; 205 206 for (stat = 0; stat < otx2_n_drv_stats; stat++) 207 *(data++) = atomic_read(&((atomic_t *)&pfvf->hw.drv_stats) 208 [otx2_drv_stats[stat].index]); 209 
210 otx2_get_qset_stats(pfvf, stats, &data); 211 otx2_update_lmac_stats(pfvf); 212 for (stat = 0; stat < CGX_RX_STATS_COUNT; stat++) 213 *(data++) = pfvf->hw.cgx_rx_stats[stat]; 214 for (stat = 0; stat < CGX_TX_STATS_COUNT; stat++) 215 *(data++) = pfvf->hw.cgx_tx_stats[stat]; 216 *(data++) = pfvf->reset_count; 217 218 fec_corr_blks = pfvf->hw.cgx_fec_corr_blks; 219 fec_uncorr_blks = pfvf->hw.cgx_fec_uncorr_blks; 220 221 rsp = otx2_get_fwdata(pfvf); 222 if (!IS_ERR(rsp) && rsp->fwdata.phy.misc.has_fec_stats && 223 !otx2_get_phy_fec_stats(pfvf)) { 224 /* Fetch fwdata again because it's been recently populated with 225 * latest PHY FEC stats. 226 */ 227 rsp = otx2_get_fwdata(pfvf); 228 if (!IS_ERR(rsp)) { 229 struct fec_stats_s *p = &rsp->fwdata.phy.fec_stats; 230 231 if (pfvf->linfo.fec == OTX2_FEC_BASER) { 232 fec_corr_blks = p->brfec_corr_blks; 233 fec_uncorr_blks = p->brfec_uncorr_blks; 234 } else { 235 fec_corr_blks = p->rsfec_corr_cws; 236 fec_uncorr_blks = p->rsfec_uncorr_cws; 237 } 238 } 239 } 240 241 *(data++) = fec_corr_blks; 242 *(data++) = fec_uncorr_blks; 243 } 244 245 static int otx2_get_sset_count(struct net_device *netdev, int sset) 246 { 247 struct otx2_nic *pfvf = netdev_priv(netdev); 248 int qstats_count; 249 250 if (sset != ETH_SS_STATS) 251 return -EINVAL; 252 253 qstats_count = otx2_n_queue_stats * 254 (pfvf->hw.rx_queues + pfvf->hw.tx_queues); 255 otx2_update_lmac_fec_stats(pfvf); 256 257 return otx2_n_dev_stats + otx2_n_drv_stats + qstats_count + 258 CGX_RX_STATS_COUNT + CGX_TX_STATS_COUNT + OTX2_FEC_STATS_CNT 259 + 1; 260 } 261 262 /* Get no of queues device supports and current queue count */ 263 static void otx2_get_channels(struct net_device *dev, 264 struct ethtool_channels *channel) 265 { 266 struct otx2_nic *pfvf = netdev_priv(dev); 267 268 channel->max_rx = pfvf->hw.max_queues; 269 channel->max_tx = pfvf->hw.max_queues; 270 271 channel->rx_count = pfvf->hw.rx_queues; 272 channel->tx_count = pfvf->hw.tx_queues; 273 } 274 275 /* Set no of 
Tx, Rx queues to be used */ 276 static int otx2_set_channels(struct net_device *dev, 277 struct ethtool_channels *channel) 278 { 279 struct otx2_nic *pfvf = netdev_priv(dev); 280 bool if_up = netif_running(dev); 281 int err = 0; 282 283 if (!channel->rx_count || !channel->tx_count) 284 return -EINVAL; 285 286 if (bitmap_weight(&pfvf->rq_bmap, pfvf->hw.rx_queues) > 1) { 287 netdev_err(dev, 288 "Receive queues are in use by TC police action\n"); 289 return -EINVAL; 290 } 291 292 if (if_up) 293 dev->netdev_ops->ndo_stop(dev); 294 295 err = otx2_set_real_num_queues(dev, channel->tx_count, 296 channel->rx_count); 297 if (err) 298 return err; 299 300 pfvf->hw.rx_queues = channel->rx_count; 301 pfvf->hw.tx_queues = channel->tx_count; 302 pfvf->qset.cq_cnt = pfvf->hw.tx_queues + pfvf->hw.rx_queues; 303 304 if (if_up) 305 err = dev->netdev_ops->ndo_open(dev); 306 307 netdev_info(dev, "Setting num Tx rings to %d, Rx rings to %d success\n", 308 pfvf->hw.tx_queues, pfvf->hw.rx_queues); 309 310 return err; 311 } 312 313 static void otx2_get_pauseparam(struct net_device *netdev, 314 struct ethtool_pauseparam *pause) 315 { 316 struct otx2_nic *pfvf = netdev_priv(netdev); 317 struct cgx_pause_frm_cfg *req, *rsp; 318 319 if (is_otx2_lbkvf(pfvf->pdev)) 320 return; 321 322 req = otx2_mbox_alloc_msg_cgx_cfg_pause_frm(&pfvf->mbox); 323 if (!req) 324 return; 325 326 if (!otx2_sync_mbox_msg(&pfvf->mbox)) { 327 rsp = (struct cgx_pause_frm_cfg *) 328 otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr); 329 pause->rx_pause = rsp->rx_pause; 330 pause->tx_pause = rsp->tx_pause; 331 } 332 } 333 334 static int otx2_set_pauseparam(struct net_device *netdev, 335 struct ethtool_pauseparam *pause) 336 { 337 struct otx2_nic *pfvf = netdev_priv(netdev); 338 339 if (pause->autoneg) 340 return -EOPNOTSUPP; 341 342 if (is_otx2_lbkvf(pfvf->pdev)) 343 return -EOPNOTSUPP; 344 345 if (pause->rx_pause) 346 pfvf->flags |= OTX2_FLAG_RX_PAUSE_ENABLED; 347 else 348 pfvf->flags &= ~OTX2_FLAG_RX_PAUSE_ENABLED; 349 
350 if (pause->tx_pause) 351 pfvf->flags |= OTX2_FLAG_TX_PAUSE_ENABLED; 352 else 353 pfvf->flags &= ~OTX2_FLAG_TX_PAUSE_ENABLED; 354 355 return otx2_config_pause_frm(pfvf); 356 } 357 358 static void otx2_get_ringparam(struct net_device *netdev, 359 struct ethtool_ringparam *ring) 360 { 361 struct otx2_nic *pfvf = netdev_priv(netdev); 362 struct otx2_qset *qs = &pfvf->qset; 363 364 ring->rx_max_pending = Q_COUNT(Q_SIZE_MAX); 365 ring->rx_pending = qs->rqe_cnt ? qs->rqe_cnt : Q_COUNT(Q_SIZE_256); 366 ring->tx_max_pending = Q_COUNT(Q_SIZE_MAX); 367 ring->tx_pending = qs->sqe_cnt ? qs->sqe_cnt : Q_COUNT(Q_SIZE_4K); 368 } 369 370 static int otx2_set_ringparam(struct net_device *netdev, 371 struct ethtool_ringparam *ring) 372 { 373 struct otx2_nic *pfvf = netdev_priv(netdev); 374 bool if_up = netif_running(netdev); 375 struct otx2_qset *qs = &pfvf->qset; 376 u32 rx_count, tx_count; 377 378 if (ring->rx_mini_pending || ring->rx_jumbo_pending) 379 return -EINVAL; 380 381 /* Permitted lengths are 16 64 256 1K 4K 16K 64K 256K 1M */ 382 rx_count = ring->rx_pending; 383 /* On some silicon variants a skid or reserved CQEs are 384 * needed to avoid CQ overflow. 385 */ 386 if (rx_count < pfvf->hw.rq_skid) 387 rx_count = pfvf->hw.rq_skid; 388 rx_count = Q_COUNT(Q_SIZE(rx_count, 3)); 389 390 /* Due pipelining impact minimum 2000 unused SQ CQE's 391 * need to be maintained to avoid CQ overflow, hence the 392 * minimum 4K size. 393 */ 394 tx_count = clamp_t(u32, ring->tx_pending, 395 Q_COUNT(Q_SIZE_4K), Q_COUNT(Q_SIZE_MAX)); 396 tx_count = Q_COUNT(Q_SIZE(tx_count, 3)); 397 398 if (tx_count == qs->sqe_cnt && rx_count == qs->rqe_cnt) 399 return 0; 400 401 if (if_up) 402 netdev->netdev_ops->ndo_stop(netdev); 403 404 /* Assigned to the nearest possible exponent. 
*/ 405 qs->sqe_cnt = tx_count; 406 qs->rqe_cnt = rx_count; 407 408 if (if_up) 409 return netdev->netdev_ops->ndo_open(netdev); 410 411 return 0; 412 } 413 414 static int otx2_get_coalesce(struct net_device *netdev, 415 struct ethtool_coalesce *cmd, 416 struct kernel_ethtool_coalesce *kernel_coal, 417 struct netlink_ext_ack *extack) 418 { 419 struct otx2_nic *pfvf = netdev_priv(netdev); 420 struct otx2_hw *hw = &pfvf->hw; 421 422 cmd->rx_coalesce_usecs = hw->cq_time_wait; 423 cmd->rx_max_coalesced_frames = hw->cq_ecount_wait; 424 cmd->tx_coalesce_usecs = hw->cq_time_wait; 425 cmd->tx_max_coalesced_frames = hw->cq_ecount_wait; 426 427 return 0; 428 } 429 430 static int otx2_set_coalesce(struct net_device *netdev, 431 struct ethtool_coalesce *ec, 432 struct kernel_ethtool_coalesce *kernel_coal, 433 struct netlink_ext_ack *extack) 434 { 435 struct otx2_nic *pfvf = netdev_priv(netdev); 436 struct otx2_hw *hw = &pfvf->hw; 437 int qidx; 438 439 if (!ec->rx_max_coalesced_frames || !ec->tx_max_coalesced_frames) 440 return 0; 441 442 /* 'cq_time_wait' is 8bit and is in multiple of 100ns, 443 * so clamp the user given value to the range of 1 to 25usec. 444 */ 445 ec->rx_coalesce_usecs = clamp_t(u32, ec->rx_coalesce_usecs, 446 1, CQ_TIMER_THRESH_MAX); 447 ec->tx_coalesce_usecs = clamp_t(u32, ec->tx_coalesce_usecs, 448 1, CQ_TIMER_THRESH_MAX); 449 450 /* Rx and Tx are mapped to same CQ, check which one 451 * is changed, if both then choose the min. 452 */ 453 if (hw->cq_time_wait == ec->rx_coalesce_usecs) 454 hw->cq_time_wait = ec->tx_coalesce_usecs; 455 else if (hw->cq_time_wait == ec->tx_coalesce_usecs) 456 hw->cq_time_wait = ec->rx_coalesce_usecs; 457 else 458 hw->cq_time_wait = min_t(u8, ec->rx_coalesce_usecs, 459 ec->tx_coalesce_usecs); 460 461 /* Max ecount_wait supported is 16bit, 462 * so clamp the user given value to the range of 1 to 64k. 
463 */ 464 ec->rx_max_coalesced_frames = clamp_t(u32, ec->rx_max_coalesced_frames, 465 1, U16_MAX); 466 ec->tx_max_coalesced_frames = clamp_t(u32, ec->tx_max_coalesced_frames, 467 1, U16_MAX); 468 469 /* Rx and Tx are mapped to same CQ, check which one 470 * is changed, if both then choose the min. 471 */ 472 if (hw->cq_ecount_wait == ec->rx_max_coalesced_frames) 473 hw->cq_ecount_wait = ec->tx_max_coalesced_frames; 474 else if (hw->cq_ecount_wait == ec->tx_max_coalesced_frames) 475 hw->cq_ecount_wait = ec->rx_max_coalesced_frames; 476 else 477 hw->cq_ecount_wait = min_t(u16, ec->rx_max_coalesced_frames, 478 ec->tx_max_coalesced_frames); 479 480 if (netif_running(netdev)) { 481 for (qidx = 0; qidx < pfvf->hw.cint_cnt; qidx++) 482 otx2_config_irq_coalescing(pfvf, qidx); 483 } 484 485 return 0; 486 } 487 488 static int otx2_get_rss_hash_opts(struct otx2_nic *pfvf, 489 struct ethtool_rxnfc *nfc) 490 { 491 struct otx2_rss_info *rss = &pfvf->hw.rss_info; 492 493 if (!(rss->flowkey_cfg & 494 (NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6))) 495 return 0; 496 497 /* Mimimum is IPv4 and IPv6, SIP/DIP */ 498 nfc->data = RXH_IP_SRC | RXH_IP_DST; 499 if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_VLAN) 500 nfc->data |= RXH_VLAN; 501 502 switch (nfc->flow_type) { 503 case TCP_V4_FLOW: 504 case TCP_V6_FLOW: 505 if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_TCP) 506 nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; 507 break; 508 case UDP_V4_FLOW: 509 case UDP_V6_FLOW: 510 if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_UDP) 511 nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; 512 break; 513 case SCTP_V4_FLOW: 514 case SCTP_V6_FLOW: 515 if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_SCTP) 516 nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; 517 break; 518 case AH_ESP_V4_FLOW: 519 case AH_ESP_V6_FLOW: 520 if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_ESP) 521 nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; 522 break; 523 case AH_V4_FLOW: 524 case ESP_V4_FLOW: 525 case IPV4_FLOW: 526 break; 527 case AH_V6_FLOW: 528 case ESP_V6_FLOW: 
529 case IPV6_FLOW: 530 break; 531 default: 532 return -EINVAL; 533 } 534 535 return 0; 536 } 537 538 static int otx2_set_rss_hash_opts(struct otx2_nic *pfvf, 539 struct ethtool_rxnfc *nfc) 540 { 541 struct otx2_rss_info *rss = &pfvf->hw.rss_info; 542 u32 rxh_l4 = RXH_L4_B_0_1 | RXH_L4_B_2_3; 543 u32 rss_cfg = rss->flowkey_cfg; 544 545 if (!rss->enable) { 546 netdev_err(pfvf->netdev, 547 "RSS is disabled, cannot change settings\n"); 548 return -EIO; 549 } 550 551 /* Mimimum is IPv4 and IPv6, SIP/DIP */ 552 if (!(nfc->data & RXH_IP_SRC) || !(nfc->data & RXH_IP_DST)) 553 return -EINVAL; 554 555 if (nfc->data & RXH_VLAN) 556 rss_cfg |= NIX_FLOW_KEY_TYPE_VLAN; 557 else 558 rss_cfg &= ~NIX_FLOW_KEY_TYPE_VLAN; 559 560 switch (nfc->flow_type) { 561 case TCP_V4_FLOW: 562 case TCP_V6_FLOW: 563 /* Different config for v4 and v6 is not supported. 564 * Both of them have to be either 4-tuple or 2-tuple. 565 */ 566 switch (nfc->data & rxh_l4) { 567 case 0: 568 rss_cfg &= ~NIX_FLOW_KEY_TYPE_TCP; 569 break; 570 case (RXH_L4_B_0_1 | RXH_L4_B_2_3): 571 rss_cfg |= NIX_FLOW_KEY_TYPE_TCP; 572 break; 573 default: 574 return -EINVAL; 575 } 576 break; 577 case UDP_V4_FLOW: 578 case UDP_V6_FLOW: 579 switch (nfc->data & rxh_l4) { 580 case 0: 581 rss_cfg &= ~NIX_FLOW_KEY_TYPE_UDP; 582 break; 583 case (RXH_L4_B_0_1 | RXH_L4_B_2_3): 584 rss_cfg |= NIX_FLOW_KEY_TYPE_UDP; 585 break; 586 default: 587 return -EINVAL; 588 } 589 break; 590 case SCTP_V4_FLOW: 591 case SCTP_V6_FLOW: 592 switch (nfc->data & rxh_l4) { 593 case 0: 594 rss_cfg &= ~NIX_FLOW_KEY_TYPE_SCTP; 595 break; 596 case (RXH_L4_B_0_1 | RXH_L4_B_2_3): 597 rss_cfg |= NIX_FLOW_KEY_TYPE_SCTP; 598 break; 599 default: 600 return -EINVAL; 601 } 602 break; 603 case AH_ESP_V4_FLOW: 604 case AH_ESP_V6_FLOW: 605 switch (nfc->data & rxh_l4) { 606 case 0: 607 rss_cfg &= ~(NIX_FLOW_KEY_TYPE_ESP | 608 NIX_FLOW_KEY_TYPE_AH); 609 rss_cfg |= NIX_FLOW_KEY_TYPE_VLAN | 610 NIX_FLOW_KEY_TYPE_IPV4_PROTO; 611 break; 612 case (RXH_L4_B_0_1 | RXH_L4_B_2_3): 
613 /* If VLAN hashing is also requested for ESP then do not 614 * allow because of hardware 40 bytes flow key limit. 615 */ 616 if (rss_cfg & NIX_FLOW_KEY_TYPE_VLAN) { 617 netdev_err(pfvf->netdev, 618 "RSS hash of ESP or AH with VLAN is not supported\n"); 619 return -EOPNOTSUPP; 620 } 621 622 rss_cfg |= NIX_FLOW_KEY_TYPE_ESP | NIX_FLOW_KEY_TYPE_AH; 623 /* Disable IPv4 proto hashing since IPv6 SA+DA(32 bytes) 624 * and ESP SPI+sequence(8 bytes) uses hardware maximum 625 * limit of 40 byte flow key. 626 */ 627 rss_cfg &= ~NIX_FLOW_KEY_TYPE_IPV4_PROTO; 628 break; 629 default: 630 return -EINVAL; 631 } 632 break; 633 case IPV4_FLOW: 634 case IPV6_FLOW: 635 rss_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6; 636 break; 637 default: 638 return -EINVAL; 639 } 640 641 rss->flowkey_cfg = rss_cfg; 642 otx2_set_flowkey_cfg(pfvf); 643 return 0; 644 } 645 646 static int otx2_get_rxnfc(struct net_device *dev, 647 struct ethtool_rxnfc *nfc, u32 *rules) 648 { 649 bool ntuple = !!(dev->features & NETIF_F_NTUPLE); 650 struct otx2_nic *pfvf = netdev_priv(dev); 651 int ret = -EOPNOTSUPP; 652 653 switch (nfc->cmd) { 654 case ETHTOOL_GRXRINGS: 655 nfc->data = pfvf->hw.rx_queues; 656 ret = 0; 657 break; 658 case ETHTOOL_GRXCLSRLCNT: 659 if (netif_running(dev) && ntuple) { 660 nfc->rule_cnt = pfvf->flow_cfg->nr_flows; 661 ret = 0; 662 } 663 break; 664 case ETHTOOL_GRXCLSRULE: 665 if (netif_running(dev) && ntuple) 666 ret = otx2_get_flow(pfvf, nfc, nfc->fs.location); 667 break; 668 case ETHTOOL_GRXCLSRLALL: 669 if (netif_running(dev) && ntuple) 670 ret = otx2_get_all_flows(pfvf, nfc, rules); 671 break; 672 case ETHTOOL_GRXFH: 673 return otx2_get_rss_hash_opts(pfvf, nfc); 674 default: 675 break; 676 } 677 return ret; 678 } 679 680 static int otx2_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *nfc) 681 { 682 bool ntuple = !!(dev->features & NETIF_F_NTUPLE); 683 struct otx2_nic *pfvf = netdev_priv(dev); 684 int ret = -EOPNOTSUPP; 685 686 switch (nfc->cmd) { 687 case 
ETHTOOL_SRXFH: 688 ret = otx2_set_rss_hash_opts(pfvf, nfc); 689 break; 690 case ETHTOOL_SRXCLSRLINS: 691 if (netif_running(dev) && ntuple) 692 ret = otx2_add_flow(pfvf, nfc); 693 break; 694 case ETHTOOL_SRXCLSRLDEL: 695 if (netif_running(dev) && ntuple) 696 ret = otx2_remove_flow(pfvf, nfc->fs.location); 697 break; 698 default: 699 break; 700 } 701 702 return ret; 703 } 704 705 static u32 otx2_get_rxfh_key_size(struct net_device *netdev) 706 { 707 struct otx2_nic *pfvf = netdev_priv(netdev); 708 struct otx2_rss_info *rss; 709 710 rss = &pfvf->hw.rss_info; 711 712 return sizeof(rss->key); 713 } 714 715 static u32 otx2_get_rxfh_indir_size(struct net_device *dev) 716 { 717 return MAX_RSS_INDIR_TBL_SIZE; 718 } 719 720 static int otx2_rss_ctx_delete(struct otx2_nic *pfvf, int ctx_id) 721 { 722 struct otx2_rss_info *rss = &pfvf->hw.rss_info; 723 724 otx2_rss_ctx_flow_del(pfvf, ctx_id); 725 kfree(rss->rss_ctx[ctx_id]); 726 rss->rss_ctx[ctx_id] = NULL; 727 728 return 0; 729 } 730 731 static int otx2_rss_ctx_create(struct otx2_nic *pfvf, 732 u32 *rss_context) 733 { 734 struct otx2_rss_info *rss = &pfvf->hw.rss_info; 735 u8 ctx; 736 737 for (ctx = 0; ctx < MAX_RSS_GROUPS; ctx++) { 738 if (!rss->rss_ctx[ctx]) 739 break; 740 } 741 if (ctx == MAX_RSS_GROUPS) 742 return -EINVAL; 743 744 rss->rss_ctx[ctx] = kzalloc(sizeof(*rss->rss_ctx[ctx]), GFP_KERNEL); 745 if (!rss->rss_ctx[ctx]) 746 return -ENOMEM; 747 *rss_context = ctx; 748 749 return 0; 750 } 751 752 /* RSS context configuration */ 753 static int otx2_set_rxfh_context(struct net_device *dev, const u32 *indir, 754 const u8 *hkey, const u8 hfunc, 755 u32 *rss_context, bool delete) 756 { 757 struct otx2_nic *pfvf = netdev_priv(dev); 758 struct otx2_rss_ctx *rss_ctx; 759 struct otx2_rss_info *rss; 760 int ret, idx; 761 762 if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) 763 return -EOPNOTSUPP; 764 765 if (*rss_context != ETH_RXFH_CONTEXT_ALLOC && 766 *rss_context >= MAX_RSS_GROUPS) 767 return -EINVAL; 768 769 
rss = &pfvf->hw.rss_info; 770 771 if (!rss->enable) { 772 netdev_err(dev, "RSS is disabled, cannot change settings\n"); 773 return -EIO; 774 } 775 776 if (hkey) { 777 memcpy(rss->key, hkey, sizeof(rss->key)); 778 otx2_set_rss_key(pfvf); 779 } 780 if (delete) 781 return otx2_rss_ctx_delete(pfvf, *rss_context); 782 783 if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) { 784 ret = otx2_rss_ctx_create(pfvf, rss_context); 785 if (ret) 786 return ret; 787 } 788 if (indir) { 789 rss_ctx = rss->rss_ctx[*rss_context]; 790 for (idx = 0; idx < rss->rss_size; idx++) 791 rss_ctx->ind_tbl[idx] = indir[idx]; 792 } 793 otx2_set_rss_table(pfvf, *rss_context); 794 795 return 0; 796 } 797 798 static int otx2_get_rxfh_context(struct net_device *dev, u32 *indir, 799 u8 *hkey, u8 *hfunc, u32 rss_context) 800 { 801 struct otx2_nic *pfvf = netdev_priv(dev); 802 struct otx2_rss_ctx *rss_ctx; 803 struct otx2_rss_info *rss; 804 int idx, rx_queues; 805 806 rss = &pfvf->hw.rss_info; 807 808 if (hfunc) 809 *hfunc = ETH_RSS_HASH_TOP; 810 811 if (!indir) 812 return 0; 813 814 if (!rss->enable && rss_context == DEFAULT_RSS_CONTEXT_GROUP) { 815 rx_queues = pfvf->hw.rx_queues; 816 for (idx = 0; idx < MAX_RSS_INDIR_TBL_SIZE; idx++) 817 indir[idx] = ethtool_rxfh_indir_default(idx, rx_queues); 818 return 0; 819 } 820 if (rss_context >= MAX_RSS_GROUPS) 821 return -ENOENT; 822 823 rss_ctx = rss->rss_ctx[rss_context]; 824 if (!rss_ctx) 825 return -ENOENT; 826 827 if (indir) { 828 for (idx = 0; idx < rss->rss_size; idx++) 829 indir[idx] = rss_ctx->ind_tbl[idx]; 830 } 831 if (hkey) 832 memcpy(hkey, rss->key, sizeof(rss->key)); 833 834 return 0; 835 } 836 837 /* Get RSS configuration */ 838 static int otx2_get_rxfh(struct net_device *dev, u32 *indir, 839 u8 *hkey, u8 *hfunc) 840 { 841 return otx2_get_rxfh_context(dev, indir, hkey, hfunc, 842 DEFAULT_RSS_CONTEXT_GROUP); 843 } 844 845 /* Configure RSS table and hash key */ 846 static int otx2_set_rxfh(struct net_device *dev, const u32 *indir, 847 const u8 *hkey, 
const u8 hfunc) 848 { 849 850 u32 rss_context = DEFAULT_RSS_CONTEXT_GROUP; 851 852 return otx2_set_rxfh_context(dev, indir, hkey, hfunc, &rss_context, 0); 853 } 854 855 static u32 otx2_get_msglevel(struct net_device *netdev) 856 { 857 struct otx2_nic *pfvf = netdev_priv(netdev); 858 859 return pfvf->msg_enable; 860 } 861 862 static void otx2_set_msglevel(struct net_device *netdev, u32 val) 863 { 864 struct otx2_nic *pfvf = netdev_priv(netdev); 865 866 pfvf->msg_enable = val; 867 } 868 869 static u32 otx2_get_link(struct net_device *netdev) 870 { 871 struct otx2_nic *pfvf = netdev_priv(netdev); 872 873 /* LBK link is internal and always UP */ 874 if (is_otx2_lbkvf(pfvf->pdev)) 875 return 1; 876 return pfvf->linfo.link_up; 877 } 878 879 static int otx2_get_ts_info(struct net_device *netdev, 880 struct ethtool_ts_info *info) 881 { 882 struct otx2_nic *pfvf = netdev_priv(netdev); 883 884 if (!pfvf->ptp) 885 return ethtool_op_get_ts_info(netdev, info); 886 887 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | 888 SOF_TIMESTAMPING_RX_SOFTWARE | 889 SOF_TIMESTAMPING_SOFTWARE | 890 SOF_TIMESTAMPING_TX_HARDWARE | 891 SOF_TIMESTAMPING_RX_HARDWARE | 892 SOF_TIMESTAMPING_RAW_HARDWARE; 893 894 info->phc_index = otx2_ptp_clock_index(pfvf); 895 896 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); 897 898 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | 899 (1 << HWTSTAMP_FILTER_ALL); 900 901 return 0; 902 } 903 904 static struct cgx_fw_data *otx2_get_fwdata(struct otx2_nic *pfvf) 905 { 906 struct cgx_fw_data *rsp = NULL; 907 struct msg_req *req; 908 int err = 0; 909 910 mutex_lock(&pfvf->mbox.lock); 911 req = otx2_mbox_alloc_msg_cgx_get_aux_link_info(&pfvf->mbox); 912 if (!req) { 913 mutex_unlock(&pfvf->mbox.lock); 914 return ERR_PTR(-ENOMEM); 915 } 916 917 err = otx2_sync_mbox_msg(&pfvf->mbox); 918 if (!err) { 919 rsp = (struct cgx_fw_data *) 920 otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr); 921 } else { 922 rsp = ERR_PTR(err); 923 } 924 925 
mutex_unlock(&pfvf->mbox.lock); 926 return rsp; 927 } 928 929 static int otx2_get_fecparam(struct net_device *netdev, 930 struct ethtool_fecparam *fecparam) 931 { 932 struct otx2_nic *pfvf = netdev_priv(netdev); 933 struct cgx_fw_data *rsp; 934 const int fec[] = { 935 ETHTOOL_FEC_OFF, 936 ETHTOOL_FEC_BASER, 937 ETHTOOL_FEC_RS, 938 ETHTOOL_FEC_BASER | ETHTOOL_FEC_RS}; 939 #define FEC_MAX_INDEX 4 940 if (pfvf->linfo.fec < FEC_MAX_INDEX) 941 fecparam->active_fec = fec[pfvf->linfo.fec]; 942 943 rsp = otx2_get_fwdata(pfvf); 944 if (IS_ERR(rsp)) 945 return PTR_ERR(rsp); 946 947 if (rsp->fwdata.supported_fec < FEC_MAX_INDEX) { 948 if (!rsp->fwdata.supported_fec) 949 fecparam->fec = ETHTOOL_FEC_NONE; 950 else 951 fecparam->fec = fec[rsp->fwdata.supported_fec]; 952 } 953 return 0; 954 } 955 956 static int otx2_set_fecparam(struct net_device *netdev, 957 struct ethtool_fecparam *fecparam) 958 { 959 struct otx2_nic *pfvf = netdev_priv(netdev); 960 struct mbox *mbox = &pfvf->mbox; 961 struct fec_mode *req, *rsp; 962 int err = 0, fec = 0; 963 964 switch (fecparam->fec) { 965 /* Firmware does not support AUTO mode consider it as FEC_OFF */ 966 case ETHTOOL_FEC_OFF: 967 case ETHTOOL_FEC_AUTO: 968 fec = OTX2_FEC_OFF; 969 break; 970 case ETHTOOL_FEC_RS: 971 fec = OTX2_FEC_RS; 972 break; 973 case ETHTOOL_FEC_BASER: 974 fec = OTX2_FEC_BASER; 975 break; 976 default: 977 netdev_warn(pfvf->netdev, "Unsupported FEC mode: %d", 978 fecparam->fec); 979 return -EINVAL; 980 } 981 982 if (fec == pfvf->linfo.fec) 983 return 0; 984 985 mutex_lock(&mbox->lock); 986 req = otx2_mbox_alloc_msg_cgx_set_fec_param(&pfvf->mbox); 987 if (!req) { 988 err = -ENOMEM; 989 goto end; 990 } 991 req->fec = fec; 992 err = otx2_sync_mbox_msg(&pfvf->mbox); 993 if (err) 994 goto end; 995 996 rsp = (struct fec_mode *)otx2_mbox_get_rsp(&pfvf->mbox.mbox, 997 0, &req->hdr); 998 if (rsp->fec >= 0) 999 pfvf->linfo.fec = rsp->fec; 1000 else 1001 err = rsp->fec; 1002 end: 1003 mutex_unlock(&mbox->lock); 1004 return err; 
1005 } 1006 1007 static void otx2_get_fec_info(u64 index, int req_mode, 1008 struct ethtool_link_ksettings *link_ksettings) 1009 { 1010 __ETHTOOL_DECLARE_LINK_MODE_MASK(otx2_fec_modes) = { 0, }; 1011 1012 switch (index) { 1013 case OTX2_FEC_NONE: 1014 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, 1015 otx2_fec_modes); 1016 break; 1017 case OTX2_FEC_BASER: 1018 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, 1019 otx2_fec_modes); 1020 break; 1021 case OTX2_FEC_RS: 1022 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, 1023 otx2_fec_modes); 1024 break; 1025 case OTX2_FEC_BASER | OTX2_FEC_RS: 1026 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, 1027 otx2_fec_modes); 1028 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, 1029 otx2_fec_modes); 1030 break; 1031 } 1032 1033 /* Add fec modes to existing modes */ 1034 if (req_mode == OTX2_MODE_ADVERTISED) 1035 linkmode_or(link_ksettings->link_modes.advertising, 1036 link_ksettings->link_modes.advertising, 1037 otx2_fec_modes); 1038 else 1039 linkmode_or(link_ksettings->link_modes.supported, 1040 link_ksettings->link_modes.supported, 1041 otx2_fec_modes); 1042 } 1043 1044 static void otx2_get_link_mode_info(u64 link_mode_bmap, 1045 bool req_mode, 1046 struct ethtool_link_ksettings 1047 *link_ksettings) 1048 { 1049 __ETHTOOL_DECLARE_LINK_MODE_MASK(otx2_link_modes) = { 0, }; 1050 const int otx2_sgmii_features[6] = { 1051 ETHTOOL_LINK_MODE_10baseT_Half_BIT, 1052 ETHTOOL_LINK_MODE_10baseT_Full_BIT, 1053 ETHTOOL_LINK_MODE_100baseT_Half_BIT, 1054 ETHTOOL_LINK_MODE_100baseT_Full_BIT, 1055 ETHTOOL_LINK_MODE_1000baseT_Half_BIT, 1056 ETHTOOL_LINK_MODE_1000baseT_Full_BIT, 1057 }; 1058 /* CGX link modes to Ethtool link mode mapping */ 1059 const int cgx_link_mode[27] = { 1060 0, /* SGMII Mode */ 1061 ETHTOOL_LINK_MODE_1000baseX_Full_BIT, 1062 ETHTOOL_LINK_MODE_10000baseT_Full_BIT, 1063 ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, 1064 ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, 1065 ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, 1066 0, 1067 
/* Tail of the CGX-to-ethtool link mode translation table: indexed by the
 * bit position reported by firmware in the link mode bitmap.  Zero entries
 * are firmware bit positions with no matching ethtool mode.
 * NOTE(review): the table head and the enclosing otx2_get_link_mode_info()
 * signature begin above this chunk.
 */
	ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
	0,
	0,
	ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
	0,
	ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
	0,
	ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
	0,
	ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT
	};
	u8 bit;

	/* Translate each firmware-reported mode bit into the ethtool linkmode
	 * bitmap.  NOTE(review): casting the u64 bitmap to unsigned long *
	 * assumes a layout where for_each_set_bit() sees the same bit order
	 * (true on 64-bit LE); the limit of 27 presumably matches the number
	 * of mode bits the firmware can report -- confirm against the CGX
	 * firmware interface definition.
	 */
	for_each_set_bit(bit, (unsigned long *)&link_mode_bmap, 27) {
		/* SGMII mode is set */
		if (bit == 0)
			linkmode_set_bit_array(otx2_sgmii_features,
					       ARRAY_SIZE(otx2_sgmii_features),
					       otx2_link_modes);
		else
			linkmode_set_bit(cgx_link_mode[bit], otx2_link_modes);
	}

	/* Publish the accumulated modes into the field the caller asked for */
	if (req_mode == OTX2_MODE_ADVERTISED)
		linkmode_copy(link_ksettings->link_modes.advertising,
			      otx2_link_modes);
	else
		linkmode_copy(link_ksettings->link_modes.supported,
			      otx2_link_modes);
}

/* ethtool .get_link_ksettings handler (PF).
 * Reports speed/duplex/autoneg from the locally cached link info and asks
 * firmware (via otx2_get_fwdata()) for the supported/advertised link modes
 * and FEC capabilities.  Returns 0 on success or the PTR_ERR errno when the
 * firmware data fetch fails.
 */
static int otx2_get_link_ksettings(struct net_device *netdev,
				   struct ethtool_link_ksettings *cmd)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	struct cgx_fw_data *rsp = NULL;

	cmd->base.duplex = pfvf->linfo.full_duplex;
	cmd->base.speed = pfvf->linfo.speed;
	cmd->base.autoneg = pfvf->linfo.an;

	rsp = otx2_get_fwdata(pfvf);
	if (IS_ERR(rsp))
		return PTR_ERR(rsp);

	/* Only advertise autoneg capability when firmware says so */
	if (rsp->fwdata.supported_an)
		ethtool_link_ksettings_add_link_mode(cmd,
						     supported,
						     Autoneg);

	otx2_get_link_mode_info(rsp->fwdata.advertised_link_modes,
				OTX2_MODE_ADVERTISED, cmd);
	otx2_get_fec_info(rsp->fwdata.advertised_fec,
			  OTX2_MODE_ADVERTISED, cmd);
	otx2_get_link_mode_info(rsp->fwdata.supported_link_modes,
				OTX2_MODE_SUPPORTED, cmd);
	otx2_get_fec_info(rsp->fwdata.supported_fec,
			  OTX2_MODE_SUPPORTED, cmd);
	return 0;
}

/* Pick the single link mode to request from firmware.  If no advertising
 * bit is set, *mode is left untouched.
 */
static void otx2_get_advertised_mode(const struct ethtool_link_ksettings *cmd,
				     u64 *mode)
{
	u32 bit_pos;

	/* Firmware does not support requesting multiple advertised modes
	 * return first set bit
	 */
	bit_pos = find_first_bit(cmd->link_modes.advertising,
				 __ETHTOOL_LINK_MODE_MASK_NBITS);
	if (bit_pos != __ETHTOOL_LINK_MODE_MASK_NBITS)
		*mode = bit_pos;
}

/* ethtool .set_link_ksettings handler (PF).
 * Validates the request, rejects modes the hardware does not support, then
 * sends a CGX_SET_LINK_MODE mailbox request to firmware.
 */
static int otx2_set_link_ksettings(struct net_device *netdev,
				   const struct ethtool_link_ksettings *cmd)
{
	struct otx2_nic *pf = netdev_priv(netdev);
	struct ethtool_link_ksettings cur_ks;
	struct cgx_set_link_mode_req *req;
	struct mbox *mbox = &pf->mbox;
	int err = 0;

	memset(&cur_ks, 0, sizeof(struct ethtool_link_ksettings));

	if (!ethtool_validate_speed(cmd->base.speed) ||
	    !ethtool_validate_duplex(cmd->base.duplex))
		return -EINVAL;

	if (cmd->base.autoneg != AUTONEG_ENABLE &&
	    cmd->base.autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	/* NOTE(review): return value deliberately(?) ignored -- if the
	 * firmware-data fetch inside fails, cur_ks.link_modes.supported
	 * stays all-zero and the subset check below rejects the request.
	 */
	otx2_get_link_ksettings(netdev, &cur_ks);

	/* Check requested modes against supported modes by hardware */
	if (!bitmap_subset(cmd->link_modes.advertising,
			   cur_ks.link_modes.supported,
			   __ETHTOOL_LINK_MODE_MASK_NBITS))
		return -EINVAL;

	mutex_lock(&mbox->lock);
	req = otx2_mbox_alloc_msg_cgx_set_link_mode(&pf->mbox);
	if (!req) {
		err = -ENOMEM;
		goto end;
	}

	req->args.speed = cmd->base.speed;
	/* firmware expects 1 for half duplex and 0 for full duplex
	 * hence inverting
	 */
	req->args.duplex = cmd->base.duplex ^ 0x1;
	req->args.an = cmd->base.autoneg;
	otx2_get_advertised_mode(cmd, &req->args.mode);

	err = otx2_sync_mbox_msg(&pf->mbox);
end:
	mutex_unlock(&mbox->lock);
	return err;
}

/* PF ethtool operations table */
static const struct ethtool_ops otx2_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES,
	.get_link		= otx2_get_link,
	.get_drvinfo		= otx2_get_drvinfo,
	.get_strings		= otx2_get_strings,
	.get_ethtool_stats	= otx2_get_ethtool_stats,
	.get_sset_count		= otx2_get_sset_count,
	.set_channels		= otx2_set_channels,
	.get_channels		= otx2_get_channels,
	.get_ringparam		= otx2_get_ringparam,
	.set_ringparam		= otx2_set_ringparam,
	.get_coalesce		= otx2_get_coalesce,
	.set_coalesce		= otx2_set_coalesce,
	.get_rxnfc		= otx2_get_rxnfc,
	.set_rxnfc		= otx2_set_rxnfc,
	.get_rxfh_key_size	= otx2_get_rxfh_key_size,
	.get_rxfh_indir_size	= otx2_get_rxfh_indir_size,
	.get_rxfh		= otx2_get_rxfh,
	.set_rxfh		= otx2_set_rxfh,
	.get_rxfh_context	= otx2_get_rxfh_context,
	.set_rxfh_context	= otx2_set_rxfh_context,
	.get_msglevel		= otx2_get_msglevel,
	.set_msglevel		= otx2_set_msglevel,
	.get_pauseparam		= otx2_get_pauseparam,
	.set_pauseparam		= otx2_set_pauseparam,
	.get_ts_info		= otx2_get_ts_info,
	.get_fecparam		= otx2_get_fecparam,
	.set_fecparam		= otx2_set_fecparam,
	.get_link_ksettings	= otx2_get_link_ksettings,
	.set_link_ksettings	= otx2_set_link_ksettings,
};

/* Attach the PF ethtool ops to a net_device */
void otx2_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &otx2_ethtool_ops;
}

/* VF's ethtool APIs */
static void otx2vf_get_drvinfo(struct net_device *netdev,
			       struct ethtool_drvinfo *info)
{
	struct otx2_nic *vf = netdev_priv(netdev);

	strlcpy(info->driver, DRV_VF_NAME, sizeof(info->driver));
	strlcpy(info->bus_info, pci_name(vf->pdev), sizeof(info->bus_info));
}

/* ethtool stat-name strings for the VF: device stats, driver stats,
 * per-queue stats, then reset_count.  Order must match
 * otx2vf_get_ethtool_stats() and otx2vf_get_sset_count().
 */
static void otx2vf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
{
	struct otx2_nic *vf = netdev_priv(netdev);
	int stats;

	if (sset != ETH_SS_STATS)
		return;

	for (stats = 0; stats < otx2_n_dev_stats; stats++) {
		memcpy(data, otx2_dev_stats[stats].name, ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}

	for (stats = 0; stats < otx2_n_drv_stats; stats++) {
		memcpy(data, otx2_drv_stats[stats].name, ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}

	otx2_get_qset_strings(vf, &data, 0);

	strcpy(data, "reset_count");
	data += ETH_GSTRING_LEN;
}

/* ethtool stat values for the VF, emitted in the same order as the strings
 * above.  Device stats are refreshed from hardware first.
 */
static void otx2vf_get_ethtool_stats(struct net_device *netdev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct otx2_nic *vf = netdev_priv(netdev);
	int stat;

	otx2_get_dev_stats(vf);
	/* dev_stats is read as a flat u64 array via the precomputed index */
	for (stat = 0; stat < otx2_n_dev_stats; stat++)
		*(data++) = ((u64 *)&vf->hw.dev_stats)
				[otx2_dev_stats[stat].index];

	/* drv_stats entries are atomic_t counters */
	for (stat = 0; stat < otx2_n_drv_stats; stat++)
		*(data++) = atomic_read(&((atomic_t *)&vf->hw.drv_stats)
						[otx2_drv_stats[stat].index]);

	otx2_get_qset_stats(vf, stats, &data);
	*(data++) = vf->reset_count;
}

/* Number of ethtool stats exposed by the VF: device + driver + per-queue
 * (rx and tx) + 1 for reset_count.
 */
static int otx2vf_get_sset_count(struct net_device *netdev, int sset)
{
	struct otx2_nic *vf = netdev_priv(netdev);
	int qstats_count;

	if (sset != ETH_SS_STATS)
		return -EINVAL;

	qstats_count = otx2_n_queue_stats *
		       (vf->hw.rx_queues + vf->hw.tx_queues);

	return otx2_n_dev_stats + otx2_n_drv_stats + qstats_count + 1;
}

/* VF .get_link_ksettings: loopback (LBK) VFs have no physical link, so
 * report a fixed 100G full-duplex; real VFs defer to the PF handler.
 */
static int otx2vf_get_link_ksettings(struct net_device *netdev,
				     struct ethtool_link_ksettings *cmd)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);

	if (is_otx2_lbkvf(pfvf->pdev)) {
		cmd->base.duplex = DUPLEX_FULL;
		cmd->base.speed = SPEED_100000;
	} else {
		return otx2_get_link_ksettings(netdev, cmd);
	}
	return 0;
}

/* VF ethtool operations table (no FEC/timestamp/link-mode setting) */
static const struct ethtool_ops otx2vf_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES,
	.get_link		= otx2_get_link,
	.get_drvinfo		= otx2vf_get_drvinfo,
	.get_strings		= otx2vf_get_strings,
	.get_ethtool_stats	= otx2vf_get_ethtool_stats,
	.get_sset_count		= otx2vf_get_sset_count,
	.set_channels		= otx2_set_channels,
	.get_channels		= otx2_get_channels,
	.get_rxnfc		= otx2_get_rxnfc,
	.set_rxnfc		= otx2_set_rxnfc,
	.get_rxfh_key_size	= otx2_get_rxfh_key_size,
	.get_rxfh_indir_size	= otx2_get_rxfh_indir_size,
	.get_rxfh		= otx2_get_rxfh,
	.set_rxfh		= otx2_set_rxfh,
	.get_rxfh_context	= otx2_get_rxfh_context,
	.set_rxfh_context	= otx2_set_rxfh_context,
	.get_ringparam		= otx2_get_ringparam,
	.set_ringparam		= otx2_set_ringparam,
	.get_coalesce		= otx2_get_coalesce,
	.set_coalesce		= otx2_set_coalesce,
	.get_msglevel		= otx2_get_msglevel,
	.set_msglevel		= otx2_set_msglevel,
	.get_pauseparam		= otx2_get_pauseparam,
	.set_pauseparam		= otx2_set_pauseparam,
	.get_link_ksettings	= otx2vf_get_link_ksettings,
};

/* Attach the VF ethtool ops to a net_device; exported for the VF driver */
void otx2vf_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &otx2vf_ethtool_ops;
}
EXPORT_SYMBOL(otx2vf_set_ethtool_ops);