// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Ethernet driver
 *
 * Copyright (C) 2020 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/pci.h>
#include <linux/ethtool.h>
#include <linux/stddef.h>
#include <linux/etherdevice.h>
#include <linux/log2.h>
#include <linux/net_tstamp.h>
#include <linux/linkmode.h>

#include "otx2_common.h"
#include "otx2_ptp.h"

#define DRV_NAME	"octeontx2-nicpf"
#define DRV_VF_NAME	"octeontx2-nicvf"

/* One "ethtool -S" entry: display name plus the u64 (or atomic_t)
 * slot index within the corresponding stats structure.
 */
struct otx2_stat {
	char name[ETH_GSTRING_LEN];
	unsigned int index;
};

/* HW device stats: index is the u64 slot of @stat in struct otx2_dev_stats */
#define OTX2_DEV_STAT(stat) { \
	.name = #stat, \
	.index = offsetof(struct otx2_dev_stats, stat) / sizeof(u64), \
}

/* Physical link config: bitmap of the CGX link modes this driver exposes
 * (binary 110001110001100110010111111); used to mask firmware-reported modes.
 */
#define OTX2_ETHTOOL_SUPPORTED_MODES 0x638CCBF //110001110001100110010111111

/* Selects which linkmode mask of ethtool_link_ksettings to populate */
enum link_mode {
	OTX2_MODE_SUPPORTED,
	OTX2_MODE_ADVERTISED
};

static const struct otx2_stat otx2_dev_stats[] = {
	OTX2_DEV_STAT(rx_ucast_frames),
	OTX2_DEV_STAT(rx_bcast_frames),
	OTX2_DEV_STAT(rx_mcast_frames),

	OTX2_DEV_STAT(tx_ucast_frames),
	OTX2_DEV_STAT(tx_bcast_frames),
	OTX2_DEV_STAT(tx_mcast_frames),
};

/* Driver level stats: index is the atomic_t slot in struct otx2_drv_stats */
#define OTX2_DRV_STAT(stat) { \
	.name = #stat, \
	.index = offsetof(struct otx2_drv_stats, stat) / sizeof(atomic_t), \
}

static const struct otx2_stat otx2_drv_stats[] = {
	OTX2_DRV_STAT(rx_fcs_errs),
	OTX2_DRV_STAT(rx_oversize_errs),
	OTX2_DRV_STAT(rx_undersize_errs),
	OTX2_DRV_STAT(rx_csum_errs),
	OTX2_DRV_STAT(rx_len_errs),
	OTX2_DRV_STAT(rx_other_errs),
};

/* Per-queue stats: indices 0/1 select bytes/frames within the queue
 * stats structure (see otx2_get_qset_stats()).
 */
static const struct otx2_stat otx2_queue_stats[] = {
	{ "bytes", 0 },
	{ "frames", 1 },
};

static const unsigned int otx2_n_dev_stats = ARRAY_SIZE(otx2_dev_stats);
static const unsigned int otx2_n_drv_stats = ARRAY_SIZE(otx2_drv_stats);
static const unsigned int otx2_n_queue_stats = ARRAY_SIZE(otx2_queue_stats);

/* Forward declaration: fetches auxiliary link info from CGX firmware */
static struct cgx_fw_data *otx2_get_fwdata(struct otx2_nic *pfvf);

/* ethtool .get_drvinfo: report PF driver name and PCI bus address */
static void otx2_get_drvinfo(struct net_device *netdev,
			     struct ethtool_drvinfo *info)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->bus_info, pci_name(pfvf->pdev), sizeof(info->bus_info));
}

/* Emit "rxqN: <stat>" / "txqN: <stat>" name strings for every queue.
 * @data is advanced by ETH_GSTRING_LEN per string so the caller can
 * continue appending. @qset offsets the printed queue index.
 * NOTE(review): start_qidx is derived from rx_queues for the Tx loop
 * too — assumes rx and tx queue counts match per qset; confirm.
 */
static void otx2_get_qset_strings(struct otx2_nic *pfvf, u8 **data, int qset)
{
	int start_qidx = qset * pfvf->hw.rx_queues;
	int qidx, stats;

	for (qidx = 0; qidx < pfvf->hw.rx_queues; qidx++) {
		for (stats = 0; stats < otx2_n_queue_stats; stats++) {
			sprintf(*data, "rxq%d: %s", qidx + start_qidx,
				otx2_queue_stats[stats].name);
			*data += ETH_GSTRING_LEN;
		}
	}
	for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) {
		for (stats = 0; stats < otx2_n_queue_stats; stats++) {
			sprintf(*data, "txq%d: %s", qidx + start_qidx,
				otx2_queue_stats[stats].name);
			*data += ETH_GSTRING_LEN;
		}
	}
}

/* ethtool .get_strings: stat names in the exact order that
 * otx2_get_ethtool_stats() emits values (dev, drv, per-queue, CGX Rx/Tx,
 * reset count, FEC). Must stay in sync with otx2_get_sset_count().
 */
static void otx2_get_strings(struct net_device *netdev, u32 sset, u8 *data)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	int stats;

	if (sset != ETH_SS_STATS)
		return;

	for (stats = 0; stats < otx2_n_dev_stats; stats++) {
		memcpy(data, otx2_dev_stats[stats].name, ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}

	for (stats = 0; stats < otx2_n_drv_stats; stats++) {
		memcpy(data, otx2_drv_stats[stats].name, ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}

	otx2_get_qset_strings(pfvf, &data, 0);

	/* CGX MAC-level stats are only identified by index here */
	for (stats = 0; stats < CGX_RX_STATS_COUNT; stats++) {
		sprintf(data, "cgx_rxstat%d: ", stats);
		data += ETH_GSTRING_LEN;
	}

	for (stats = 0; stats < CGX_TX_STATS_COUNT; stats++) {
		sprintf(data, "cgx_txstat%d: ", stats);
		data += ETH_GSTRING_LEN;
	}

	strcpy(data, "reset_count");
	data += ETH_GSTRING_LEN;
	sprintf(data, "Fec Corrected Errors: ");
	data += ETH_GSTRING_LEN;
	sprintf(data, "Fec Uncorrected Errors: ");
	data += ETH_GSTRING_LEN;
}

/* Append per-RQ then per-SQ bytes/frames counters to *data.
 * Queues whose stats cannot be refreshed report zeros so the layout
 * always matches the string table.
 */
static void otx2_get_qset_stats(struct otx2_nic *pfvf,
				struct ethtool_stats *stats, u64 **data)
{
	int stat, qidx;

	if (!pfvf)
		return;
	for (qidx = 0; qidx < pfvf->hw.rx_queues; qidx++) {
		if (!otx2_update_rq_stats(pfvf, qidx)) {
			for (stat = 0; stat < otx2_n_queue_stats; stat++)
				*((*data)++) = 0;
			continue;
		}
		for (stat = 0; stat < otx2_n_queue_stats; stat++)
			*((*data)++) = ((u64 *)&pfvf->qset.rq[qidx].stats)
				[otx2_queue_stats[stat].index];
	}

	for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) {
		if (!otx2_update_sq_stats(pfvf, qidx)) {
			for (stat = 0; stat < otx2_n_queue_stats; stat++)
				*((*data)++) = 0;
			continue;
		}
		for (stat = 0; stat < otx2_n_queue_stats; stat++)
			*((*data)++) = ((u64 *)&pfvf->qset.sq[qidx].stats)
				[otx2_queue_stats[stat].index];
	}
}

/* Ask firmware (via mbox) to refresh PHY FEC statistics.
 * Returns 0 on success, -ENOMEM if the mbox message could not be
 * allocated or the sync failed.
 */
static int otx2_get_phy_fec_stats(struct otx2_nic *pfvf)
{
	struct msg_req *req;
	int rc = -ENOMEM;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_cgx_get_phy_fec_stats(&pfvf->mbox);
	if (!req)
		goto end;

	if (!otx2_sync_mbox_msg(&pfvf->mbox))
		rc = 0;
end:
	mutex_unlock(&pfvf->mbox.lock);
	return rc;
}

/* Get device and per queue statistics */
static void otx2_get_ethtool_stats(struct net_device *netdev,
				   struct ethtool_stats *stats, u64 *data)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	u64 fec_corr_blks, fec_uncorr_blks;
	struct cgx_fw_data *rsp;
	int stat;

	otx2_get_dev_stats(pfvf);
	for (stat = 0; stat < otx2_n_dev_stats; stat++)
		*(data++) = ((u64 *)&pfvf->hw.dev_stats)
				[otx2_dev_stats[stat].index];

	for (stat = 0; stat <
otx2_n_drv_stats; stat++)
		*(data++) = atomic_read(&((atomic_t *)&pfvf->hw.drv_stats)
						[otx2_drv_stats[stat].index]);

	otx2_get_qset_stats(pfvf, stats, &data);
	otx2_update_lmac_stats(pfvf);
	for (stat = 0; stat < CGX_RX_STATS_COUNT; stat++)
		*(data++) = pfvf->hw.cgx_rx_stats[stat];
	for (stat = 0; stat < CGX_TX_STATS_COUNT; stat++)
		*(data++) = pfvf->hw.cgx_tx_stats[stat];
	*(data++) = pfvf->reset_count;

	/* Default to MAC-level FEC counters; overridden below by PHY
	 * counters when the PHY reports its own FEC stats.
	 */
	fec_corr_blks = pfvf->hw.cgx_fec_corr_blks;
	fec_uncorr_blks = pfvf->hw.cgx_fec_uncorr_blks;

	rsp = otx2_get_fwdata(pfvf);
	if (!IS_ERR(rsp) && rsp->fwdata.phy.misc.has_fec_stats &&
	    !otx2_get_phy_fec_stats(pfvf)) {
		/* Fetch fwdata again because it's been recently populated with
		 * latest PHY FEC stats.
		 */
		rsp = otx2_get_fwdata(pfvf);
		if (!IS_ERR(rsp)) {
			struct fec_stats_s *p = &rsp->fwdata.phy.fec_stats;

			if (pfvf->linfo.fec == OTX2_FEC_BASER) {
				fec_corr_blks = p->brfec_corr_blks;
				fec_uncorr_blks = p->brfec_uncorr_blks;
			} else {
				fec_corr_blks = p->rsfec_corr_cws;
				fec_uncorr_blks = p->rsfec_uncorr_cws;
			}
		}
	}

	*(data++) = fec_corr_blks;
	*(data++) = fec_uncorr_blks;
}

/* ethtool .get_sset_count: total number of stats reported; must match
 * the layout produced by otx2_get_strings()/otx2_get_ethtool_stats()
 * (the trailing "+ 1" is the reset_count entry).
 */
static int otx2_get_sset_count(struct net_device *netdev, int sset)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	int qstats_count;

	if (sset != ETH_SS_STATS)
		return -EINVAL;

	qstats_count = otx2_n_queue_stats *
		       (pfvf->hw.rx_queues + pfvf->hw.tx_queues);
	otx2_update_lmac_fec_stats(pfvf);

	return otx2_n_dev_stats + otx2_n_drv_stats + qstats_count +
	       CGX_RX_STATS_COUNT + CGX_TX_STATS_COUNT + OTX2_FEC_STATS_CNT
	       + 1;
}

/* Get no of queues device supports and current queue count */
static void otx2_get_channels(struct net_device *dev,
			      struct ethtool_channels *channel)
{
	struct otx2_nic *pfvf = netdev_priv(dev);

	channel->max_rx = pfvf->hw.max_queues;
	channel->max_tx = pfvf->hw.max_queues;

	channel->rx_count = pfvf->hw.rx_queues;
	channel->tx_count = pfvf->hw.tx_queues;
}

/* Set no of Tx, Rx queues to be used.
 * Refused while more than one RQ is claimed by a TC police action,
 * since reshaping queues would invalidate those mappings. The interface
 * is bounced (ndo_stop/ndo_open) around the reconfiguration.
 */
static int otx2_set_channels(struct net_device *dev,
			     struct ethtool_channels *channel)
{
	struct otx2_nic *pfvf = netdev_priv(dev);
	bool if_up = netif_running(dev);
	int err = 0;

	if (!channel->rx_count || !channel->tx_count)
		return -EINVAL;

	if (bitmap_weight(&pfvf->rq_bmap, pfvf->hw.rx_queues) > 1) {
		netdev_err(dev,
			   "Receive queues are in use by TC police action\n");
		return -EINVAL;
	}

	if (if_up)
		dev->netdev_ops->ndo_stop(dev);

	err = otx2_set_real_num_queues(dev, channel->tx_count,
				       channel->rx_count);
	if (err)
		goto fail;

	pfvf->hw.rx_queues = channel->rx_count;
	pfvf->hw.tx_queues = channel->tx_count;
	/* One CQ per RQ plus one per SQ */
	pfvf->qset.cq_cnt = pfvf->hw.tx_queues +  pfvf->hw.rx_queues;

fail:
	if (if_up)
		dev->netdev_ops->ndo_open(dev);

	/* NOTE(review): the "success" message is printed even on the
	 * failure path above — confirm whether it should be conditional.
	 */
	netdev_info(dev, "Setting num Tx rings to %d, Rx rings to %d success\n",
		    pfvf->hw.tx_queues, pfvf->hw.rx_queues);

	return err;
}

/* ethtool .get_pauseparam: query current Rx/Tx pause state from CGX
 * firmware. Loopback VFs have no physical link, hence no pause config.
 */
static void otx2_get_pauseparam(struct net_device *netdev,
				struct ethtool_pauseparam *pause)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	struct cgx_pause_frm_cfg *req, *rsp;

	if (is_otx2_lbkvf(pfvf->pdev))
		return;

	req = otx2_mbox_alloc_msg_cgx_cfg_pause_frm(&pfvf->mbox);
	if (!req)
		return;

	if (!otx2_sync_mbox_msg(&pfvf->mbox)) {
		rsp = (struct cgx_pause_frm_cfg *)
		       otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
		pause->rx_pause = rsp->rx_pause;
		pause->tx_pause = rsp->tx_pause;
	}
}

/* ethtool .set_pauseparam: record requested Rx/Tx pause flags and push
 * them to hardware. Pause autonegotiation is not supported.
 */
static int otx2_set_pauseparam(struct net_device *netdev,
			       struct ethtool_pauseparam *pause)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);

	if (pause->autoneg)
		return -EOPNOTSUPP;

	if (is_otx2_lbkvf(pfvf->pdev))
		return -EOPNOTSUPP;

	if (pause->rx_pause)
		pfvf->flags |= OTX2_FLAG_RX_PAUSE_ENABLED;
	else
		pfvf->flags &= ~OTX2_FLAG_RX_PAUSE_ENABLED;

	if (pause->tx_pause)
		pfvf->flags |= OTX2_FLAG_TX_PAUSE_ENABLED;
	else
		pfvf->flags &= ~OTX2_FLAG_TX_PAUSE_ENABLED;

	return otx2_config_pause_frm(pfvf);
}

/* ethtool .get_ringparam: report current and maximum ring sizes;
 * defaults (256 Rx / 4K Tx) are shown until the user sets a size.
 */
static void otx2_get_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *ring)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	struct otx2_qset *qs = &pfvf->qset;

	ring->rx_max_pending = Q_COUNT(Q_SIZE_MAX);
	ring->rx_pending = qs->rqe_cnt ? qs->rqe_cnt : Q_COUNT(Q_SIZE_256);
	ring->tx_max_pending = Q_COUNT(Q_SIZE_MAX);
	ring->tx_pending = qs->sqe_cnt ? qs->sqe_cnt : Q_COUNT(Q_SIZE_4K);
}

/* ethtool .set_ringparam: round requested counts to the supported
 * power-of-4-style steps and restart the interface if sizes changed.
 */
static int otx2_set_ringparam(struct net_device *netdev,
			      struct ethtool_ringparam *ring)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	bool if_up = netif_running(netdev);
	struct otx2_qset *qs = &pfvf->qset;
	u32 rx_count, tx_count;

	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
		return -EINVAL;

	/* Permitted lengths are 16 64 256 1K 4K 16K 64K 256K 1M  */
	rx_count = ring->rx_pending;
	/* On some silicon variants a skid or reserved CQEs are
	 * needed to avoid CQ overflow.
	 */
	if (rx_count < pfvf->hw.rq_skid)
		rx_count =  pfvf->hw.rq_skid;
	rx_count = Q_COUNT(Q_SIZE(rx_count, 3));

	/* Due pipelining impact minimum 2000 unused SQ CQE's
	 * need to be maintained to avoid CQ overflow, hence the
	 * minimum 4K size.
	 */
	tx_count = clamp_t(u32, ring->tx_pending,
			   Q_COUNT(Q_SIZE_4K), Q_COUNT(Q_SIZE_MAX));
	tx_count = Q_COUNT(Q_SIZE(tx_count, 3));

	if (tx_count == qs->sqe_cnt && rx_count == qs->rqe_cnt)
		return 0;

	if (if_up)
		netdev->netdev_ops->ndo_stop(netdev);

	/* Assigned to the nearest possible exponent.
*/
	qs->sqe_cnt = tx_count;
	qs->rqe_cnt = rx_count;

	if (if_up)
		netdev->netdev_ops->ndo_open(netdev);

	return 0;
}

/* ethtool .get_coalesce: Rx and Tx share one CQ per interrupt, so both
 * directions report the same timer/frame-count thresholds.
 */
static int otx2_get_coalesce(struct net_device *netdev,
			     struct ethtool_coalesce *cmd)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	struct otx2_hw *hw = &pfvf->hw;

	cmd->rx_coalesce_usecs = hw->cq_time_wait;
	cmd->rx_max_coalesced_frames = hw->cq_ecount_wait;
	cmd->tx_coalesce_usecs = hw->cq_time_wait;
	cmd->tx_max_coalesced_frames = hw->cq_ecount_wait;

	return 0;
}

/* ethtool .set_coalesce: clamp user values to hardware limits, then
 * reconcile the shared Rx/Tx CQ settings (the direction the user
 * changed wins; if both changed, the smaller value is used) and apply
 * to every configured completion interrupt if the device is up.
 */
static int otx2_set_coalesce(struct net_device *netdev,
			     struct ethtool_coalesce *ec)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	struct otx2_hw *hw = &pfvf->hw;
	int qidx;

	if (!ec->rx_max_coalesced_frames || !ec->tx_max_coalesced_frames)
		return 0;

	/* 'cq_time_wait' is 8bit and is in multiple of 100ns,
	 * so clamp the user given value to the range of 1 to 25usec.
	 */
	ec->rx_coalesce_usecs = clamp_t(u32, ec->rx_coalesce_usecs,
					1, CQ_TIMER_THRESH_MAX);
	ec->tx_coalesce_usecs = clamp_t(u32, ec->tx_coalesce_usecs,
					1, CQ_TIMER_THRESH_MAX);

	/* Rx and Tx are mapped to same CQ, check which one
	 * is changed, if both then choose the min.
	 */
	if (hw->cq_time_wait == ec->rx_coalesce_usecs)
		hw->cq_time_wait = ec->tx_coalesce_usecs;
	else if (hw->cq_time_wait == ec->tx_coalesce_usecs)
		hw->cq_time_wait = ec->rx_coalesce_usecs;
	else
		hw->cq_time_wait = min_t(u8, ec->rx_coalesce_usecs,
					 ec->tx_coalesce_usecs);

	/* Max ecount_wait supported is 16bit,
	 * so clamp the user given value to the range of 1 to 64k.
	 */
	ec->rx_max_coalesced_frames = clamp_t(u32, ec->rx_max_coalesced_frames,
					      1, U16_MAX);
	ec->tx_max_coalesced_frames = clamp_t(u32, ec->tx_max_coalesced_frames,
					      1, U16_MAX);

	/* Rx and Tx are mapped to same CQ, check which one
	 * is changed, if both then choose the min.
	 */
	if (hw->cq_ecount_wait == ec->rx_max_coalesced_frames)
		hw->cq_ecount_wait = ec->tx_max_coalesced_frames;
	else if (hw->cq_ecount_wait == ec->tx_max_coalesced_frames)
		hw->cq_ecount_wait = ec->rx_max_coalesced_frames;
	else
		hw->cq_ecount_wait = min_t(u16, ec->rx_max_coalesced_frames,
					   ec->tx_max_coalesced_frames);

	if (netif_running(netdev)) {
		for (qidx = 0; qidx < pfvf->hw.cint_cnt; qidx++)
			otx2_config_irq_coalescing(pfvf, qidx);
	}

	return 0;
}

/* Translate the NIX RSS flow-key configuration into the RXH_* flags
 * ethtool reports for @nfc->flow_type (ETHTOOL_GRXFH).
 */
static int otx2_get_rss_hash_opts(struct otx2_nic *pfvf,
				  struct ethtool_rxnfc *nfc)
{
	struct otx2_rss_info *rss = &pfvf->hw.rss_info;

	if (!(rss->flowkey_cfg &
	    (NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6)))
		return 0;

	/* Minimum is IPv4 and IPv6, SIP/DIP */
	nfc->data = RXH_IP_SRC | RXH_IP_DST;
	if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_VLAN)
		nfc->data |= RXH_VLAN;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_TCP)
			nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
	case UDP_V4_FLOW:
	case UDP_V6_FLOW:
		if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_UDP)
			nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
	case SCTP_V4_FLOW:
	case SCTP_V6_FLOW:
		if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_SCTP)
			nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
	case AH_ESP_V4_FLOW:
	case AH_ESP_V6_FLOW:
		if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_ESP)
			nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case IPV4_FLOW:
		break;
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IPV6_FLOW:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/* Apply ETHTOOL_SRXFH: map requested RXH_* flags onto NIX flow-key
 * types and push the new configuration to hardware. Per flow type only
 * 2-tuple (SIP/DIP) or full 4-tuple hashing is accepted.
 */
static int otx2_set_rss_hash_opts(struct otx2_nic *pfvf,
				  struct ethtool_rxnfc *nfc)
{
	struct otx2_rss_info *rss = &pfvf->hw.rss_info;
	u32 rxh_l4 = RXH_L4_B_0_1 | RXH_L4_B_2_3;
	u32 rss_cfg = rss->flowkey_cfg;

	if (!rss->enable) {
		netdev_err(pfvf->netdev,
			   "RSS is disabled, cannot change settings\n");
		return -EIO;
	}

	/* Minimum is IPv4 and IPv6, SIP/DIP */
	if (!(nfc->data & RXH_IP_SRC) || !(nfc->data & RXH_IP_DST))
		return -EINVAL;

	if (nfc->data & RXH_VLAN)
		rss_cfg |=  NIX_FLOW_KEY_TYPE_VLAN;
	else
		rss_cfg &= ~NIX_FLOW_KEY_TYPE_VLAN;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		/* Different config for v4 and v6 is not supported.
		 * Both of them have to be either 4-tuple or 2-tuple.
		 */
		switch (nfc->data & rxh_l4) {
		case 0:
			rss_cfg &= ~NIX_FLOW_KEY_TYPE_TCP;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			rss_cfg |= NIX_FLOW_KEY_TYPE_TCP;
			break;
		default:
			return -EINVAL;
		}
		break;
	case UDP_V4_FLOW:
	case UDP_V6_FLOW:
		switch (nfc->data & rxh_l4) {
		case 0:
			rss_cfg &= ~NIX_FLOW_KEY_TYPE_UDP;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			rss_cfg |= NIX_FLOW_KEY_TYPE_UDP;
			break;
		default:
			return -EINVAL;
		}
		break;
	case SCTP_V4_FLOW:
	case SCTP_V6_FLOW:
		switch (nfc->data & rxh_l4) {
		case 0:
			rss_cfg &= ~NIX_FLOW_KEY_TYPE_SCTP;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			rss_cfg |= NIX_FLOW_KEY_TYPE_SCTP;
			break;
		default:
			return -EINVAL;
		}
		break;
	case AH_ESP_V4_FLOW:
	case AH_ESP_V6_FLOW:
		switch (nfc->data & rxh_l4) {
		case 0:
			rss_cfg &= ~(NIX_FLOW_KEY_TYPE_ESP |
				     NIX_FLOW_KEY_TYPE_AH);
			rss_cfg |= NIX_FLOW_KEY_TYPE_VLAN |
				   NIX_FLOW_KEY_TYPE_IPV4_PROTO;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			/* If VLAN hashing is also requested for ESP then do not
			 * allow because of hardware 40 bytes flow key limit.
*/
			if (rss_cfg & NIX_FLOW_KEY_TYPE_VLAN) {
				netdev_err(pfvf->netdev,
					   "RSS hash of ESP or AH with VLAN is not supported\n");
				return -EOPNOTSUPP;
			}

			rss_cfg |= NIX_FLOW_KEY_TYPE_ESP | NIX_FLOW_KEY_TYPE_AH;
			/* Disable IPv4 proto hashing since IPv6 SA+DA(32 bytes)
			 * and ESP SPI+sequence(8 bytes) uses hardware maximum
			 * limit of 40 byte flow key.
			 */
			rss_cfg &= ~NIX_FLOW_KEY_TYPE_IPV4_PROTO;
			break;
		default:
			return -EINVAL;
		}
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		rss_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6;
		break;
	default:
		return -EINVAL;
	}

	rss->flowkey_cfg = rss_cfg;
	otx2_set_flowkey_cfg(pfvf);
	return 0;
}

/* ethtool .get_rxnfc (PF): ring count, ntuple-filter queries and RSS
 * hash options.
 */
static int otx2_get_rxnfc(struct net_device *dev,
			  struct ethtool_rxnfc *nfc, u32 *rules)
{
	struct otx2_nic *pfvf = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (nfc->cmd) {
	case ETHTOOL_GRXRINGS:
		nfc->data = pfvf->hw.rx_queues;
		ret = 0;
		break;
	case ETHTOOL_GRXCLSRLCNT:
		nfc->rule_cnt = pfvf->flow_cfg->nr_flows;
		ret = 0;
		break;
	case ETHTOOL_GRXCLSRULE:
		ret = otx2_get_flow(pfvf, nfc,  nfc->fs.location);
		break;
	case ETHTOOL_GRXCLSRLALL:
		ret = otx2_get_all_flows(pfvf, nfc, rules);
		break;
	case ETHTOOL_GRXFH:
		return otx2_get_rss_hash_opts(pfvf, nfc);
	default:
		break;
	}
	return ret;
}

/* ethtool .set_rxnfc (PF): RSS hash options plus ntuple flow
 * insert/delete. Flow rules require the interface to be running and
 * NETIF_F_NTUPLE to be enabled.
 */
static int otx2_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *nfc)
{
	bool ntuple = !!(dev->features & NETIF_F_NTUPLE);
	struct otx2_nic *pfvf = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (nfc->cmd) {
	case ETHTOOL_SRXFH:
		ret = otx2_set_rss_hash_opts(pfvf, nfc);
		break;
	case ETHTOOL_SRXCLSRLINS:
		if (netif_running(dev) && ntuple)
			ret = otx2_add_flow(pfvf, nfc);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		if (netif_running(dev) && ntuple)
			ret = otx2_remove_flow(pfvf, nfc->fs.location);
		break;
	default:
		break;
	}

	return ret;
}

/* ethtool .get_rxnfc (VF): ring count and RSS hash options only —
 * no ntuple flow rules on VFs.
 */
static int otx2vf_get_rxnfc(struct net_device *dev,
			    struct ethtool_rxnfc *nfc, u32 *rules)
{
	struct otx2_nic *pfvf = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (nfc->cmd) {
	case ETHTOOL_GRXRINGS:
		nfc->data = pfvf->hw.rx_queues;
		ret = 0;
		break;
	case ETHTOOL_GRXFH:
		return otx2_get_rss_hash_opts(pfvf, nfc);
	default:
		break;
	}
	return ret;
}

/* ethtool .set_rxnfc (VF): RSS hash options only */
static int otx2vf_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *nfc)
{
	struct otx2_nic *pfvf = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (nfc->cmd) {
	case ETHTOOL_SRXFH:
		ret = otx2_set_rss_hash_opts(pfvf, nfc);
		break;
	default:
		break;
	}

	return ret;
}

/* ethtool .get_rxfh_key_size: size of the RSS hash key in bytes */
static u32 otx2_get_rxfh_key_size(struct net_device *netdev)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	struct otx2_rss_info *rss;

	rss = &pfvf->hw.rss_info;

	return sizeof(rss->key);
}

/* ethtool .get_rxfh_indir_size: RSS indirection table length */
static u32 otx2_get_rxfh_indir_size(struct net_device *dev)
{
	return MAX_RSS_INDIR_TBL_SIZE;
}

/* Free an RSS context: drop flows pinned to it, release its memory and
 * clear the slot so it can be reallocated.
 */
static int otx2_rss_ctx_delete(struct otx2_nic *pfvf, int ctx_id)
{
	struct otx2_rss_info *rss = &pfvf->hw.rss_info;

	otx2_rss_ctx_flow_del(pfvf, ctx_id);
	kfree(rss->rss_ctx[ctx_id]);
	rss->rss_ctx[ctx_id] = NULL;

	return 0;
}

/* Allocate the first free RSS context slot; on success *rss_context is
 * set to its index. Returns -EINVAL when all groups are in use.
 */
static int otx2_rss_ctx_create(struct otx2_nic *pfvf,
			       u32 *rss_context)
{
	struct otx2_rss_info *rss = &pfvf->hw.rss_info;
	u8 ctx;

	for (ctx = 0; ctx < MAX_RSS_GROUPS; ctx++) {
		if (!rss->rss_ctx[ctx])
			break;
	}
	if (ctx == MAX_RSS_GROUPS)
		return -EINVAL;

	rss->rss_ctx[ctx] = kzalloc(sizeof(*rss->rss_ctx[ctx]), GFP_KERNEL);
	if (!rss->rss_ctx[ctx])
		return -ENOMEM;
	*rss_context = ctx;

	return 0;
}

/* RSS context configuration: create/update/delete an RSS context,
 * optionally replacing the hash key and/or the per-context indirection
 * table. Only the Toeplitz ("top") hash function is supported.
 */
static int otx2_set_rxfh_context(struct net_device *dev, const u32 *indir,
				 const u8 *hkey, const u8 hfunc,
				 u32 *rss_context, bool delete)
{
	struct otx2_nic *pfvf = netdev_priv(dev);
	struct otx2_rss_ctx *rss_ctx;
	struct otx2_rss_info *rss;
	int ret, idx;

	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	if (*rss_context != ETH_RXFH_CONTEXT_ALLOC &&
	    *rss_context >= MAX_RSS_GROUPS)
		return -EINVAL;

	rss = &pfvf->hw.rss_info;

	if (!rss->enable) {
		netdev_err(dev, "RSS is disabled, cannot change settings\n");
		return -EIO;
	}

	if (hkey) {
		memcpy(rss->key, hkey, sizeof(rss->key));
		otx2_set_rss_key(pfvf);
	}
	if (delete)
		return otx2_rss_ctx_delete(pfvf, *rss_context);

	if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
		ret = otx2_rss_ctx_create(pfvf, rss_context);
		if (ret)
			return ret;
	}
	if (indir) {
		rss_ctx = rss->rss_ctx[*rss_context];
		for (idx = 0; idx < rss->rss_size; idx++)
			rss_ctx->ind_tbl[idx] = indir[idx];
	}
	otx2_set_rss_table(pfvf, *rss_context);

	return 0;
}

/* Read back key/indirection table for @rss_context. When RSS is off,
 * the default context reports the kernel's default spread so user space
 * sees a sensible table.
 */
static int otx2_get_rxfh_context(struct net_device *dev, u32 *indir,
				 u8 *hkey, u8 *hfunc, u32 rss_context)
{
	struct otx2_nic *pfvf = netdev_priv(dev);
	struct otx2_rss_ctx *rss_ctx;
	struct otx2_rss_info *rss;
	int idx, rx_queues;

	rss = &pfvf->hw.rss_info;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;

	if (!indir)
		return 0;

	if (!rss->enable && rss_context == DEFAULT_RSS_CONTEXT_GROUP) {
		rx_queues = pfvf->hw.rx_queues;
		for (idx = 0; idx < MAX_RSS_INDIR_TBL_SIZE; idx++)
			indir[idx] = ethtool_rxfh_indir_default(idx, rx_queues);
		return 0;
	}
	if (rss_context >= MAX_RSS_GROUPS)
		return -ENOENT;

	rss_ctx = rss->rss_ctx[rss_context];
	if (!rss_ctx)
		return -ENOENT;

	if (indir) {
		for (idx = 0; idx < rss->rss_size; idx++)
indir[idx] = rss_ctx->ind_tbl[idx];
	}
	if (hkey)
		memcpy(hkey, rss->key, sizeof(rss->key));

	return 0;
}

/* Get RSS configuration */
static int otx2_get_rxfh(struct net_device *dev, u32 *indir,
			 u8 *hkey, u8 *hfunc)
{
	return otx2_get_rxfh_context(dev, indir, hkey, hfunc,
				     DEFAULT_RSS_CONTEXT_GROUP);
}

/* Configure RSS table and hash key */
static int otx2_set_rxfh(struct net_device *dev, const u32 *indir,
			 const u8 *hkey, const u8 hfunc)
{

	u32 rss_context = DEFAULT_RSS_CONTEXT_GROUP;

	return otx2_set_rxfh_context(dev, indir, hkey, hfunc, &rss_context, 0);
}

/* ethtool .get_msglevel */
static u32 otx2_get_msglevel(struct net_device *netdev)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);

	return pfvf->msg_enable;
}

/* ethtool .set_msglevel */
static void otx2_set_msglevel(struct net_device *netdev, u32 val)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);

	pfvf->msg_enable = val;
}

/* ethtool .get_link: report link state from the last link notification */
static u32 otx2_get_link(struct net_device *netdev)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);

	/* LBK link is internal and always UP */
	if (is_otx2_lbkvf(pfvf->pdev))
		return 1;
	return pfvf->linfo.link_up;
}

/* ethtool .get_ts_info: advertise HW timestamping capabilities when a
 * PTP clock is present; otherwise fall back to SW-only defaults.
 */
static int otx2_get_ts_info(struct net_device *netdev,
			    struct ethtool_ts_info *info)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);

	if (!pfvf->ptp)
		return ethtool_op_get_ts_info(netdev, info);

	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_RX_SOFTWARE |
				SOF_TIMESTAMPING_SOFTWARE |
				SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;

	info->phc_index = otx2_ptp_clock_index(pfvf);

	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);

	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			   (1 << HWTSTAMP_FILTER_ALL);

	return 0;
}

/* Fetch auxiliary link info (link modes, FEC, PHY data) from CGX
 * firmware over the mbox. Returns the response or an ERR_PTR; the
 * returned pointer references mbox memory — caller must not free it.
 */
static struct cgx_fw_data *otx2_get_fwdata(struct otx2_nic *pfvf)
{
	struct cgx_fw_data *rsp = NULL;
	struct msg_req *req;
	int err = 0;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_cgx_get_aux_link_info(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return ERR_PTR(-ENOMEM);
	}

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (!err) {
		rsp = (struct cgx_fw_data *)
			otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
	} else {
		rsp = ERR_PTR(err);
	}

	mutex_unlock(&pfvf->mbox.lock);
	return rsp;
}

/* ethtool .get_fecparam: map the driver's OTX2_FEC_* encoding (index
 * into fec[]) to ETHTOOL_FEC_* flags for both active and supported FEC.
 */
static int otx2_get_fecparam(struct net_device *netdev,
			     struct ethtool_fecparam *fecparam)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	struct cgx_fw_data *rsp;
	const int fec[] = {
		ETHTOOL_FEC_OFF,
		ETHTOOL_FEC_BASER,
		ETHTOOL_FEC_RS,
		ETHTOOL_FEC_BASER | ETHTOOL_FEC_RS};
#define FEC_MAX_INDEX 4
	if (pfvf->linfo.fec < FEC_MAX_INDEX)
		fecparam->active_fec = fec[pfvf->linfo.fec];

	rsp = otx2_get_fwdata(pfvf);
	if (IS_ERR(rsp))
		return PTR_ERR(rsp);

	if (rsp->fwdata.supported_fec < FEC_MAX_INDEX) {
		if (!rsp->fwdata.supported_fec)
			fecparam->fec = ETHTOOL_FEC_NONE;
		else
			fecparam->fec = fec[rsp->fwdata.supported_fec];
	}
	return 0;
}

/* ethtool .set_fecparam: translate the requested ETHTOOL_FEC_* mode to
 * the firmware encoding and apply it via mbox. A no-op if the requested
 * mode is already active.
 */
static int otx2_set_fecparam(struct net_device *netdev,
			     struct ethtool_fecparam *fecparam)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	struct mbox *mbox = &pfvf->mbox;
	struct fec_mode *req, *rsp;
	int err = 0, fec = 0;

	switch (fecparam->fec) {
	/* Firmware does not support AUTO mode consider it as FEC_OFF */
	case ETHTOOL_FEC_OFF:
	case ETHTOOL_FEC_AUTO:
		fec = OTX2_FEC_OFF;
		break;
	case ETHTOOL_FEC_RS:
		fec = OTX2_FEC_RS;
		break;
	case ETHTOOL_FEC_BASER:
		fec = OTX2_FEC_BASER;
		break;
	default:
		netdev_warn(pfvf->netdev, "Unsupported FEC mode: %d",
			    fecparam->fec);
		return -EINVAL;
	}

	if (fec == pfvf->linfo.fec)
		return 0;

	mutex_lock(&mbox->lock);
	req = otx2_mbox_alloc_msg_cgx_set_fec_param(&pfvf->mbox);
	if (!req) {
		err = -ENOMEM;
		goto end;
	}
	req->fec = fec;
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err)
		goto end;

	rsp = (struct fec_mode *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
						   0, &req->hdr);
	/* A negative rsp->fec is a firmware error code */
	if (rsp->fec >= 0)
		pfvf->linfo.fec = rsp->fec;
	else
		err = rsp->fec;
end:
	mutex_unlock(&mbox->lock);
	return err;
}

/* OR the FEC linkmode bits for firmware FEC encoding @index into the
 * supported or advertised mask of @link_ksettings (selected by
 * @req_mode), preserving the link-mode bits already set there.
 */
static void otx2_get_fec_info(u64 index, int req_mode,
			      struct ethtool_link_ksettings *link_ksettings)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(otx2_fec_modes) = { 0, };

	switch (index) {
	case OTX2_FEC_NONE:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
				 otx2_fec_modes);
		break;
	case OTX2_FEC_BASER:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
				 otx2_fec_modes);
		break;
	case OTX2_FEC_RS:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
				 otx2_fec_modes);
		break;
	case OTX2_FEC_BASER | OTX2_FEC_RS:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
				 otx2_fec_modes);
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
				 otx2_fec_modes);
		break;
	}

	/* Add fec modes to existing modes */
	if (req_mode == OTX2_MODE_ADVERTISED)
		linkmode_or(link_ksettings->link_modes.advertising,
			    link_ksettings->link_modes.advertising,
			    otx2_fec_modes);
	else
		linkmode_or(link_ksettings->link_modes.supported,
			    link_ksettings->link_modes.supported,
			    otx2_fec_modes);
}

/* Convert a CGX firmware link-mode bitmap into ethtool linkmode bits
 * and copy them into the supported or advertised mask.
 * NOTE(review): @req_mode is declared bool but compared against
 * enum link_mode values — works because OTX2_MODE_ADVERTISED == 1,
 * but an enum type would be clearer.
 */
static void otx2_get_link_mode_info(u64 link_mode_bmap,
				    bool req_mode,
				    struct ethtool_link_ksettings
				    *link_ksettings)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(otx2_link_modes) = { 0, };
	/* Bit 0 (SGMII) expands to all 10/100/1000BaseT modes */
	const int otx2_sgmii_features[6] = {
		ETHTOOL_LINK_MODE_10baseT_Half_BIT,
		ETHTOOL_LINK_MODE_10baseT_Full_BIT,
ETHTOOL_LINK_MODE_100baseT_Half_BIT,
		ETHTOOL_LINK_MODE_100baseT_Full_BIT,
		ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
		ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
	};
	/* CGX link modes to Ethtool link mode mapping */
	const int cgx_link_mode[27] = {
		0, /* SGMII Mode */
		ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
		ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
		ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
		ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
		ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
		0,
		ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
		0,
		0,
		ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
		ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
		ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
		ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
		ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
		ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
		0,
		ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
		0,
		ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
		ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
		ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
		0,
		ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
		ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
		ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
		ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT
	};
	u8 bit;

	/* Mask out bits the driver does not expose (zero slots above) */
	link_mode_bmap = link_mode_bmap & OTX2_ETHTOOL_SUPPORTED_MODES;

	for_each_set_bit(bit, (unsigned long *)&link_mode_bmap, 27) {
		/* SGMII mode is set */
		if (bit == 0)
			linkmode_set_bit_array(otx2_sgmii_features,
					       ARRAY_SIZE(otx2_sgmii_features),
					       otx2_link_modes);
		else
			linkmode_set_bit(cgx_link_mode[bit], otx2_link_modes);
	}

	if (req_mode == OTX2_MODE_ADVERTISED)
		linkmode_copy(link_ksettings->link_modes.advertising,
			      otx2_link_modes);
	else
		linkmode_copy(link_ksettings->link_modes.supported,
			      otx2_link_modes);
}

/* ethtool .get_link_ksettings: speed/duplex/autoneg come from the last
 * link notification; supported/advertised link modes and FEC come from
 * CGX firmware aux data.
 */
static int otx2_get_link_ksettings(struct net_device *netdev,
				   struct ethtool_link_ksettings *cmd)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	struct cgx_fw_data *rsp = NULL;

	cmd->base.duplex = pfvf->linfo.full_duplex;
	cmd->base.speed = pfvf->linfo.speed;
	cmd->base.autoneg = pfvf->linfo.an;

	rsp = otx2_get_fwdata(pfvf);
	if (IS_ERR(rsp))
		return PTR_ERR(rsp);

	if (rsp->fwdata.supported_an)
		ethtool_link_ksettings_add_link_mode(cmd,
						     supported,
						     Autoneg);

	otx2_get_link_mode_info(rsp->fwdata.advertised_link_modes,
				OTX2_MODE_ADVERTISED, cmd);
	otx2_get_fec_info(rsp->fwdata.advertised_fec,
			  OTX2_MODE_ADVERTISED, cmd);
	otx2_get_link_mode_info(rsp->fwdata.supported_link_modes,
				OTX2_MODE_SUPPORTED, cmd);
	otx2_get_fec_info(rsp->fwdata.supported_fec,
			  OTX2_MODE_SUPPORTED, cmd);
	return 0;
}

/* Pick the single mode to send to firmware from the advertised mask */
static void otx2_get_advertised_mode(const struct ethtool_link_ksettings *cmd,
				     u64 *mode)
{
	u32 bit_pos;

	/* Firmware does not support requesting multiple advertised modes
	 * return first set bit
	 */
	bit_pos = find_first_bit(cmd->link_modes.advertising,
				 __ETHTOOL_LINK_MODE_MASK_NBITS);
	if (bit_pos != __ETHTOOL_LINK_MODE_MASK_NBITS)
		*mode = bit_pos;
}

/* ethtool .set_link_ksettings: validate the request against the modes
 * the hardware reports as supported, then forward speed/duplex/autoneg
 * and the (single) advertised mode to firmware via mbox.
 */
static int otx2_set_link_ksettings(struct net_device *netdev,
				   const struct ethtool_link_ksettings *cmd)
{
	struct otx2_nic *pf = netdev_priv(netdev);
	struct ethtool_link_ksettings cur_ks;
	struct cgx_set_link_mode_req *req;
	struct mbox *mbox = &pf->mbox;
	int err = 0;

	memset(&cur_ks, 0, sizeof(struct ethtool_link_ksettings));

	if (!ethtool_validate_speed(cmd->base.speed) ||
	    !ethtool_validate_duplex(cmd->base.duplex))
		return -EINVAL;

	if (cmd->base.autoneg != AUTONEG_ENABLE &&
	    cmd->base.autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	otx2_get_link_ksettings(netdev, &cur_ks);

	/* Check requested modes against supported modes by hardware */
	if (!bitmap_subset(cmd->link_modes.advertising,
			   cur_ks.link_modes.supported,
			   __ETHTOOL_LINK_MODE_MASK_NBITS))
		return -EINVAL;

	mutex_lock(&mbox->lock);
	req = otx2_mbox_alloc_msg_cgx_set_link_mode(&pf->mbox);
	if (!req) {
		err = -ENOMEM;
		goto end;
	}

	req->args.speed = cmd->base.speed;
	/* firmware expects 1 for half duplex and 0 for full duplex
	 * hence inverting
	 */
	req->args.duplex = cmd->base.duplex ^ 0x1;
	req->args.an = cmd->base.autoneg;
	otx2_get_advertised_mode(cmd, &req->args.mode);

	err = otx2_sync_mbox_msg(&pf->mbox);
end:
	mutex_unlock(&mbox->lock);
	return err;
}

static const struct ethtool_ops otx2_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES,
	.get_link		= otx2_get_link,
	.get_drvinfo		= otx2_get_drvinfo,
	.get_strings		= otx2_get_strings,
	.get_ethtool_stats	= otx2_get_ethtool_stats,
	.get_sset_count		= otx2_get_sset_count,
	.set_channels		= otx2_set_channels,
	.get_channels		= otx2_get_channels,
	.get_ringparam		= otx2_get_ringparam,
	.set_ringparam		= otx2_set_ringparam,
	.get_coalesce		= otx2_get_coalesce,
	.set_coalesce		= otx2_set_coalesce,
	.get_rxnfc		= otx2_get_rxnfc,
	.set_rxnfc		= otx2_set_rxnfc,
	.get_rxfh_key_size	= otx2_get_rxfh_key_size,
	.get_rxfh_indir_size	= otx2_get_rxfh_indir_size,
	.get_rxfh		= otx2_get_rxfh,
	.set_rxfh		= otx2_set_rxfh,
	.get_rxfh_context	= otx2_get_rxfh_context,
	.set_rxfh_context	= otx2_set_rxfh_context,
	.get_msglevel		= otx2_get_msglevel,
	.set_msglevel		= otx2_set_msglevel,
	.get_pauseparam		= otx2_get_pauseparam,
	.set_pauseparam		= otx2_set_pauseparam,
	.get_ts_info		= otx2_get_ts_info,
	.get_fecparam		= otx2_get_fecparam,
	.set_fecparam		= otx2_set_fecparam,
	.get_link_ksettings	= otx2_get_link_ksettings,
	.set_link_ksettings	= otx2_set_link_ksettings,
};

/* Install the PF ethtool ops on @netdev */
void otx2_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &otx2_ethtool_ops;
}

/* VF's ethtool APIs */
static void otx2vf_get_drvinfo(struct net_device *netdev,
			       struct ethtool_drvinfo *info)
{
	struct otx2_nic *vf = netdev_priv(netdev);

	strlcpy(info->driver, DRV_VF_NAME, sizeof(info->driver));
	strlcpy(info->bus_info, pci_name(vf->pdev), sizeof(info->bus_info));
}

/* VF stat names: dev stats, drv stats, per-queue stats and reset count
 * — no CGX MAC or FEC entries (the VF has no direct MAC access).
 * Must stay in sync with otx2vf_get_ethtool_stats().
 */
static void otx2vf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
{
	struct otx2_nic *vf = netdev_priv(netdev);
	int stats;

	if (sset != ETH_SS_STATS)
		return;

	for (stats = 0; stats < otx2_n_dev_stats; stats++) {
		memcpy(data, otx2_dev_stats[stats].name, ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}

	for (stats = 0; stats < otx2_n_drv_stats; stats++) {
		memcpy(data, otx2_drv_stats[stats].name, ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}

	otx2_get_qset_strings(vf, &data, 0);

	strcpy(data, "reset_count");
	data += ETH_GSTRING_LEN;
}

/* VF stats values in the exact order of otx2vf_get_strings() */
static void otx2vf_get_ethtool_stats(struct net_device *netdev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct otx2_nic *vf = netdev_priv(netdev);
	int stat;

	otx2_get_dev_stats(vf);
	for (stat = 0; stat < otx2_n_dev_stats; stat++)
		*(data++) = ((u64 *)&vf->hw.dev_stats)
				[otx2_dev_stats[stat].index];

	for (stat = 0; stat < otx2_n_drv_stats; stat++)
		*(data++) = atomic_read(&((atomic_t *)&vf->hw.drv_stats)
						[otx2_drv_stats[stat].index]);

	otx2_get_qset_stats(vf, stats, &data);
	*(data++) = vf->reset_count;
}

/* VF stats count; continues past the end of this chunk */
static int otx2vf_get_sset_count(struct net_device *netdev, int sset)
{
	struct otx2_nic *vf = netdev_priv(netdev);
	int qstats_count;

	if (sset != ETH_SS_STATS)
		return -EINVAL;

	qstats_count =
otx2_n_queue_stats * 1332 (vf->hw.rx_queues + vf->hw.tx_queues); 1333 1334 return otx2_n_dev_stats + otx2_n_drv_stats + qstats_count + 1; 1335 } 1336 1337 static int otx2vf_get_link_ksettings(struct net_device *netdev, 1338 struct ethtool_link_ksettings *cmd) 1339 { 1340 struct otx2_nic *pfvf = netdev_priv(netdev); 1341 1342 if (is_otx2_lbkvf(pfvf->pdev)) { 1343 cmd->base.duplex = DUPLEX_FULL; 1344 cmd->base.speed = SPEED_100000; 1345 } else { 1346 return otx2_get_link_ksettings(netdev, cmd); 1347 } 1348 return 0; 1349 } 1350 1351 static const struct ethtool_ops otx2vf_ethtool_ops = { 1352 .supported_coalesce_params = ETHTOOL_COALESCE_USECS | 1353 ETHTOOL_COALESCE_MAX_FRAMES, 1354 .get_link = otx2_get_link, 1355 .get_drvinfo = otx2vf_get_drvinfo, 1356 .get_strings = otx2vf_get_strings, 1357 .get_ethtool_stats = otx2vf_get_ethtool_stats, 1358 .get_sset_count = otx2vf_get_sset_count, 1359 .set_channels = otx2_set_channels, 1360 .get_channels = otx2_get_channels, 1361 .get_rxnfc = otx2vf_get_rxnfc, 1362 .set_rxnfc = otx2vf_set_rxnfc, 1363 .get_rxfh_key_size = otx2_get_rxfh_key_size, 1364 .get_rxfh_indir_size = otx2_get_rxfh_indir_size, 1365 .get_rxfh = otx2_get_rxfh, 1366 .set_rxfh = otx2_set_rxfh, 1367 .get_rxfh_context = otx2_get_rxfh_context, 1368 .set_rxfh_context = otx2_set_rxfh_context, 1369 .get_ringparam = otx2_get_ringparam, 1370 .set_ringparam = otx2_set_ringparam, 1371 .get_coalesce = otx2_get_coalesce, 1372 .set_coalesce = otx2_set_coalesce, 1373 .get_msglevel = otx2_get_msglevel, 1374 .set_msglevel = otx2_set_msglevel, 1375 .get_pauseparam = otx2_get_pauseparam, 1376 .set_pauseparam = otx2_set_pauseparam, 1377 .get_link_ksettings = otx2vf_get_link_ksettings, 1378 }; 1379 1380 void otx2vf_set_ethtool_ops(struct net_device *netdev) 1381 { 1382 netdev->ethtool_ops = &otx2vf_ethtool_ops; 1383 } 1384 EXPORT_SYMBOL(otx2vf_set_ethtool_ops); 1385