1 // SPDX-License-Identifier: GPL-2.0 2 /* Marvell RVU Ethernet driver 3 * 4 * Copyright (C) 2020 Marvell. 5 * 6 */ 7 8 #include <linux/pci.h> 9 #include <linux/ethtool.h> 10 #include <linux/stddef.h> 11 #include <linux/etherdevice.h> 12 #include <linux/log2.h> 13 #include <linux/net_tstamp.h> 14 #include <linux/linkmode.h> 15 16 #include "otx2_common.h" 17 #include "otx2_ptp.h" 18 19 #define DRV_NAME "rvu-nicpf" 20 #define DRV_VF_NAME "rvu-nicvf" 21 22 struct otx2_stat { 23 char name[ETH_GSTRING_LEN]; 24 unsigned int index; 25 }; 26 27 /* HW device stats */ 28 #define OTX2_DEV_STAT(stat) { \ 29 .name = #stat, \ 30 .index = offsetof(struct otx2_dev_stats, stat) / sizeof(u64), \ 31 } 32 33 enum link_mode { 34 OTX2_MODE_SUPPORTED, 35 OTX2_MODE_ADVERTISED 36 }; 37 38 static const struct otx2_stat otx2_dev_stats[] = { 39 OTX2_DEV_STAT(rx_ucast_frames), 40 OTX2_DEV_STAT(rx_bcast_frames), 41 OTX2_DEV_STAT(rx_mcast_frames), 42 43 OTX2_DEV_STAT(tx_ucast_frames), 44 OTX2_DEV_STAT(tx_bcast_frames), 45 OTX2_DEV_STAT(tx_mcast_frames), 46 }; 47 48 /* Driver level stats */ 49 #define OTX2_DRV_STAT(stat) { \ 50 .name = #stat, \ 51 .index = offsetof(struct otx2_drv_stats, stat) / sizeof(atomic_t), \ 52 } 53 54 static const struct otx2_stat otx2_drv_stats[] = { 55 OTX2_DRV_STAT(rx_fcs_errs), 56 OTX2_DRV_STAT(rx_oversize_errs), 57 OTX2_DRV_STAT(rx_undersize_errs), 58 OTX2_DRV_STAT(rx_csum_errs), 59 OTX2_DRV_STAT(rx_len_errs), 60 OTX2_DRV_STAT(rx_other_errs), 61 }; 62 63 static const struct otx2_stat otx2_queue_stats[] = { 64 { "bytes", 0 }, 65 { "frames", 1 }, 66 }; 67 68 static const unsigned int otx2_n_dev_stats = ARRAY_SIZE(otx2_dev_stats); 69 static const unsigned int otx2_n_drv_stats = ARRAY_SIZE(otx2_drv_stats); 70 static const unsigned int otx2_n_queue_stats = ARRAY_SIZE(otx2_queue_stats); 71 72 static struct cgx_fw_data *otx2_get_fwdata(struct otx2_nic *pfvf); 73 74 static void otx2_get_drvinfo(struct net_device *netdev, 75 struct ethtool_drvinfo *info) 76 { 77 struct 
otx2_nic *pfvf = netdev_priv(netdev); 78 79 strscpy(info->driver, DRV_NAME, sizeof(info->driver)); 80 strscpy(info->bus_info, pci_name(pfvf->pdev), sizeof(info->bus_info)); 81 } 82 83 static void otx2_get_qset_strings(struct otx2_nic *pfvf, u8 **data, int qset) 84 { 85 int start_qidx = qset * pfvf->hw.rx_queues; 86 int qidx, stats; 87 88 for (qidx = 0; qidx < pfvf->hw.rx_queues; qidx++) { 89 for (stats = 0; stats < otx2_n_queue_stats; stats++) { 90 sprintf(*data, "rxq%d: %s", qidx + start_qidx, 91 otx2_queue_stats[stats].name); 92 *data += ETH_GSTRING_LEN; 93 } 94 } 95 96 for (qidx = 0; qidx < otx2_get_total_tx_queues(pfvf); qidx++) { 97 for (stats = 0; stats < otx2_n_queue_stats; stats++) { 98 if (qidx >= pfvf->hw.non_qos_queues) 99 sprintf(*data, "txq_qos%d: %s", 100 qidx + start_qidx - pfvf->hw.non_qos_queues, 101 otx2_queue_stats[stats].name); 102 else 103 sprintf(*data, "txq%d: %s", qidx + start_qidx, 104 otx2_queue_stats[stats].name); 105 *data += ETH_GSTRING_LEN; 106 } 107 } 108 } 109 110 static void otx2_get_strings(struct net_device *netdev, u32 sset, u8 *data) 111 { 112 struct otx2_nic *pfvf = netdev_priv(netdev); 113 int stats; 114 115 if (sset != ETH_SS_STATS) 116 return; 117 118 for (stats = 0; stats < otx2_n_dev_stats; stats++) { 119 memcpy(data, otx2_dev_stats[stats].name, ETH_GSTRING_LEN); 120 data += ETH_GSTRING_LEN; 121 } 122 123 for (stats = 0; stats < otx2_n_drv_stats; stats++) { 124 memcpy(data, otx2_drv_stats[stats].name, ETH_GSTRING_LEN); 125 data += ETH_GSTRING_LEN; 126 } 127 128 otx2_get_qset_strings(pfvf, &data, 0); 129 130 if (!test_bit(CN10K_RPM, &pfvf->hw.cap_flag)) { 131 for (stats = 0; stats < CGX_RX_STATS_COUNT; stats++) { 132 sprintf(data, "cgx_rxstat%d: ", stats); 133 data += ETH_GSTRING_LEN; 134 } 135 136 for (stats = 0; stats < CGX_TX_STATS_COUNT; stats++) { 137 sprintf(data, "cgx_txstat%d: ", stats); 138 data += ETH_GSTRING_LEN; 139 } 140 } 141 142 strcpy(data, "reset_count"); 143 data += ETH_GSTRING_LEN; 144 sprintf(data, "Fec 
Corrected Errors: "); 145 data += ETH_GSTRING_LEN; 146 sprintf(data, "Fec Uncorrected Errors: "); 147 data += ETH_GSTRING_LEN; 148 } 149 150 static void otx2_get_qset_stats(struct otx2_nic *pfvf, 151 struct ethtool_stats *stats, u64 **data) 152 { 153 int stat, qidx; 154 155 if (!pfvf) 156 return; 157 for (qidx = 0; qidx < pfvf->hw.rx_queues; qidx++) { 158 if (!otx2_update_rq_stats(pfvf, qidx)) { 159 for (stat = 0; stat < otx2_n_queue_stats; stat++) 160 *((*data)++) = 0; 161 continue; 162 } 163 for (stat = 0; stat < otx2_n_queue_stats; stat++) 164 *((*data)++) = ((u64 *)&pfvf->qset.rq[qidx].stats) 165 [otx2_queue_stats[stat].index]; 166 } 167 168 for (qidx = 0; qidx < otx2_get_total_tx_queues(pfvf); qidx++) { 169 if (!otx2_update_sq_stats(pfvf, qidx)) { 170 for (stat = 0; stat < otx2_n_queue_stats; stat++) 171 *((*data)++) = 0; 172 continue; 173 } 174 for (stat = 0; stat < otx2_n_queue_stats; stat++) 175 *((*data)++) = ((u64 *)&pfvf->qset.sq[qidx].stats) 176 [otx2_queue_stats[stat].index]; 177 } 178 } 179 180 static int otx2_get_phy_fec_stats(struct otx2_nic *pfvf) 181 { 182 struct msg_req *req; 183 int rc = -ENOMEM; 184 185 mutex_lock(&pfvf->mbox.lock); 186 req = otx2_mbox_alloc_msg_cgx_get_phy_fec_stats(&pfvf->mbox); 187 if (!req) 188 goto end; 189 190 if (!otx2_sync_mbox_msg(&pfvf->mbox)) 191 rc = 0; 192 end: 193 mutex_unlock(&pfvf->mbox.lock); 194 return rc; 195 } 196 197 /* Get device and per queue statistics */ 198 static void otx2_get_ethtool_stats(struct net_device *netdev, 199 struct ethtool_stats *stats, u64 *data) 200 { 201 struct otx2_nic *pfvf = netdev_priv(netdev); 202 u64 fec_corr_blks, fec_uncorr_blks; 203 struct cgx_fw_data *rsp; 204 int stat; 205 206 otx2_get_dev_stats(pfvf); 207 for (stat = 0; stat < otx2_n_dev_stats; stat++) 208 *(data++) = ((u64 *)&pfvf->hw.dev_stats) 209 [otx2_dev_stats[stat].index]; 210 211 for (stat = 0; stat < otx2_n_drv_stats; stat++) 212 *(data++) = atomic_read(&((atomic_t *)&pfvf->hw.drv_stats) 213 
[otx2_drv_stats[stat].index]); 214 215 otx2_get_qset_stats(pfvf, stats, &data); 216 217 if (!test_bit(CN10K_RPM, &pfvf->hw.cap_flag)) { 218 otx2_update_lmac_stats(pfvf); 219 for (stat = 0; stat < CGX_RX_STATS_COUNT; stat++) 220 *(data++) = pfvf->hw.cgx_rx_stats[stat]; 221 for (stat = 0; stat < CGX_TX_STATS_COUNT; stat++) 222 *(data++) = pfvf->hw.cgx_tx_stats[stat]; 223 } 224 225 *(data++) = pfvf->reset_count; 226 227 fec_corr_blks = pfvf->hw.cgx_fec_corr_blks; 228 fec_uncorr_blks = pfvf->hw.cgx_fec_uncorr_blks; 229 230 rsp = otx2_get_fwdata(pfvf); 231 if (!IS_ERR(rsp) && rsp->fwdata.phy.misc.has_fec_stats && 232 !otx2_get_phy_fec_stats(pfvf)) { 233 /* Fetch fwdata again because it's been recently populated with 234 * latest PHY FEC stats. 235 */ 236 rsp = otx2_get_fwdata(pfvf); 237 if (!IS_ERR(rsp)) { 238 struct fec_stats_s *p = &rsp->fwdata.phy.fec_stats; 239 240 if (pfvf->linfo.fec == OTX2_FEC_BASER) { 241 fec_corr_blks = p->brfec_corr_blks; 242 fec_uncorr_blks = p->brfec_uncorr_blks; 243 } else { 244 fec_corr_blks = p->rsfec_corr_cws; 245 fec_uncorr_blks = p->rsfec_uncorr_cws; 246 } 247 } 248 } 249 250 *(data++) = fec_corr_blks; 251 *(data++) = fec_uncorr_blks; 252 } 253 254 static int otx2_get_sset_count(struct net_device *netdev, int sset) 255 { 256 struct otx2_nic *pfvf = netdev_priv(netdev); 257 int qstats_count, mac_stats = 0; 258 259 if (sset != ETH_SS_STATS) 260 return -EINVAL; 261 262 qstats_count = otx2_n_queue_stats * 263 (pfvf->hw.rx_queues + otx2_get_total_tx_queues(pfvf)); 264 if (!test_bit(CN10K_RPM, &pfvf->hw.cap_flag)) 265 mac_stats = CGX_RX_STATS_COUNT + CGX_TX_STATS_COUNT; 266 otx2_update_lmac_fec_stats(pfvf); 267 268 return otx2_n_dev_stats + otx2_n_drv_stats + qstats_count + 269 mac_stats + OTX2_FEC_STATS_CNT + 1; 270 } 271 272 /* Get no of queues device supports and current queue count */ 273 static void otx2_get_channels(struct net_device *dev, 274 struct ethtool_channels *channel) 275 { 276 struct otx2_nic *pfvf = netdev_priv(dev); 277 278 
channel->max_rx = pfvf->hw.max_queues; 279 channel->max_tx = pfvf->hw.max_queues; 280 281 channel->rx_count = pfvf->hw.rx_queues; 282 channel->tx_count = pfvf->hw.tx_queues; 283 } 284 285 /* Set no of Tx, Rx queues to be used */ 286 static int otx2_set_channels(struct net_device *dev, 287 struct ethtool_channels *channel) 288 { 289 struct otx2_nic *pfvf = netdev_priv(dev); 290 bool if_up = netif_running(dev); 291 int err, qos_txqs; 292 293 if (!channel->rx_count || !channel->tx_count) 294 return -EINVAL; 295 296 if (bitmap_weight(&pfvf->rq_bmap, pfvf->hw.rx_queues) > 1) { 297 netdev_err(dev, 298 "Receive queues are in use by TC police action\n"); 299 return -EINVAL; 300 } 301 302 if (if_up) 303 dev->netdev_ops->ndo_stop(dev); 304 305 qos_txqs = bitmap_weight(pfvf->qos.qos_sq_bmap, 306 OTX2_QOS_MAX_LEAF_NODES); 307 308 err = otx2_set_real_num_queues(dev, channel->tx_count + qos_txqs, 309 channel->rx_count); 310 if (err) 311 return err; 312 313 pfvf->hw.rx_queues = channel->rx_count; 314 pfvf->hw.tx_queues = channel->tx_count; 315 if (pfvf->xdp_prog) 316 pfvf->hw.xdp_queues = channel->rx_count; 317 pfvf->hw.non_qos_queues = pfvf->hw.tx_queues + pfvf->hw.xdp_queues; 318 319 if (if_up) 320 err = dev->netdev_ops->ndo_open(dev); 321 322 netdev_info(dev, "Setting num Tx rings to %d, Rx rings to %d success\n", 323 pfvf->hw.tx_queues, pfvf->hw.rx_queues); 324 325 return err; 326 } 327 328 static void otx2_get_pauseparam(struct net_device *netdev, 329 struct ethtool_pauseparam *pause) 330 { 331 struct otx2_nic *pfvf = netdev_priv(netdev); 332 struct cgx_pause_frm_cfg *req, *rsp; 333 334 if (is_otx2_lbkvf(pfvf->pdev)) 335 return; 336 337 mutex_lock(&pfvf->mbox.lock); 338 req = otx2_mbox_alloc_msg_cgx_cfg_pause_frm(&pfvf->mbox); 339 if (!req) { 340 mutex_unlock(&pfvf->mbox.lock); 341 return; 342 } 343 344 if (!otx2_sync_mbox_msg(&pfvf->mbox)) { 345 rsp = (struct cgx_pause_frm_cfg *) 346 otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr); 347 pause->rx_pause = rsp->rx_pause; 348 
pause->tx_pause = rsp->tx_pause; 349 } 350 mutex_unlock(&pfvf->mbox.lock); 351 } 352 353 static int otx2_set_pauseparam(struct net_device *netdev, 354 struct ethtool_pauseparam *pause) 355 { 356 struct otx2_nic *pfvf = netdev_priv(netdev); 357 358 if (pause->autoneg) 359 return -EOPNOTSUPP; 360 361 if (is_otx2_lbkvf(pfvf->pdev)) 362 return -EOPNOTSUPP; 363 364 if (pause->rx_pause) 365 pfvf->flags |= OTX2_FLAG_RX_PAUSE_ENABLED; 366 else 367 pfvf->flags &= ~OTX2_FLAG_RX_PAUSE_ENABLED; 368 369 if (pause->tx_pause) 370 pfvf->flags |= OTX2_FLAG_TX_PAUSE_ENABLED; 371 else 372 pfvf->flags &= ~OTX2_FLAG_TX_PAUSE_ENABLED; 373 374 return otx2_config_pause_frm(pfvf); 375 } 376 377 static void otx2_get_ringparam(struct net_device *netdev, 378 struct ethtool_ringparam *ring, 379 struct kernel_ethtool_ringparam *kernel_ring, 380 struct netlink_ext_ack *extack) 381 { 382 struct otx2_nic *pfvf = netdev_priv(netdev); 383 struct otx2_qset *qs = &pfvf->qset; 384 385 ring->rx_max_pending = Q_COUNT(Q_SIZE_MAX); 386 ring->rx_pending = qs->rqe_cnt ? qs->rqe_cnt : Q_COUNT(Q_SIZE_256); 387 ring->tx_max_pending = Q_COUNT(Q_SIZE_MAX); 388 ring->tx_pending = qs->sqe_cnt ? qs->sqe_cnt : Q_COUNT(Q_SIZE_4K); 389 kernel_ring->rx_buf_len = pfvf->hw.rbuf_len; 390 kernel_ring->cqe_size = pfvf->hw.xqe_size; 391 } 392 393 static int otx2_set_ringparam(struct net_device *netdev, 394 struct ethtool_ringparam *ring, 395 struct kernel_ethtool_ringparam *kernel_ring, 396 struct netlink_ext_ack *extack) 397 { 398 struct otx2_nic *pfvf = netdev_priv(netdev); 399 u32 rx_buf_len = kernel_ring->rx_buf_len; 400 u32 old_rx_buf_len = pfvf->hw.rbuf_len; 401 u32 xqe_size = kernel_ring->cqe_size; 402 bool if_up = netif_running(netdev); 403 struct otx2_qset *qs = &pfvf->qset; 404 u32 rx_count, tx_count; 405 406 if (ring->rx_mini_pending || ring->rx_jumbo_pending) 407 return -EINVAL; 408 409 /* Hardware supports max size of 32k for a receive buffer 410 * and 1536 is typical ethernet frame size. 
411 */ 412 if (rx_buf_len && (rx_buf_len < 1536 || rx_buf_len > 32768)) { 413 netdev_err(netdev, 414 "Receive buffer range is 1536 - 32768"); 415 return -EINVAL; 416 } 417 418 if (xqe_size != 128 && xqe_size != 512) { 419 netdev_err(netdev, 420 "Completion event size must be 128 or 512"); 421 return -EINVAL; 422 } 423 424 /* Permitted lengths are 16 64 256 1K 4K 16K 64K 256K 1M */ 425 rx_count = ring->rx_pending; 426 /* On some silicon variants a skid or reserved CQEs are 427 * needed to avoid CQ overflow. 428 */ 429 if (rx_count < pfvf->hw.rq_skid) 430 rx_count = pfvf->hw.rq_skid; 431 rx_count = Q_COUNT(Q_SIZE(rx_count, 3)); 432 433 /* Due pipelining impact minimum 2000 unused SQ CQE's 434 * need to be maintained to avoid CQ overflow, hence the 435 * minimum 4K size. 436 */ 437 tx_count = clamp_t(u32, ring->tx_pending, 438 Q_COUNT(Q_SIZE_4K), Q_COUNT(Q_SIZE_MAX)); 439 tx_count = Q_COUNT(Q_SIZE(tx_count, 3)); 440 441 if (tx_count == qs->sqe_cnt && rx_count == qs->rqe_cnt && 442 rx_buf_len == old_rx_buf_len && xqe_size == pfvf->hw.xqe_size) 443 return 0; 444 445 if (if_up) 446 netdev->netdev_ops->ndo_stop(netdev); 447 448 /* Assigned to the nearest possible exponent. 
*/ 449 qs->sqe_cnt = tx_count; 450 qs->rqe_cnt = rx_count; 451 452 pfvf->hw.rbuf_len = rx_buf_len; 453 pfvf->hw.xqe_size = xqe_size; 454 455 if (if_up) 456 return netdev->netdev_ops->ndo_open(netdev); 457 458 return 0; 459 } 460 461 static int otx2_get_coalesce(struct net_device *netdev, 462 struct ethtool_coalesce *cmd, 463 struct kernel_ethtool_coalesce *kernel_coal, 464 struct netlink_ext_ack *extack) 465 { 466 struct otx2_nic *pfvf = netdev_priv(netdev); 467 struct otx2_hw *hw = &pfvf->hw; 468 469 cmd->rx_coalesce_usecs = hw->cq_time_wait; 470 cmd->rx_max_coalesced_frames = hw->cq_ecount_wait; 471 cmd->tx_coalesce_usecs = hw->cq_time_wait; 472 cmd->tx_max_coalesced_frames = hw->cq_ecount_wait; 473 if ((pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED) == 474 OTX2_FLAG_ADPTV_INT_COAL_ENABLED) { 475 cmd->use_adaptive_rx_coalesce = 1; 476 cmd->use_adaptive_tx_coalesce = 1; 477 } else { 478 cmd->use_adaptive_rx_coalesce = 0; 479 cmd->use_adaptive_tx_coalesce = 0; 480 } 481 482 return 0; 483 } 484 485 static int otx2_set_coalesce(struct net_device *netdev, 486 struct ethtool_coalesce *ec, 487 struct kernel_ethtool_coalesce *kernel_coal, 488 struct netlink_ext_ack *extack) 489 { 490 struct otx2_nic *pfvf = netdev_priv(netdev); 491 struct otx2_hw *hw = &pfvf->hw; 492 u8 priv_coalesce_status; 493 int qidx; 494 495 if (!ec->rx_max_coalesced_frames || !ec->tx_max_coalesced_frames) 496 return 0; 497 498 if (ec->use_adaptive_rx_coalesce != ec->use_adaptive_tx_coalesce) { 499 netdev_err(netdev, 500 "adaptive-rx should be same as adaptive-tx"); 501 return -EINVAL; 502 } 503 504 /* Check and update coalesce status */ 505 if ((pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED) == 506 OTX2_FLAG_ADPTV_INT_COAL_ENABLED) { 507 priv_coalesce_status = 1; 508 if (!ec->use_adaptive_rx_coalesce) 509 pfvf->flags &= ~OTX2_FLAG_ADPTV_INT_COAL_ENABLED; 510 } else { 511 priv_coalesce_status = 0; 512 if (ec->use_adaptive_rx_coalesce) 513 pfvf->flags |= OTX2_FLAG_ADPTV_INT_COAL_ENABLED; 514 } 515 
516 /* 'cq_time_wait' is 8bit and is in multiple of 100ns, 517 * so clamp the user given value to the range of 1 to 25usec. 518 */ 519 ec->rx_coalesce_usecs = clamp_t(u32, ec->rx_coalesce_usecs, 520 1, CQ_TIMER_THRESH_MAX); 521 ec->tx_coalesce_usecs = clamp_t(u32, ec->tx_coalesce_usecs, 522 1, CQ_TIMER_THRESH_MAX); 523 524 /* Rx and Tx are mapped to same CQ, check which one 525 * is changed, if both then choose the min. 526 */ 527 if (hw->cq_time_wait == ec->rx_coalesce_usecs) 528 hw->cq_time_wait = ec->tx_coalesce_usecs; 529 else if (hw->cq_time_wait == ec->tx_coalesce_usecs) 530 hw->cq_time_wait = ec->rx_coalesce_usecs; 531 else 532 hw->cq_time_wait = min_t(u8, ec->rx_coalesce_usecs, 533 ec->tx_coalesce_usecs); 534 535 /* Max ecount_wait supported is 16bit, 536 * so clamp the user given value to the range of 1 to 64k. 537 */ 538 ec->rx_max_coalesced_frames = clamp_t(u32, ec->rx_max_coalesced_frames, 539 1, NAPI_POLL_WEIGHT); 540 ec->tx_max_coalesced_frames = clamp_t(u32, ec->tx_max_coalesced_frames, 541 1, NAPI_POLL_WEIGHT); 542 543 /* Rx and Tx are mapped to same CQ, check which one 544 * is changed, if both then choose the min. 545 */ 546 if (hw->cq_ecount_wait == ec->rx_max_coalesced_frames) 547 hw->cq_ecount_wait = ec->tx_max_coalesced_frames; 548 else if (hw->cq_ecount_wait == ec->tx_max_coalesced_frames) 549 hw->cq_ecount_wait = ec->rx_max_coalesced_frames; 550 else 551 hw->cq_ecount_wait = min_t(u16, ec->rx_max_coalesced_frames, 552 ec->tx_max_coalesced_frames); 553 554 /* Reset 'cq_time_wait' and 'cq_ecount_wait' to 555 * default values if coalesce status changed from 556 * 'on' to 'off'. 
557 */ 558 if (priv_coalesce_status && 559 ((pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED) != 560 OTX2_FLAG_ADPTV_INT_COAL_ENABLED)) { 561 hw->cq_time_wait = CQ_TIMER_THRESH_DEFAULT; 562 hw->cq_ecount_wait = CQ_CQE_THRESH_DEFAULT; 563 } 564 565 if (netif_running(netdev)) { 566 for (qidx = 0; qidx < pfvf->hw.cint_cnt; qidx++) 567 otx2_config_irq_coalescing(pfvf, qidx); 568 } 569 570 return 0; 571 } 572 573 static int otx2_get_rss_hash_opts(struct otx2_nic *pfvf, 574 struct ethtool_rxnfc *nfc) 575 { 576 struct otx2_rss_info *rss = &pfvf->hw.rss_info; 577 578 if (!(rss->flowkey_cfg & 579 (NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6))) 580 return 0; 581 582 /* Mimimum is IPv4 and IPv6, SIP/DIP */ 583 nfc->data = RXH_IP_SRC | RXH_IP_DST; 584 if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_VLAN) 585 nfc->data |= RXH_VLAN; 586 587 switch (nfc->flow_type) { 588 case TCP_V4_FLOW: 589 case TCP_V6_FLOW: 590 if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_TCP) 591 nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; 592 break; 593 case UDP_V4_FLOW: 594 case UDP_V6_FLOW: 595 if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_UDP) 596 nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; 597 break; 598 case SCTP_V4_FLOW: 599 case SCTP_V6_FLOW: 600 if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_SCTP) 601 nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; 602 break; 603 case AH_ESP_V4_FLOW: 604 case AH_ESP_V6_FLOW: 605 if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_ESP) 606 nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; 607 break; 608 case AH_V4_FLOW: 609 case ESP_V4_FLOW: 610 case IPV4_FLOW: 611 break; 612 case AH_V6_FLOW: 613 case ESP_V6_FLOW: 614 case IPV6_FLOW: 615 break; 616 default: 617 return -EINVAL; 618 } 619 620 return 0; 621 } 622 623 static int otx2_set_rss_hash_opts(struct otx2_nic *pfvf, 624 struct ethtool_rxnfc *nfc) 625 { 626 struct otx2_rss_info *rss = &pfvf->hw.rss_info; 627 u32 rxh_l4 = RXH_L4_B_0_1 | RXH_L4_B_2_3; 628 u32 rss_cfg = rss->flowkey_cfg; 629 630 if (!rss->enable) { 631 netdev_err(pfvf->netdev, 632 "RSS is disabled, 
cannot change settings\n"); 633 return -EIO; 634 } 635 636 /* Mimimum is IPv4 and IPv6, SIP/DIP */ 637 if (!(nfc->data & RXH_IP_SRC) || !(nfc->data & RXH_IP_DST)) 638 return -EINVAL; 639 640 if (nfc->data & RXH_VLAN) 641 rss_cfg |= NIX_FLOW_KEY_TYPE_VLAN; 642 else 643 rss_cfg &= ~NIX_FLOW_KEY_TYPE_VLAN; 644 645 switch (nfc->flow_type) { 646 case TCP_V4_FLOW: 647 case TCP_V6_FLOW: 648 /* Different config for v4 and v6 is not supported. 649 * Both of them have to be either 4-tuple or 2-tuple. 650 */ 651 switch (nfc->data & rxh_l4) { 652 case 0: 653 rss_cfg &= ~NIX_FLOW_KEY_TYPE_TCP; 654 break; 655 case (RXH_L4_B_0_1 | RXH_L4_B_2_3): 656 rss_cfg |= NIX_FLOW_KEY_TYPE_TCP; 657 break; 658 default: 659 return -EINVAL; 660 } 661 break; 662 case UDP_V4_FLOW: 663 case UDP_V6_FLOW: 664 switch (nfc->data & rxh_l4) { 665 case 0: 666 rss_cfg &= ~NIX_FLOW_KEY_TYPE_UDP; 667 break; 668 case (RXH_L4_B_0_1 | RXH_L4_B_2_3): 669 rss_cfg |= NIX_FLOW_KEY_TYPE_UDP; 670 break; 671 default: 672 return -EINVAL; 673 } 674 break; 675 case SCTP_V4_FLOW: 676 case SCTP_V6_FLOW: 677 switch (nfc->data & rxh_l4) { 678 case 0: 679 rss_cfg &= ~NIX_FLOW_KEY_TYPE_SCTP; 680 break; 681 case (RXH_L4_B_0_1 | RXH_L4_B_2_3): 682 rss_cfg |= NIX_FLOW_KEY_TYPE_SCTP; 683 break; 684 default: 685 return -EINVAL; 686 } 687 break; 688 case AH_ESP_V4_FLOW: 689 case AH_ESP_V6_FLOW: 690 switch (nfc->data & rxh_l4) { 691 case 0: 692 rss_cfg &= ~(NIX_FLOW_KEY_TYPE_ESP | 693 NIX_FLOW_KEY_TYPE_AH); 694 rss_cfg |= NIX_FLOW_KEY_TYPE_VLAN | 695 NIX_FLOW_KEY_TYPE_IPV4_PROTO; 696 break; 697 case (RXH_L4_B_0_1 | RXH_L4_B_2_3): 698 /* If VLAN hashing is also requested for ESP then do not 699 * allow because of hardware 40 bytes flow key limit. 
700 */ 701 if (rss_cfg & NIX_FLOW_KEY_TYPE_VLAN) { 702 netdev_err(pfvf->netdev, 703 "RSS hash of ESP or AH with VLAN is not supported\n"); 704 return -EOPNOTSUPP; 705 } 706 707 rss_cfg |= NIX_FLOW_KEY_TYPE_ESP | NIX_FLOW_KEY_TYPE_AH; 708 /* Disable IPv4 proto hashing since IPv6 SA+DA(32 bytes) 709 * and ESP SPI+sequence(8 bytes) uses hardware maximum 710 * limit of 40 byte flow key. 711 */ 712 rss_cfg &= ~NIX_FLOW_KEY_TYPE_IPV4_PROTO; 713 break; 714 default: 715 return -EINVAL; 716 } 717 break; 718 case IPV4_FLOW: 719 case IPV6_FLOW: 720 rss_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6; 721 break; 722 default: 723 return -EINVAL; 724 } 725 726 rss->flowkey_cfg = rss_cfg; 727 otx2_set_flowkey_cfg(pfvf); 728 return 0; 729 } 730 731 static int otx2_get_rxnfc(struct net_device *dev, 732 struct ethtool_rxnfc *nfc, u32 *rules) 733 { 734 bool ntuple = !!(dev->features & NETIF_F_NTUPLE); 735 struct otx2_nic *pfvf = netdev_priv(dev); 736 int ret = -EOPNOTSUPP; 737 738 switch (nfc->cmd) { 739 case ETHTOOL_GRXRINGS: 740 nfc->data = pfvf->hw.rx_queues; 741 ret = 0; 742 break; 743 case ETHTOOL_GRXCLSRLCNT: 744 if (netif_running(dev) && ntuple) { 745 nfc->rule_cnt = pfvf->flow_cfg->nr_flows; 746 ret = 0; 747 } 748 break; 749 case ETHTOOL_GRXCLSRULE: 750 if (netif_running(dev) && ntuple) 751 ret = otx2_get_flow(pfvf, nfc, nfc->fs.location); 752 break; 753 case ETHTOOL_GRXCLSRLALL: 754 if (netif_running(dev) && ntuple) 755 ret = otx2_get_all_flows(pfvf, nfc, rules); 756 break; 757 case ETHTOOL_GRXFH: 758 return otx2_get_rss_hash_opts(pfvf, nfc); 759 default: 760 break; 761 } 762 return ret; 763 } 764 765 static int otx2_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *nfc) 766 { 767 bool ntuple = !!(dev->features & NETIF_F_NTUPLE); 768 struct otx2_nic *pfvf = netdev_priv(dev); 769 int ret = -EOPNOTSUPP; 770 771 pfvf->flow_cfg->ntuple = ntuple; 772 switch (nfc->cmd) { 773 case ETHTOOL_SRXFH: 774 ret = otx2_set_rss_hash_opts(pfvf, nfc); 775 break; 776 case 
ETHTOOL_SRXCLSRLINS: 777 if (netif_running(dev) && ntuple) 778 ret = otx2_add_flow(pfvf, nfc); 779 break; 780 case ETHTOOL_SRXCLSRLDEL: 781 if (netif_running(dev) && ntuple) 782 ret = otx2_remove_flow(pfvf, nfc->fs.location); 783 break; 784 default: 785 break; 786 } 787 788 return ret; 789 } 790 791 static u32 otx2_get_rxfh_key_size(struct net_device *netdev) 792 { 793 struct otx2_nic *pfvf = netdev_priv(netdev); 794 struct otx2_rss_info *rss; 795 796 rss = &pfvf->hw.rss_info; 797 798 return sizeof(rss->key); 799 } 800 801 static u32 otx2_get_rxfh_indir_size(struct net_device *dev) 802 { 803 return MAX_RSS_INDIR_TBL_SIZE; 804 } 805 806 static int otx2_rss_ctx_delete(struct otx2_nic *pfvf, int ctx_id) 807 { 808 struct otx2_rss_info *rss = &pfvf->hw.rss_info; 809 810 otx2_rss_ctx_flow_del(pfvf, ctx_id); 811 kfree(rss->rss_ctx[ctx_id]); 812 rss->rss_ctx[ctx_id] = NULL; 813 814 return 0; 815 } 816 817 static int otx2_rss_ctx_create(struct otx2_nic *pfvf, 818 u32 *rss_context) 819 { 820 struct otx2_rss_info *rss = &pfvf->hw.rss_info; 821 u8 ctx; 822 823 for (ctx = 0; ctx < MAX_RSS_GROUPS; ctx++) { 824 if (!rss->rss_ctx[ctx]) 825 break; 826 } 827 if (ctx == MAX_RSS_GROUPS) 828 return -EINVAL; 829 830 rss->rss_ctx[ctx] = kzalloc(sizeof(*rss->rss_ctx[ctx]), GFP_KERNEL); 831 if (!rss->rss_ctx[ctx]) 832 return -ENOMEM; 833 *rss_context = ctx; 834 835 return 0; 836 } 837 838 /* RSS context configuration */ 839 static int otx2_set_rxfh_context(struct net_device *dev, const u32 *indir, 840 const u8 *hkey, const u8 hfunc, 841 u32 *rss_context, bool delete) 842 { 843 struct otx2_nic *pfvf = netdev_priv(dev); 844 struct otx2_rss_ctx *rss_ctx; 845 struct otx2_rss_info *rss; 846 int ret, idx; 847 848 if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) 849 return -EOPNOTSUPP; 850 851 if (*rss_context != ETH_RXFH_CONTEXT_ALLOC && 852 *rss_context >= MAX_RSS_GROUPS) 853 return -EINVAL; 854 855 rss = &pfvf->hw.rss_info; 856 857 if (!rss->enable) { 858 netdev_err(dev, "RSS 
is disabled, cannot change settings\n"); 859 return -EIO; 860 } 861 862 if (hkey) { 863 memcpy(rss->key, hkey, sizeof(rss->key)); 864 otx2_set_rss_key(pfvf); 865 } 866 if (delete) 867 return otx2_rss_ctx_delete(pfvf, *rss_context); 868 869 if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) { 870 ret = otx2_rss_ctx_create(pfvf, rss_context); 871 if (ret) 872 return ret; 873 } 874 if (indir) { 875 rss_ctx = rss->rss_ctx[*rss_context]; 876 for (idx = 0; idx < rss->rss_size; idx++) 877 rss_ctx->ind_tbl[idx] = indir[idx]; 878 } 879 otx2_set_rss_table(pfvf, *rss_context); 880 881 return 0; 882 } 883 884 static int otx2_get_rxfh_context(struct net_device *dev, u32 *indir, 885 u8 *hkey, u8 *hfunc, u32 rss_context) 886 { 887 struct otx2_nic *pfvf = netdev_priv(dev); 888 struct otx2_rss_ctx *rss_ctx; 889 struct otx2_rss_info *rss; 890 int idx, rx_queues; 891 892 rss = &pfvf->hw.rss_info; 893 894 if (hfunc) 895 *hfunc = ETH_RSS_HASH_TOP; 896 897 if (!indir) 898 return 0; 899 900 if (!rss->enable && rss_context == DEFAULT_RSS_CONTEXT_GROUP) { 901 rx_queues = pfvf->hw.rx_queues; 902 for (idx = 0; idx < MAX_RSS_INDIR_TBL_SIZE; idx++) 903 indir[idx] = ethtool_rxfh_indir_default(idx, rx_queues); 904 return 0; 905 } 906 if (rss_context >= MAX_RSS_GROUPS) 907 return -ENOENT; 908 909 rss_ctx = rss->rss_ctx[rss_context]; 910 if (!rss_ctx) 911 return -ENOENT; 912 913 if (indir) { 914 for (idx = 0; idx < rss->rss_size; idx++) 915 indir[idx] = rss_ctx->ind_tbl[idx]; 916 } 917 if (hkey) 918 memcpy(hkey, rss->key, sizeof(rss->key)); 919 920 return 0; 921 } 922 923 /* Get RSS configuration */ 924 static int otx2_get_rxfh(struct net_device *dev, u32 *indir, 925 u8 *hkey, u8 *hfunc) 926 { 927 return otx2_get_rxfh_context(dev, indir, hkey, hfunc, 928 DEFAULT_RSS_CONTEXT_GROUP); 929 } 930 931 /* Configure RSS table and hash key */ 932 static int otx2_set_rxfh(struct net_device *dev, const u32 *indir, 933 const u8 *hkey, const u8 hfunc) 934 { 935 936 u32 rss_context = DEFAULT_RSS_CONTEXT_GROUP; 937 938 
return otx2_set_rxfh_context(dev, indir, hkey, hfunc, &rss_context, 0); 939 } 940 941 static u32 otx2_get_msglevel(struct net_device *netdev) 942 { 943 struct otx2_nic *pfvf = netdev_priv(netdev); 944 945 return pfvf->msg_enable; 946 } 947 948 static void otx2_set_msglevel(struct net_device *netdev, u32 val) 949 { 950 struct otx2_nic *pfvf = netdev_priv(netdev); 951 952 pfvf->msg_enable = val; 953 } 954 955 static u32 otx2_get_link(struct net_device *netdev) 956 { 957 struct otx2_nic *pfvf = netdev_priv(netdev); 958 959 /* LBK link is internal and always UP */ 960 if (is_otx2_lbkvf(pfvf->pdev)) 961 return 1; 962 return pfvf->linfo.link_up; 963 } 964 965 static int otx2_get_ts_info(struct net_device *netdev, 966 struct ethtool_ts_info *info) 967 { 968 struct otx2_nic *pfvf = netdev_priv(netdev); 969 970 if (!pfvf->ptp) 971 return ethtool_op_get_ts_info(netdev, info); 972 973 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | 974 SOF_TIMESTAMPING_RX_SOFTWARE | 975 SOF_TIMESTAMPING_SOFTWARE | 976 SOF_TIMESTAMPING_TX_HARDWARE | 977 SOF_TIMESTAMPING_RX_HARDWARE | 978 SOF_TIMESTAMPING_RAW_HARDWARE; 979 980 info->phc_index = otx2_ptp_clock_index(pfvf); 981 982 info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON); 983 if (test_bit(CN10K_PTP_ONESTEP, &pfvf->hw.cap_flag)) 984 info->tx_types |= BIT(HWTSTAMP_TX_ONESTEP_SYNC); 985 986 info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | 987 BIT(HWTSTAMP_FILTER_ALL); 988 989 return 0; 990 } 991 992 static struct cgx_fw_data *otx2_get_fwdata(struct otx2_nic *pfvf) 993 { 994 struct cgx_fw_data *rsp = NULL; 995 struct msg_req *req; 996 int err = 0; 997 998 mutex_lock(&pfvf->mbox.lock); 999 req = otx2_mbox_alloc_msg_cgx_get_aux_link_info(&pfvf->mbox); 1000 if (!req) { 1001 mutex_unlock(&pfvf->mbox.lock); 1002 return ERR_PTR(-ENOMEM); 1003 } 1004 1005 err = otx2_sync_mbox_msg(&pfvf->mbox); 1006 if (!err) { 1007 rsp = (struct cgx_fw_data *) 1008 otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr); 1009 } else { 1010 rsp = 
ERR_PTR(err); 1011 } 1012 1013 mutex_unlock(&pfvf->mbox.lock); 1014 return rsp; 1015 } 1016 1017 static int otx2_get_fecparam(struct net_device *netdev, 1018 struct ethtool_fecparam *fecparam) 1019 { 1020 struct otx2_nic *pfvf = netdev_priv(netdev); 1021 struct cgx_fw_data *rsp; 1022 const int fec[] = { 1023 ETHTOOL_FEC_OFF, 1024 ETHTOOL_FEC_BASER, 1025 ETHTOOL_FEC_RS, 1026 ETHTOOL_FEC_BASER | ETHTOOL_FEC_RS}; 1027 #define FEC_MAX_INDEX 4 1028 if (pfvf->linfo.fec < FEC_MAX_INDEX) 1029 fecparam->active_fec = fec[pfvf->linfo.fec]; 1030 1031 rsp = otx2_get_fwdata(pfvf); 1032 if (IS_ERR(rsp)) 1033 return PTR_ERR(rsp); 1034 1035 if (rsp->fwdata.supported_fec < FEC_MAX_INDEX) { 1036 if (!rsp->fwdata.supported_fec) 1037 fecparam->fec = ETHTOOL_FEC_NONE; 1038 else 1039 fecparam->fec = fec[rsp->fwdata.supported_fec]; 1040 } 1041 return 0; 1042 } 1043 1044 static int otx2_set_fecparam(struct net_device *netdev, 1045 struct ethtool_fecparam *fecparam) 1046 { 1047 struct otx2_nic *pfvf = netdev_priv(netdev); 1048 struct mbox *mbox = &pfvf->mbox; 1049 struct fec_mode *req, *rsp; 1050 int err = 0, fec = 0; 1051 1052 switch (fecparam->fec) { 1053 /* Firmware does not support AUTO mode consider it as FEC_OFF */ 1054 case ETHTOOL_FEC_OFF: 1055 case ETHTOOL_FEC_AUTO: 1056 fec = OTX2_FEC_OFF; 1057 break; 1058 case ETHTOOL_FEC_RS: 1059 fec = OTX2_FEC_RS; 1060 break; 1061 case ETHTOOL_FEC_BASER: 1062 fec = OTX2_FEC_BASER; 1063 break; 1064 default: 1065 netdev_warn(pfvf->netdev, "Unsupported FEC mode: %d", 1066 fecparam->fec); 1067 return -EINVAL; 1068 } 1069 1070 if (fec == pfvf->linfo.fec) 1071 return 0; 1072 1073 mutex_lock(&mbox->lock); 1074 req = otx2_mbox_alloc_msg_cgx_set_fec_param(&pfvf->mbox); 1075 if (!req) { 1076 err = -ENOMEM; 1077 goto end; 1078 } 1079 req->fec = fec; 1080 err = otx2_sync_mbox_msg(&pfvf->mbox); 1081 if (err) 1082 goto end; 1083 1084 rsp = (struct fec_mode *)otx2_mbox_get_rsp(&pfvf->mbox.mbox, 1085 0, &req->hdr); 1086 if (rsp->fec >= 0) 1087 pfvf->linfo.fec 
= rsp->fec; 1088 else 1089 err = rsp->fec; 1090 end: 1091 mutex_unlock(&mbox->lock); 1092 return err; 1093 } 1094 1095 static void otx2_get_fec_info(u64 index, int req_mode, 1096 struct ethtool_link_ksettings *link_ksettings) 1097 { 1098 __ETHTOOL_DECLARE_LINK_MODE_MASK(otx2_fec_modes) = { 0, }; 1099 1100 switch (index) { 1101 case OTX2_FEC_NONE: 1102 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, 1103 otx2_fec_modes); 1104 break; 1105 case OTX2_FEC_BASER: 1106 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, 1107 otx2_fec_modes); 1108 break; 1109 case OTX2_FEC_RS: 1110 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, 1111 otx2_fec_modes); 1112 break; 1113 case OTX2_FEC_BASER | OTX2_FEC_RS: 1114 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, 1115 otx2_fec_modes); 1116 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, 1117 otx2_fec_modes); 1118 break; 1119 } 1120 1121 /* Add fec modes to existing modes */ 1122 if (req_mode == OTX2_MODE_ADVERTISED) 1123 linkmode_or(link_ksettings->link_modes.advertising, 1124 link_ksettings->link_modes.advertising, 1125 otx2_fec_modes); 1126 else 1127 linkmode_or(link_ksettings->link_modes.supported, 1128 link_ksettings->link_modes.supported, 1129 otx2_fec_modes); 1130 } 1131 1132 static void otx2_get_link_mode_info(u64 link_mode_bmap, 1133 bool req_mode, 1134 struct ethtool_link_ksettings 1135 *link_ksettings) 1136 { 1137 __ETHTOOL_DECLARE_LINK_MODE_MASK(otx2_link_modes) = { 0, }; 1138 const int otx2_sgmii_features[6] = { 1139 ETHTOOL_LINK_MODE_10baseT_Half_BIT, 1140 ETHTOOL_LINK_MODE_10baseT_Full_BIT, 1141 ETHTOOL_LINK_MODE_100baseT_Half_BIT, 1142 ETHTOOL_LINK_MODE_100baseT_Full_BIT, 1143 ETHTOOL_LINK_MODE_1000baseT_Half_BIT, 1144 ETHTOOL_LINK_MODE_1000baseT_Full_BIT, 1145 }; 1146 /* CGX link modes to Ethtool link mode mapping */ 1147 const int cgx_link_mode[27] = { 1148 0, /* SGMII Mode */ 1149 ETHTOOL_LINK_MODE_1000baseX_Full_BIT, 1150 ETHTOOL_LINK_MODE_10000baseT_Full_BIT, 1151 ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, 1152 
		ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
		ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
		0,
		ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
		0,
		0,
		ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
		ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
		ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
		ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
		ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
		ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
		0,
		ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
		0,
		ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
		ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
		ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
		0,
		ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
		ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
		ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
		ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT
	};
	u8 bit;

	/* NOTE(review): casting &link_mode_bmap (u64) to unsigned long *
	 * assumes 64-bit unsigned long — fine on the arm64/x86_64 targets
	 * this driver runs on.
	 */
	for_each_set_bit(bit, (unsigned long *)&link_mode_bmap, 27) {
		/* SGMII mode is set */
		if (bit == 0)
			linkmode_set_bit_array(otx2_sgmii_features,
					       ARRAY_SIZE(otx2_sgmii_features),
					       otx2_link_modes);
		else
			linkmode_set_bit(cgx_link_mode[bit], otx2_link_modes);
	}

	/* Overwrite (not OR) the selected mask with the translated modes */
	if (req_mode == OTX2_MODE_ADVERTISED)
		linkmode_copy(link_ksettings->link_modes.advertising,
			      otx2_link_modes);
	else
		linkmode_copy(link_ksettings->link_modes.supported,
			      otx2_link_modes);
}

/* ethtool get_link_ksettings handler.
 *
 * Base settings (speed/duplex/autoneg) come from the cached link info;
 * supported/advertised link modes and FEC modes are translated from the
 * firmware data fetched via otx2_get_fwdata().
 */
static int otx2_get_link_ksettings(struct net_device *netdev,
				   struct ethtool_link_ksettings *cmd)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	struct cgx_fw_data *rsp = NULL;

	cmd->base.duplex = pfvf->linfo.full_duplex;
	cmd->base.speed = pfvf->linfo.speed;
	cmd->base.autoneg = pfvf->linfo.an;

	rsp = otx2_get_fwdata(pfvf);
	if (IS_ERR(rsp))
		return PTR_ERR(rsp);

	if (rsp->fwdata.supported_an)
		ethtool_link_ksettings_add_link_mode(cmd,
						     supported,
						     Autoneg);

	/* FEC bits are OR-ed in after the link-mode copy, so the order
	 * below matters: modes first, then FEC.
	 */
	otx2_get_link_mode_info(rsp->fwdata.advertised_link_modes,
				OTX2_MODE_ADVERTISED, cmd);
	otx2_get_fec_info(rsp->fwdata.advertised_fec,
			  OTX2_MODE_ADVERTISED, cmd);
	otx2_get_link_mode_info(rsp->fwdata.supported_link_modes,
				OTX2_MODE_SUPPORTED, cmd);
	otx2_get_fec_info(rsp->fwdata.supported_fec,
			  OTX2_MODE_SUPPORTED, cmd);
	return 0;
}

/* Pick the single advertised mode to send to firmware.  @mode is left
 * untouched when no advertising bit is set.
 */
static void otx2_get_advertised_mode(const struct ethtool_link_ksettings *cmd,
				     u64 *mode)
{
	u32 bit_pos;

	/* Firmware does not support requesting multiple advertised modes
	 * return first set bit
	 */
	bit_pos = find_first_bit(cmd->link_modes.advertising,
				 __ETHTOOL_LINK_MODE_MASK_NBITS);
	if (bit_pos != __ETHTOOL_LINK_MODE_MASK_NBITS)
		*mode = bit_pos;
}

/* ethtool set_link_ksettings handler.
 *
 * Validates the request, rejects advertised modes the hardware does not
 * support (per otx2_get_link_ksettings()), then sends a CGX set-link-mode
 * mailbox request under the mbox lock.
 */
static int otx2_set_link_ksettings(struct net_device *netdev,
				   const struct ethtool_link_ksettings *cmd)
{
	struct otx2_nic *pf = netdev_priv(netdev);
	struct ethtool_link_ksettings cur_ks;
	struct cgx_set_link_mode_req *req;
	struct mbox *mbox = &pf->mbox;
	int err = 0;

	memset(&cur_ks, 0, sizeof(struct ethtool_link_ksettings));

	if (!ethtool_validate_speed(cmd->base.speed) ||
	    !ethtool_validate_duplex(cmd->base.duplex))
		return -EINVAL;

	if (cmd->base.autoneg != AUTONEG_ENABLE &&
	    cmd->base.autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	otx2_get_link_ksettings(netdev, &cur_ks);

	/* Check requested modes against supported modes by hardware */
	if (!linkmode_subset(cmd->link_modes.advertising,
			     cur_ks.link_modes.supported))
		return -EINVAL;

	mutex_lock(&mbox->lock);
	req = otx2_mbox_alloc_msg_cgx_set_link_mode(&pf->mbox);
	if (!req) {
		err = -ENOMEM;
		goto end;
	}

	req->args.speed = cmd->base.speed;
	/* firmware expects 1 for half duplex and 0 for full duplex
	 * hence inverting
	 */
	req->args.duplex = cmd->base.duplex ^
0x1;
	req->args.an = cmd->base.autoneg;
	/* Firmware accepts only one advertised mode (see
	 * otx2_get_advertised_mode())
	 */
	otx2_get_advertised_mode(cmd, &req->args.mode);

	err = otx2_sync_mbox_msg(&pf->mbox);
end:
	mutex_unlock(&mbox->lock);
	return err;
}

/* ethtool --show-fec-stats handler.
 *
 * Reports MAC (CGX/RPM) FEC block counters by default.  If the PHY
 * exposes its own FEC stats (fwdata.phy.misc.has_fec_stats) and they can
 * be refreshed, the PHY counters are reported instead — BASE-R or RS
 * counters depending on the active FEC mode.
 */
static void otx2_get_fec_stats(struct net_device *netdev,
			       struct ethtool_fec_stats *fec_stats)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	struct cgx_fw_data *rsp;

	otx2_update_lmac_fec_stats(pfvf);

	/* Report MAC FEC stats */
	fec_stats->corrected_blocks.total = pfvf->hw.cgx_fec_corr_blks;
	fec_stats->uncorrectable_blocks.total = pfvf->hw.cgx_fec_uncorr_blks;

	rsp = otx2_get_fwdata(pfvf);
	if (!IS_ERR(rsp) && rsp->fwdata.phy.misc.has_fec_stats &&
	    !otx2_get_phy_fec_stats(pfvf)) {
		/* Fetch fwdata again because it's been recently populated with
		 * latest PHY FEC stats.
		 */
		rsp = otx2_get_fwdata(pfvf);
		if (!IS_ERR(rsp)) {
			struct fec_stats_s *p = &rsp->fwdata.phy.fec_stats;

			if (pfvf->linfo.fec == OTX2_FEC_BASER) {
				fec_stats->corrected_blocks.total = p->brfec_corr_blks;
				fec_stats->uncorrectable_blocks.total = p->brfec_uncorr_blks;
			} else {
				fec_stats->corrected_blocks.total = p->rsfec_corr_cws;
				fec_stats->uncorrectable_blocks.total = p->rsfec_uncorr_cws;
			}
		}
	}
}

/* PF netdev ethtool callbacks */
static const struct ethtool_ops otx2_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES |
				     ETHTOOL_COALESCE_USE_ADAPTIVE,
	.supported_ring_params	= ETHTOOL_RING_USE_RX_BUF_LEN |
				  ETHTOOL_RING_USE_CQE_SIZE,
	.get_link		= otx2_get_link,
	.get_drvinfo		= otx2_get_drvinfo,
	.get_strings		= otx2_get_strings,
	.get_ethtool_stats	= otx2_get_ethtool_stats,
	.get_sset_count		= otx2_get_sset_count,
	.set_channels		= otx2_set_channels,
	.get_channels		= otx2_get_channels,
	.get_ringparam		= otx2_get_ringparam,
	.set_ringparam		= otx2_set_ringparam,
	.get_coalesce		= otx2_get_coalesce,
	.set_coalesce		= otx2_set_coalesce,
	.get_rxnfc		= otx2_get_rxnfc,
	.set_rxnfc		= otx2_set_rxnfc,
	.get_rxfh_key_size	= otx2_get_rxfh_key_size,
	.get_rxfh_indir_size	= otx2_get_rxfh_indir_size,
	.get_rxfh		= otx2_get_rxfh,
	.set_rxfh		= otx2_set_rxfh,
	.get_rxfh_context	= otx2_get_rxfh_context,
	.set_rxfh_context	= otx2_set_rxfh_context,
	.get_msglevel		= otx2_get_msglevel,
	.set_msglevel		= otx2_set_msglevel,
	.get_pauseparam		= otx2_get_pauseparam,
	.set_pauseparam		= otx2_set_pauseparam,
	.get_ts_info		= otx2_get_ts_info,
	.get_fec_stats		= otx2_get_fec_stats,
	.get_fecparam		= otx2_get_fecparam,
	.set_fecparam		= otx2_set_fecparam,
	.get_link_ksettings	= otx2_get_link_ksettings,
	.set_link_ksettings	= otx2_set_link_ksettings,
};

/* Install the PF ethtool callbacks on @netdev */
void otx2_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &otx2_ethtool_ops;
}

/* VF's ethtool APIs */
static void otx2vf_get_drvinfo(struct net_device *netdev,
			       struct ethtool_drvinfo *info)
{
	struct otx2_nic *vf = netdev_priv(netdev);

	strscpy(info->driver, DRV_VF_NAME, sizeof(info->driver));
	strscpy(info->bus_info, pci_name(vf->pdev), sizeof(info->bus_info));
}

/* Emit VF stat name strings; ordering must match
 * otx2vf_get_ethtool_stats() and the count from otx2vf_get_sset_count().
 */
static void otx2vf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
{
	struct otx2_nic *vf = netdev_priv(netdev);
	int stats;

	if (sset != ETH_SS_STATS)
		return;

	for (stats = 0; stats < otx2_n_dev_stats; stats++) {
		memcpy(data, otx2_dev_stats[stats].name, ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}

	for (stats = 0; stats < otx2_n_drv_stats; stats++) {
		memcpy(data, otx2_drv_stats[stats].name, ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}

	otx2_get_qset_strings(vf, &data, 0);

	strcpy(data, "reset_count");
	data += ETH_GSTRING_LEN;
}

/* Fill the VF stats array; ordering must match otx2vf_get_strings() */
static void
otx2vf_get_ethtool_stats(struct net_device *netdev,
			 struct ethtool_stats *stats, u64 *data)
{
	struct otx2_nic *vf = netdev_priv(netdev);
	int stat;

	/* Device stats, driver stats, per-queue stats, then reset_count —
	 * same order as the string table.
	 */
	otx2_get_dev_stats(vf);
	for (stat = 0; stat < otx2_n_dev_stats; stat++)
		*(data++) = ((u64 *)&vf->hw.dev_stats)
				[otx2_dev_stats[stat].index];

	for (stat = 0; stat < otx2_n_drv_stats; stat++)
		*(data++) = atomic_read(&((atomic_t *)&vf->hw.drv_stats)
						[otx2_drv_stats[stat].index]);

	otx2_get_qset_stats(vf, stats, &data);
	*(data++) = vf->reset_count;
}

/* Number of stats otx2vf_get_ethtool_stats() will report:
 * device stats + driver stats + per-queue stats + 1 (reset_count).
 */
static int otx2vf_get_sset_count(struct net_device *netdev, int sset)
{
	struct otx2_nic *vf = netdev_priv(netdev);
	int qstats_count;

	if (sset != ETH_SS_STATS)
		return -EINVAL;

	qstats_count = otx2_n_queue_stats *
		       (vf->hw.rx_queues + otx2_get_total_tx_queues(vf));

	return otx2_n_dev_stats + otx2_n_drv_stats + qstats_count + 1;
}

/* VF get_link_ksettings: LBK VFs report a fixed 100G/full-duplex link;
 * all other VFs defer to the common PF handler.
 */
static int otx2vf_get_link_ksettings(struct net_device *netdev,
				     struct ethtool_link_ksettings *cmd)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);

	if (is_otx2_lbkvf(pfvf->pdev)) {
		cmd->base.duplex = DUPLEX_FULL;
		cmd->base.speed = SPEED_100000;
	} else {
		return otx2_get_link_ksettings(netdev, cmd);
	}
	return 0;
}

/* VF netdev ethtool callbacks (no FEC/link-mode set handlers) */
static const struct ethtool_ops otx2vf_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES |
				     ETHTOOL_COALESCE_USE_ADAPTIVE,
	.supported_ring_params	= ETHTOOL_RING_USE_RX_BUF_LEN |
				  ETHTOOL_RING_USE_CQE_SIZE,
	.get_link		= otx2_get_link,
	.get_drvinfo		= otx2vf_get_drvinfo,
	.get_strings		= otx2vf_get_strings,
	.get_ethtool_stats	= otx2vf_get_ethtool_stats,
	.get_sset_count		= otx2vf_get_sset_count,
	.set_channels		= otx2_set_channels,
	.get_channels		= otx2_get_channels,
	.get_rxnfc		= otx2_get_rxnfc,
	.set_rxnfc		= otx2_set_rxnfc,
	.get_rxfh_key_size	= otx2_get_rxfh_key_size,
	.get_rxfh_indir_size	= otx2_get_rxfh_indir_size,
	.get_rxfh		= otx2_get_rxfh,
	.set_rxfh		= otx2_set_rxfh,
	.get_rxfh_context	= otx2_get_rxfh_context,
	.set_rxfh_context	= otx2_set_rxfh_context,
	.get_ringparam		= otx2_get_ringparam,
	.set_ringparam		= otx2_set_ringparam,
	.get_coalesce		= otx2_get_coalesce,
	.set_coalesce		= otx2_set_coalesce,
	.get_msglevel		= otx2_get_msglevel,
	.set_msglevel		= otx2_set_msglevel,
	.get_pauseparam		= otx2_get_pauseparam,
	.set_pauseparam		= otx2_set_pauseparam,
	.get_link_ksettings	= otx2vf_get_link_ksettings,
	.get_ts_info		= otx2_get_ts_info,
};

/* Install the VF ethtool callbacks on @netdev */
void otx2vf_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &otx2vf_ethtool_ops;
}
EXPORT_SYMBOL(otx2vf_set_ethtool_ops);