// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Ethernet driver
 *
 * Copyright (C) 2020 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/pci.h>
#include <linux/ethtool.h>
#include <linux/stddef.h>
#include <linux/etherdevice.h>
#include <linux/log2.h>

#include "otx2_common.h"

#define DRV_NAME	"octeontx2-nicpf"

struct otx2_stat {
	char name[ETH_GSTRING_LEN];
	unsigned int index;
};

/* HW device stats */
#define OTX2_DEV_STAT(stat) { \
	.name = #stat, \
	.index = offsetof(struct otx2_dev_stats, stat) / sizeof(u64), \
}

static const struct otx2_stat otx2_dev_stats[] = {
	OTX2_DEV_STAT(rx_ucast_frames),
	OTX2_DEV_STAT(rx_bcast_frames),
	OTX2_DEV_STAT(rx_mcast_frames),

	OTX2_DEV_STAT(tx_ucast_frames),
	OTX2_DEV_STAT(tx_bcast_frames),
	OTX2_DEV_STAT(tx_mcast_frames),
};

/* Driver level stats */
#define OTX2_DRV_STAT(stat) { \
	.name = #stat, \
	.index = offsetof(struct otx2_drv_stats, stat) / sizeof(atomic_t), \
}

static const struct otx2_stat otx2_drv_stats[] = {
	OTX2_DRV_STAT(rx_fcs_errs),
	OTX2_DRV_STAT(rx_oversize_errs),
	OTX2_DRV_STAT(rx_undersize_errs),
	OTX2_DRV_STAT(rx_csum_errs),
	OTX2_DRV_STAT(rx_len_errs),
	OTX2_DRV_STAT(rx_other_errs),
};

static const struct otx2_stat otx2_queue_stats[] = {
	{ "bytes", 0 },
	{ "frames", 1 },
};

static const unsigned int otx2_n_dev_stats = ARRAY_SIZE(otx2_dev_stats);
static const unsigned int otx2_n_drv_stats = ARRAY_SIZE(otx2_drv_stats);
static const unsigned int otx2_n_queue_stats = ARRAY_SIZE(otx2_queue_stats);

static void otx2_dev_open(struct net_device *netdev)
{
	otx2_open(netdev);
}

static void otx2_dev_stop(struct net_device *netdev)
{
	otx2_stop(netdev);
}

static void otx2_get_drvinfo(struct net_device *netdev,
			     struct ethtool_drvinfo *info)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->bus_info, pci_name(pfvf->pdev), sizeof(info->bus_info));
}

static void otx2_get_qset_strings(struct otx2_nic *pfvf, u8 **data, int qset)
{
	int start_qidx = qset * pfvf->hw.rx_queues;
	int qidx, stats;

	for (qidx = 0; qidx < pfvf->hw.rx_queues; qidx++) {
		for (stats = 0; stats < otx2_n_queue_stats; stats++) {
			sprintf(*data, "rxq%d: %s", qidx + start_qidx,
				otx2_queue_stats[stats].name);
			*data += ETH_GSTRING_LEN;
		}
	}
	for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) {
		for (stats = 0; stats < otx2_n_queue_stats; stats++) {
			sprintf(*data, "txq%d: %s", qidx + start_qidx,
				otx2_queue_stats[stats].name);
			*data += ETH_GSTRING_LEN;
		}
	}
}
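/* Note: the ethtool core pairs strings with stat values by index and
 * sizes its buffers from .get_sset_count(), so otx2_get_strings(),
 * otx2_get_ethtool_stats() and otx2_get_sset_count() must agree on
 * both the number and the order of entries.  The layout used
 * throughout this file is:
 *
 *	dev stats | drv stats | per-RQ stats | per-SQ stats |
 *	CGX Rx stats | CGX Tx stats | reset_count
 *
 * Keep all three callbacks in sync when adding a counter.
 */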
static void otx2_get_strings(struct net_device *netdev, u32 sset, u8 *data)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	int stats;

	if (sset != ETH_SS_STATS)
		return;

	for (stats = 0; stats < otx2_n_dev_stats; stats++) {
		memcpy(data, otx2_dev_stats[stats].name, ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}

	for (stats = 0; stats < otx2_n_drv_stats; stats++) {
		memcpy(data, otx2_drv_stats[stats].name, ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}

	otx2_get_qset_strings(pfvf, &data, 0);

	for (stats = 0; stats < CGX_RX_STATS_COUNT; stats++) {
		sprintf(data, "cgx_rxstat%d: ", stats);
		data += ETH_GSTRING_LEN;
	}

	for (stats = 0; stats < CGX_TX_STATS_COUNT; stats++) {
		sprintf(data, "cgx_txstat%d: ", stats);
		data += ETH_GSTRING_LEN;
	}

	strcpy(data, "reset_count");
	data += ETH_GSTRING_LEN;
}

static void otx2_get_qset_stats(struct otx2_nic *pfvf,
				struct ethtool_stats *stats, u64 **data)
{
	int stat, qidx;

	if (!pfvf)
		return;
	for (qidx = 0; qidx < pfvf->hw.rx_queues; qidx++) {
		if (!otx2_update_rq_stats(pfvf, qidx)) {
			for (stat = 0; stat < otx2_n_queue_stats; stat++)
				*((*data)++) = 0;
			continue;
		}
		for (stat = 0; stat < otx2_n_queue_stats; stat++)
			*((*data)++) = ((u64 *)&pfvf->qset.rq[qidx].stats)
				[otx2_queue_stats[stat].index];
	}

	for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) {
		if (!otx2_update_sq_stats(pfvf, qidx)) {
			for (stat = 0; stat < otx2_n_queue_stats; stat++)
				*((*data)++) = 0;
			continue;
		}
		for (stat = 0; stat < otx2_n_queue_stats; stat++)
			*((*data)++) = ((u64 *)&pfvf->qset.sq[qidx].stats)
				[otx2_queue_stats[stat].index];
	}
}

/* Get device and per queue statistics */
static void otx2_get_ethtool_stats(struct net_device *netdev,
				   struct ethtool_stats *stats, u64 *data)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	int stat;

	otx2_get_dev_stats(pfvf);
	for (stat = 0; stat < otx2_n_dev_stats; stat++)
		*(data++) = ((u64 *)&pfvf->hw.dev_stats)
				[otx2_dev_stats[stat].index];

	for (stat = 0; stat < otx2_n_drv_stats; stat++)
		*(data++) = atomic_read(&((atomic_t *)&pfvf->hw.drv_stats)
						[otx2_drv_stats[stat].index]);

	otx2_get_qset_stats(pfvf, stats, &data);
	otx2_update_lmac_stats(pfvf);
	for (stat = 0; stat < CGX_RX_STATS_COUNT; stat++)
		*(data++) = pfvf->hw.cgx_rx_stats[stat];
	for (stat = 0; stat < CGX_TX_STATS_COUNT; stat++)
		*(data++) = pfvf->hw.cgx_tx_stats[stat];
	*(data++) = pfvf->reset_count;
}

static int otx2_get_sset_count(struct net_device *netdev, int sset)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	int qstats_count;

	if (sset != ETH_SS_STATS)
		return -EINVAL;

	qstats_count = otx2_n_queue_stats *
		       (pfvf->hw.rx_queues + pfvf->hw.tx_queues);

	return otx2_n_dev_stats + otx2_n_drv_stats + qstats_count +
	       CGX_RX_STATS_COUNT + CGX_TX_STATS_COUNT + 1;
}
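/* Worked example of the ETH_SS_STATS count above (queue counts are
 * illustrative): with 8 Rx and 8 Tx queues the total is
 *
 *	6 dev stats + 6 drv stats + 2 * (8 + 8) queue stats
 *	+ CGX_RX_STATS_COUNT + CGX_TX_STATS_COUNT + 1 (reset_count)
 *
 * This is the value "ethtool -S" sizes its output from, which is why
 * it must match exactly what otx2_get_ethtool_stats() emits.
 */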
/* Get the number of queues the device supports and the current queue count */
static void otx2_get_channels(struct net_device *dev,
			      struct ethtool_channels *channel)
{
	struct otx2_nic *pfvf = netdev_priv(dev);

	channel->max_rx = pfvf->hw.max_queues;
	channel->max_tx = pfvf->hw.max_queues;

	channel->rx_count = pfvf->hw.rx_queues;
	channel->tx_count = pfvf->hw.tx_queues;
}

/* Set the number of Tx and Rx queues to be used */
static int otx2_set_channels(struct net_device *dev,
			     struct ethtool_channels *channel)
{
	struct otx2_nic *pfvf = netdev_priv(dev);
	bool if_up = netif_running(dev);
	int err = 0;

	if (!channel->rx_count || !channel->tx_count)
		return -EINVAL;

	if (if_up)
		otx2_dev_stop(dev);

	err = otx2_set_real_num_queues(dev, channel->tx_count,
				       channel->rx_count);
	if (err)
		goto fail;

	pfvf->hw.rx_queues = channel->rx_count;
	pfvf->hw.tx_queues = channel->tx_count;
	pfvf->qset.cq_cnt = pfvf->hw.tx_queues + pfvf->hw.rx_queues;

fail:
	if (if_up)
		otx2_dev_open(dev);

	if (!err)
		netdev_info(dev,
			    "Setting num Tx rings to %d, Rx rings to %d success\n",
			    pfvf->hw.tx_queues, pfvf->hw.rx_queues);

	return err;
}

static void otx2_get_pauseparam(struct net_device *netdev,
				struct ethtool_pauseparam *pause)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	struct cgx_pause_frm_cfg *req, *rsp;

	req = otx2_mbox_alloc_msg_cgx_cfg_pause_frm(&pfvf->mbox);
	if (!req)
		return;

	if (!otx2_sync_mbox_msg(&pfvf->mbox)) {
		rsp = (struct cgx_pause_frm_cfg *)
		      otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
		pause->rx_pause = rsp->rx_pause;
		pause->tx_pause = rsp->tx_pause;
	}
}

static int otx2_set_pauseparam(struct net_device *netdev,
			       struct ethtool_pauseparam *pause)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);

	if (pause->autoneg)
		return -EOPNOTSUPP;

	if (pause->rx_pause)
		pfvf->flags |= OTX2_FLAG_RX_PAUSE_ENABLED;
	else
		pfvf->flags &= ~OTX2_FLAG_RX_PAUSE_ENABLED;

	if (pause->tx_pause)
		pfvf->flags |= OTX2_FLAG_TX_PAUSE_ENABLED;
	else
		pfvf->flags &= ~OTX2_FLAG_TX_PAUSE_ENABLED;

	return otx2_config_pause_frm(pfvf);
}

static void otx2_get_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *ring)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	struct otx2_qset *qs = &pfvf->qset;

	ring->rx_max_pending = Q_COUNT(Q_SIZE_MAX);
	ring->rx_pending = qs->rqe_cnt ? qs->rqe_cnt : Q_COUNT(Q_SIZE_256);
	ring->tx_max_pending = Q_COUNT(Q_SIZE_MAX);
	ring->tx_pending = qs->sqe_cnt ? qs->sqe_cnt : Q_COUNT(Q_SIZE_4K);
}
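/* Ring sizes are restricted to the power-of-4 steps listed in the
 * comment below (16 .. 1M), so requested descriptor counts are rounded
 * via Q_SIZE()/Q_COUNT() rather than used verbatim.  For example,
 * assuming the Q_SIZE()/Q_COUNT() encoding from the common headers, a
 * request such as
 *
 *	ethtool -G <iface> rx 1000 tx 5000
 *
 * would settle on the nearest supported sizes (1K Rx and 4K Tx here).
 */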
static int otx2_set_ringparam(struct net_device *netdev,
			      struct ethtool_ringparam *ring)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	bool if_up = netif_running(netdev);
	struct otx2_qset *qs = &pfvf->qset;
	u32 rx_count, tx_count;

	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
		return -EINVAL;

	/* Permitted lengths are 16 64 256 1K 4K 16K 64K 256K 1M */
	rx_count = ring->rx_pending;
	/* On some silicon variants a skid or reserved CQEs are
	 * needed to avoid CQ overflow.
	 */
	if (rx_count < pfvf->hw.rq_skid)
		rx_count = pfvf->hw.rq_skid;
	rx_count = Q_COUNT(Q_SIZE(rx_count, 3));

	/* Due to pipelining, a minimum of 2000 unused SQ CQEs
	 * must be maintained to avoid CQ overflow, hence the
	 * minimum 4K size.
	 */
	tx_count = clamp_t(u32, ring->tx_pending,
			   Q_COUNT(Q_SIZE_4K), Q_COUNT(Q_SIZE_MAX));
	tx_count = Q_COUNT(Q_SIZE(tx_count, 3));

	if (tx_count == qs->sqe_cnt && rx_count == qs->rqe_cnt)
		return 0;

	if (if_up)
		otx2_dev_stop(netdev);

	/* Assigned to the nearest possible exponent */
	qs->sqe_cnt = tx_count;
	qs->rqe_cnt = rx_count;

	if (if_up)
		otx2_dev_open(netdev);
	return 0;
}

static int otx2_get_coalesce(struct net_device *netdev,
			     struct ethtool_coalesce *cmd)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	struct otx2_hw *hw = &pfvf->hw;

	cmd->rx_coalesce_usecs = hw->cq_time_wait;
	cmd->rx_max_coalesced_frames = hw->cq_ecount_wait;
	cmd->tx_coalesce_usecs = hw->cq_time_wait;
	cmd->tx_max_coalesced_frames = hw->cq_ecount_wait;

	return 0;
}
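/* Rx and Tx completions are mapped to the same CQ, so the driver keeps
 * a single timer value (cq_time_wait, in 100ns units) and a single
 * frame threshold (cq_ecount_wait); otx2_set_coalesce() below
 * reconciles the separate rx/tx values that ethtool hands in.  A
 * typical invocation (illustrative interface name):
 *
 *	ethtool -C eth0 rx-usecs 10 rx-frames 64
 */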
static int otx2_set_coalesce(struct net_device *netdev,
			     struct ethtool_coalesce *ec)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	struct otx2_hw *hw = &pfvf->hw;
	int qidx;

	if (ec->use_adaptive_rx_coalesce || ec->use_adaptive_tx_coalesce ||
	    ec->rx_coalesce_usecs_irq || ec->rx_max_coalesced_frames_irq ||
	    ec->tx_coalesce_usecs_irq || ec->tx_max_coalesced_frames_irq ||
	    ec->stats_block_coalesce_usecs || ec->pkt_rate_low ||
	    ec->rx_coalesce_usecs_low || ec->rx_max_coalesced_frames_low ||
	    ec->tx_coalesce_usecs_low || ec->tx_max_coalesced_frames_low ||
	    ec->pkt_rate_high || ec->rx_coalesce_usecs_high ||
	    ec->rx_max_coalesced_frames_high || ec->tx_coalesce_usecs_high ||
	    ec->tx_max_coalesced_frames_high || ec->rate_sample_interval)
		return -EOPNOTSUPP;

	if (!ec->rx_max_coalesced_frames || !ec->tx_max_coalesced_frames)
		return 0;

	/* 'cq_time_wait' is 8bit and is in multiples of 100ns,
	 * so clamp the user-given value to the range of 1 to 25usec.
	 */
	ec->rx_coalesce_usecs = clamp_t(u32, ec->rx_coalesce_usecs,
					1, CQ_TIMER_THRESH_MAX);
	ec->tx_coalesce_usecs = clamp_t(u32, ec->tx_coalesce_usecs,
					1, CQ_TIMER_THRESH_MAX);

	/* Rx and Tx are mapped to the same CQ, so check which one
	 * changed; if both did, choose the min.
	 */
	if (hw->cq_time_wait == ec->rx_coalesce_usecs)
		hw->cq_time_wait = ec->tx_coalesce_usecs;
	else if (hw->cq_time_wait == ec->tx_coalesce_usecs)
		hw->cq_time_wait = ec->rx_coalesce_usecs;
	else
		hw->cq_time_wait = min_t(u8, ec->rx_coalesce_usecs,
					 ec->tx_coalesce_usecs);

	/* Max ecount_wait supported is 16bit,
	 * so clamp the user-given value to the range of 1 to 64k.
	 */
	ec->rx_max_coalesced_frames = clamp_t(u32, ec->rx_max_coalesced_frames,
					      1, U16_MAX);
	ec->tx_max_coalesced_frames = clamp_t(u32, ec->tx_max_coalesced_frames,
					      1, U16_MAX);

	/* Rx and Tx are mapped to the same CQ, so check which one
	 * changed; if both did, choose the min.
	 */
	if (hw->cq_ecount_wait == ec->rx_max_coalesced_frames)
		hw->cq_ecount_wait = ec->tx_max_coalesced_frames;
	else if (hw->cq_ecount_wait == ec->tx_max_coalesced_frames)
		hw->cq_ecount_wait = ec->rx_max_coalesced_frames;
	else
		hw->cq_ecount_wait = min_t(u16, ec->rx_max_coalesced_frames,
					   ec->tx_max_coalesced_frames);

	if (netif_running(netdev)) {
		for (qidx = 0; qidx < pfvf->hw.cint_cnt; qidx++)
			otx2_config_irq_coalescing(pfvf, qidx);
	}

	return 0;
}

static int otx2_get_rss_hash_opts(struct otx2_nic *pfvf,
				  struct ethtool_rxnfc *nfc)
{
	struct otx2_rss_info *rss = &pfvf->hw.rss_info;

	if (!(rss->flowkey_cfg &
	    (NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6)))
		return 0;

	/* Minimum is IPv4 and IPv6, SIP/DIP */
	nfc->data = RXH_IP_SRC | RXH_IP_DST;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_TCP)
			nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
	case UDP_V4_FLOW:
	case UDP_V6_FLOW:
		if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_UDP)
			nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
	case SCTP_V4_FLOW:
	case SCTP_V6_FLOW:
		if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_SCTP)
			nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case IPV4_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IPV6_FLOW:
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
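/* The NIX flowkey selects which header fields feed the RSS hash.
 * otx2_set_rss_hash_opts() below only toggles between 2-tuple (SIP/DIP
 * only) and 4-tuple (SIP/DIP plus L4 ports) per protocol, and v4/v6
 * always switch together.  Example enabling 4-tuple hashing for TCP
 * (illustrative interface name):
 *
 *	ethtool -N eth0 rx-flow-hash tcp4 sdfn
 */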
static int otx2_set_rss_hash_opts(struct otx2_nic *pfvf,
				  struct ethtool_rxnfc *nfc)
{
	struct otx2_rss_info *rss = &pfvf->hw.rss_info;
	u32 rxh_l4 = RXH_L4_B_0_1 | RXH_L4_B_2_3;
	u32 rss_cfg = rss->flowkey_cfg;

	if (!rss->enable) {
		netdev_err(pfvf->netdev,
			   "RSS is disabled, cannot change settings\n");
		return -EIO;
	}

	/* Minimum is IPv4 and IPv6, SIP/DIP */
	if (!(nfc->data & RXH_IP_SRC) || !(nfc->data & RXH_IP_DST))
		return -EINVAL;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		/* Different configs for v4 and v6 are not supported.
		 * Both have to be either 4-tuple or 2-tuple.
		 */
		switch (nfc->data & rxh_l4) {
		case 0:
			rss_cfg &= ~NIX_FLOW_KEY_TYPE_TCP;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			rss_cfg |= NIX_FLOW_KEY_TYPE_TCP;
			break;
		default:
			return -EINVAL;
		}
		break;
	case UDP_V4_FLOW:
	case UDP_V6_FLOW:
		switch (nfc->data & rxh_l4) {
		case 0:
			rss_cfg &= ~NIX_FLOW_KEY_TYPE_UDP;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			rss_cfg |= NIX_FLOW_KEY_TYPE_UDP;
			break;
		default:
			return -EINVAL;
		}
		break;
	case SCTP_V4_FLOW:
	case SCTP_V6_FLOW:
		switch (nfc->data & rxh_l4) {
		case 0:
			rss_cfg &= ~NIX_FLOW_KEY_TYPE_SCTP;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			rss_cfg |= NIX_FLOW_KEY_TYPE_SCTP;
			break;
		default:
			return -EINVAL;
		}
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		rss_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6;
		break;
	default:
		return -EINVAL;
	}

	rss->flowkey_cfg = rss_cfg;
	otx2_set_flowkey_cfg(pfvf);
	return 0;
}

static int otx2_get_rxnfc(struct net_device *dev,
			  struct ethtool_rxnfc *nfc, u32 *rules)
{
	struct otx2_nic *pfvf = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (nfc->cmd) {
	case ETHTOOL_GRXRINGS:
		nfc->data = pfvf->hw.rx_queues;
		ret = 0;
		break;
	case ETHTOOL_GRXFH:
		return otx2_get_rss_hash_opts(pfvf, nfc);
	default:
		break;
	}
	return ret;
}

static int otx2_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *nfc)
{
	struct otx2_nic *pfvf = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (nfc->cmd) {
	case ETHTOOL_SRXFH:
		ret = otx2_set_rss_hash_opts(pfvf, nfc);
		break;
	default:
		break;
	}

	return ret;
}

static u32 otx2_get_rxfh_key_size(struct net_device *netdev)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	struct otx2_rss_info *rss;

	rss = &pfvf->hw.rss_info;

	return sizeof(rss->key);
}

static u32 otx2_get_rxfh_indir_size(struct net_device *dev)
{
	struct otx2_nic *pfvf = netdev_priv(dev);

	return pfvf->hw.rss_info.rss_size;
}

/* Get RSS configuration */
static int otx2_get_rxfh(struct net_device *dev, u32 *indir,
			 u8 *hkey, u8 *hfunc)
{
	struct otx2_nic *pfvf = netdev_priv(dev);
	struct otx2_rss_info *rss;
	int idx;

	rss = &pfvf->hw.rss_info;

	if (indir) {
		for (idx = 0; idx < rss->rss_size; idx++)
			indir[idx] = rss->ind_tbl[idx];
	}

	if (hkey)
		memcpy(hkey, rss->key, sizeof(rss->key));

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;

	return 0;
}

/* Configure RSS table and hash key */
static int otx2_set_rxfh(struct net_device *dev, const u32 *indir,
			 const u8 *hkey, const u8 hfunc)
{
	struct otx2_nic *pfvf = netdev_priv(dev);
	struct otx2_rss_info *rss;
	int idx;

	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	rss = &pfvf->hw.rss_info;

	if (!rss->enable) {
		netdev_err(dev, "RSS is disabled, cannot change settings\n");
		return -EIO;
	}

	if (indir) {
		for (idx = 0; idx < rss->rss_size; idx++)
			rss->ind_tbl[idx] = indir[idx];
	}

	if (hkey) {
		memcpy(rss->key, hkey, sizeof(rss->key));
		otx2_set_rss_key(pfvf);
	}

	otx2_set_rss_table(pfvf);
	return 0;
}
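/* The RSS key and indirection table configured via otx2_set_rxfh() can
 * be inspected and driven from userspace with the standard ethtool RSS
 * commands (illustrative interface name):
 *
 *	ethtool -x eth0		 # dump indirection table and hash key
 *	ethtool -X eth0 equal 4	 # spread flows over the first 4 queues
 */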
static u32 otx2_get_msglevel(struct net_device *netdev)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);

	return pfvf->msg_enable;
}

static void otx2_set_msglevel(struct net_device *netdev, u32 val)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);

	pfvf->msg_enable = val;
}

static u32 otx2_get_link(struct net_device *netdev)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);

	return pfvf->linfo.link_up;
}

static const struct ethtool_ops otx2_ethtool_ops = {
	.get_link		= otx2_get_link,
	.get_drvinfo		= otx2_get_drvinfo,
	.get_strings		= otx2_get_strings,
	.get_ethtool_stats	= otx2_get_ethtool_stats,
	.get_sset_count		= otx2_get_sset_count,
	.set_channels		= otx2_set_channels,
	.get_channels		= otx2_get_channels,
	.get_ringparam		= otx2_get_ringparam,
	.set_ringparam		= otx2_set_ringparam,
	.get_coalesce		= otx2_get_coalesce,
	.set_coalesce		= otx2_set_coalesce,
	.get_rxnfc		= otx2_get_rxnfc,
	.set_rxnfc		= otx2_set_rxnfc,
	.get_rxfh_key_size	= otx2_get_rxfh_key_size,
	.get_rxfh_indir_size	= otx2_get_rxfh_indir_size,
	.get_rxfh		= otx2_get_rxfh,
	.set_rxfh		= otx2_set_rxfh,
	.get_msglevel		= otx2_get_msglevel,
	.set_msglevel		= otx2_set_msglevel,
	.get_pauseparam		= otx2_get_pauseparam,
	.set_pauseparam		= otx2_set_pauseparam,
};

void otx2_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &otx2_ethtool_ops;
}
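/* otx2_set_ethtool_ops() is the only external entry point in this
 * file; the PF driver is expected to call it from its probe path,
 * before register_netdev(), so the ops are in place when the netdev
 * first becomes visible (presumed call site, see otx2_pf.c):
 *
 *	otx2_set_ethtool_ops(netdev);
 *	err = register_netdev(netdev);
 */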