/*
 * Copyright (C) 2015 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

/* ETHTOOL Support for VNIC_VF Device */

#include <linux/pci.h>

#include "nic_reg.h"
#include "nic.h"
#include "nicvf_queues.h"
#include "q_struct.h"
#include "thunder_bgx.h"

#define DRV_NAME	"thunder-nicvf"
#define DRV_VERSION	"1.0"

struct nicvf_stat {
	char name[ETH_GSTRING_LEN];
	unsigned int index;
};

#define NICVF_HW_STAT(stat) { \
	.name = #stat, \
	.index = offsetof(struct nicvf_hw_stats, stat) / sizeof(u64), \
}

#define NICVF_DRV_STAT(stat) { \
	.name = #stat, \
	.index = offsetof(struct nicvf_drv_stats, stat) / sizeof(u64), \
}

static const struct nicvf_stat nicvf_hw_stats[] = {
	NICVF_HW_STAT(rx_bytes_ok),
	NICVF_HW_STAT(rx_ucast_frames_ok),
	NICVF_HW_STAT(rx_bcast_frames_ok),
	NICVF_HW_STAT(rx_mcast_frames_ok),
	NICVF_HW_STAT(rx_fcs_errors),
	NICVF_HW_STAT(rx_l2_errors),
	NICVF_HW_STAT(rx_drop_red),
	NICVF_HW_STAT(rx_drop_red_bytes),
	NICVF_HW_STAT(rx_drop_overrun),
	NICVF_HW_STAT(rx_drop_overrun_bytes),
	NICVF_HW_STAT(rx_drop_bcast),
	NICVF_HW_STAT(rx_drop_mcast),
	NICVF_HW_STAT(rx_drop_l3_bcast),
	NICVF_HW_STAT(rx_drop_l3_mcast),
	NICVF_HW_STAT(tx_bytes_ok),
	NICVF_HW_STAT(tx_ucast_frames_ok),
	NICVF_HW_STAT(tx_bcast_frames_ok),
	NICVF_HW_STAT(tx_mcast_frames_ok),
};

static const struct nicvf_stat nicvf_drv_stats[] = {
	NICVF_DRV_STAT(rx_frames_ok),
	NICVF_DRV_STAT(rx_frames_64),
	NICVF_DRV_STAT(rx_frames_127),
	NICVF_DRV_STAT(rx_frames_255),
	NICVF_DRV_STAT(rx_frames_511),
	NICVF_DRV_STAT(rx_frames_1023),
	NICVF_DRV_STAT(rx_frames_1518),
	NICVF_DRV_STAT(rx_frames_jumbo),
	NICVF_DRV_STAT(rx_drops),
	NICVF_DRV_STAT(tx_frames_ok),
	NICVF_DRV_STAT(tx_busy),
	NICVF_DRV_STAT(tx_tso),
	NICVF_DRV_STAT(tx_drops),
};

static const struct nicvf_stat nicvf_queue_stats[] = {
	{ "bytes", 0 },
	{ "frames", 1 },
};

static const unsigned int nicvf_n_hw_stats = ARRAY_SIZE(nicvf_hw_stats);
static const unsigned int nicvf_n_drv_stats = ARRAY_SIZE(nicvf_drv_stats);
static const unsigned int nicvf_n_queue_stats = ARRAY_SIZE(nicvf_queue_stats);

static int nicvf_get_settings(struct net_device *netdev,
			      struct ethtool_cmd *cmd)
{
	struct nicvf *nic = netdev_priv(netdev);

	cmd->supported = 0;
	cmd->transceiver = XCVR_EXTERNAL;
	if (nic->speed <= 1000) {
		cmd->port = PORT_MII;
		cmd->autoneg = AUTONEG_ENABLE;
	} else {
		cmd->port = PORT_FIBRE;
		cmd->autoneg = AUTONEG_DISABLE;
	}
	cmd->duplex = nic->duplex;
	ethtool_cmd_speed_set(cmd, nic->speed);

	return 0;
}

static void nicvf_get_drvinfo(struct net_device *netdev,
			      struct ethtool_drvinfo *info)
{
	struct nicvf *nic = netdev_priv(netdev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(nic->pdev), sizeof(info->bus_info));
}

static u32 nicvf_get_msglevel(struct net_device *netdev)
{
	struct nicvf *nic = netdev_priv(netdev);

	return nic->msg_enable;
}

static void nicvf_set_msglevel(struct net_device *netdev, u32 lvl)
{
	struct nicvf *nic = netdev_priv(netdev);

	nic->msg_enable = lvl;
}

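/* Stat strings must be emitted in the same order that
 * nicvf_get_ethtool_stats() fills the data array: HW stats, driver stats,
 * per-RQ and per-SQ queue stats, then BGX Rx/Tx stats.
 */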
static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
{
	int stats, qidx;

	if (sset != ETH_SS_STATS)
		return;

	for (stats = 0; stats < nicvf_n_hw_stats; stats++) {
		memcpy(data, nicvf_hw_stats[stats].name, ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}

	for (stats = 0; stats < nicvf_n_drv_stats; stats++) {
		memcpy(data, nicvf_drv_stats[stats].name, ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}

	for (qidx = 0; qidx < MAX_RCV_QUEUES_PER_QS; qidx++) {
		for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
			sprintf(data, "rxq%d: %s", qidx,
				nicvf_queue_stats[stats].name);
			data += ETH_GSTRING_LEN;
		}
	}

	for (qidx = 0; qidx < MAX_SND_QUEUES_PER_QS; qidx++) {
		for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
			sprintf(data, "txq%d: %s", qidx,
				nicvf_queue_stats[stats].name);
			data += ETH_GSTRING_LEN;
		}
	}

	for (stats = 0; stats < BGX_RX_STATS_COUNT; stats++) {
		sprintf(data, "bgx_rxstat%d: ", stats);
		data += ETH_GSTRING_LEN;
	}

	for (stats = 0; stats < BGX_TX_STATS_COUNT; stats++) {
		sprintf(data, "bgx_txstat%d: ", stats);
		data += ETH_GSTRING_LEN;
	}
}

static int nicvf_get_sset_count(struct net_device *netdev, int sset)
{
	if (sset != ETH_SS_STATS)
		return -EINVAL;

	return nicvf_n_hw_stats + nicvf_n_drv_stats +
		(nicvf_n_queue_stats *
		 (MAX_RCV_QUEUES_PER_QS + MAX_SND_QUEUES_PER_QS)) +
		BGX_RX_STATS_COUNT + BGX_TX_STATS_COUNT;
}

static void nicvf_get_ethtool_stats(struct net_device *netdev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct nicvf *nic = netdev_priv(netdev);
	int stat, qidx;

	nicvf_update_stats(nic);

	/* Update LMAC stats */
	nicvf_update_lmac_stats(nic);

	for (stat = 0; stat < nicvf_n_hw_stats; stat++)
		*(data++) = ((u64 *)&nic->stats)
				[nicvf_hw_stats[stat].index];
	for (stat = 0; stat < nicvf_n_drv_stats; stat++)
		*(data++) = ((u64 *)&nic->drv_stats)
				[nicvf_drv_stats[stat].index];

	for (qidx = 0; qidx < MAX_RCV_QUEUES_PER_QS; qidx++) {
		for (stat = 0; stat < nicvf_n_queue_stats; stat++)
			*(data++) = ((u64 *)&nic->qs->rq[qidx].stats)
					[nicvf_queue_stats[stat].index];
	}

	for (qidx = 0; qidx < MAX_SND_QUEUES_PER_QS; qidx++) {
		for (stat = 0; stat < nicvf_n_queue_stats; stat++)
			*(data++) = ((u64 *)&nic->qs->sq[qidx].stats)
					[nicvf_queue_stats[stat].index];
	}

	for (stat = 0; stat < BGX_RX_STATS_COUNT; stat++)
		*(data++) = nic->bgx_stats.rx_stats[stat];
	for (stat = 0; stat < BGX_TX_STATS_COUNT; stat++)
		*(data++) = nic->bgx_stats.tx_stats[stat];
}

static int nicvf_get_regs_len(struct net_device *dev)
{
	return sizeof(u64) * NIC_VF_REG_COUNT;
}

static void nicvf_get_regs(struct net_device *dev,
			   struct ethtool_regs *regs, void *reg)
{
	struct nicvf *nic = netdev_priv(dev);
	u64 *p = (u64 *)reg;
	u64 reg_offset;
	int mbox, key, stat, q;
	int i = 0;

	regs->version = 0;
	memset(p, 0, NIC_VF_REG_COUNT);

	p[i++] = nicvf_reg_read(nic, NIC_VNIC_CFG);
	/* Mailbox registers */
	for (mbox = 0; mbox < NIC_PF_VF_MAILBOX_SIZE; mbox++)
		p[i++] = nicvf_reg_read(nic,
					NIC_VF_PF_MAILBOX_0_1 | (mbox << 3));

	p[i++] = nicvf_reg_read(nic, NIC_VF_INT);
	p[i++] = nicvf_reg_read(nic, NIC_VF_INT_W1S);
	p[i++] = nicvf_reg_read(nic, NIC_VF_ENA_W1C);
	p[i++] = nicvf_reg_read(nic, NIC_VF_ENA_W1S);

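	/* RSS configuration and hash key registers */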
	p[i++] = nicvf_reg_read(nic, NIC_VNIC_RSS_CFG);

	for (key = 0; key < RSS_HASH_KEY_SIZE; key++)
		p[i++] = nicvf_reg_read(nic, NIC_VNIC_RSS_KEY_0_4 | (key << 3));

	/* Tx/Rx statistics */
	for (stat = 0; stat < TX_STATS_ENUM_LAST; stat++)
		p[i++] = nicvf_reg_read(nic,
					NIC_VNIC_TX_STAT_0_4 | (stat << 3));

	for (stat = 0; stat < RX_STATS_ENUM_LAST; stat++)
		p[i++] = nicvf_reg_read(nic,
					NIC_VNIC_RX_STAT_0_13 | (stat << 3));

	p[i++] = nicvf_reg_read(nic, NIC_QSET_RQ_GEN_CFG);

	/* All completion queue's registers */
	for (q = 0; q < MAX_CMP_QUEUES_PER_QS; q++) {
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_CFG, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_CFG2, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_THRESH, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_BASE, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_TAIL, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_DOOR, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS2, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_DEBUG, q);
	}

	/* All receive queue's registers */
	for (q = 0; q < MAX_RCV_QUEUES_PER_QS; q++) {
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RQ_0_7_CFG, q);
		p[i++] = nicvf_queue_reg_read(nic,
					      NIC_QSET_RQ_0_7_STAT_0_1, q);
		reg_offset = NIC_QSET_RQ_0_7_STAT_0_1 | (1 << 3);
		p[i++] = nicvf_queue_reg_read(nic, reg_offset, q);
	}

	/* All send queue's registers */
	for (q = 0; q < MAX_SND_QUEUES_PER_QS; q++) {
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_THRESH, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_BASE, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_DOOR, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STATUS, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_DEBUG, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CNM_CHG, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1, q);
		reg_offset = NIC_QSET_SQ_0_7_STAT_0_1 | (1 << 3);
		p[i++] = nicvf_queue_reg_read(nic, reg_offset, q);
	}

	/* All receive buffer descriptor ring registers */
	for (q = 0; q < MAX_RCV_BUF_DESC_RINGS_PER_QS; q++) {
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_CFG, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_THRESH, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_BASE, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_HEAD, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_DOOR, q);
		p[i++] = nicvf_queue_reg_read(nic,
					      NIC_QSET_RBDR_0_1_STATUS0, q);
		p[i++] = nicvf_queue_reg_read(nic,
					      NIC_QSET_RBDR_0_1_STATUS1, q);
		reg_offset = NIC_QSET_RBDR_0_1_PREFETCH_STATUS;
		p[i++] = nicvf_queue_reg_read(nic, reg_offset, q);
	}
}

static int nicvf_get_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *cmd)
{
	struct nicvf *nic = netdev_priv(netdev);

	cmd->rx_coalesce_usecs = nic->cq_coalesce_usecs;
	return 0;
}

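/* Ring sizes reported to ethtool: Rx from the receive buffer descriptor
 * ring (RBDR) length, Tx from the send queue length.
 */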
static void nicvf_get_ringparam(struct net_device *netdev,
				struct ethtool_ringparam *ring)
{
	struct nicvf *nic = netdev_priv(netdev);
	struct queue_set *qs = nic->qs;

	ring->rx_max_pending = MAX_RCV_BUF_COUNT;
	ring->rx_pending = qs->rbdr_len;
	ring->tx_max_pending = MAX_SND_QUEUE_LEN;
	ring->tx_pending = qs->sq_len;
}

static int nicvf_get_rss_hash_opts(struct nicvf *nic,
				   struct ethtool_rxnfc *info)
{
	info->data = 0;

	switch (info->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
	case UDP_V4_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V4_FLOW:
	case SCTP_V6_FLOW:
		info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* Fall through: L4 flows also hash on the IP addresses */
	case IPV4_FLOW:
	case IPV6_FLOW:
		info->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int nicvf_get_rxnfc(struct net_device *dev,
			   struct ethtool_rxnfc *info, u32 *rules)
{
	struct nicvf *nic = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		info->data = nic->qs->rq_cnt;
		ret = 0;
		break;
	case ETHTOOL_GRXFH:
		return nicvf_get_rss_hash_opts(nic, info);
	default:
		break;
	}
	return ret;
}

static int nicvf_set_rss_hash_opts(struct nicvf *nic,
				   struct ethtool_rxnfc *info)
{
	struct nicvf_rss_info *rss = &nic->rss_info;
	u64 rss_cfg = nicvf_reg_read(nic, NIC_VNIC_RSS_CFG);

	if (!rss->enable) {
		netdev_err(nic->netdev,
			   "RSS is disabled, hash cannot be set\n");
		return -EIO;
	}

	netdev_info(nic->netdev, "Set RSS flow type = %d, data = %lld\n",
		    info->flow_type, info->data);

	if (!(info->data & RXH_IP_SRC) || !(info->data & RXH_IP_DST))
		return -EINVAL;

	switch (info->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		switch (info->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			rss_cfg &= ~(1ULL << RSS_HASH_TCP);
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			rss_cfg |= (1ULL << RSS_HASH_TCP);
			break;
		default:
			return -EINVAL;
		}
		break;
	case UDP_V4_FLOW:
	case UDP_V6_FLOW:
		switch (info->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			rss_cfg &= ~(1ULL << RSS_HASH_UDP);
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			rss_cfg |= (1ULL << RSS_HASH_UDP);
			break;
		default:
			return -EINVAL;
		}
		break;
	case SCTP_V4_FLOW:
	case SCTP_V6_FLOW:
		switch (info->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			rss_cfg &= ~(1ULL << RSS_HASH_L4ETC);
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			rss_cfg |= (1ULL << RSS_HASH_L4ETC);
			break;
		default:
			return -EINVAL;
		}
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		rss_cfg = RSS_HASH_IP;
		break;
	default:
		return -EINVAL;
	}

	nicvf_reg_write(nic, NIC_VNIC_RSS_CFG, rss_cfg);
	return 0;
}

static int nicvf_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
{
	struct nicvf *nic = netdev_priv(dev);

	switch (info->cmd) {
	case ETHTOOL_SRXFH:
		return nicvf_set_rss_hash_opts(nic, info);
	default:
		break;
	}
	return -EOPNOTSUPP;
}

static u32 nicvf_get_rxfh_key_size(struct net_device *netdev)
{
	return RSS_HASH_KEY_SIZE * sizeof(u64);
}

static u32 nicvf_get_rxfh_indir_size(struct net_device *dev)
{
	struct nicvf *nic = netdev_priv(dev);

	return nic->rss_info.rss_size;
}

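/* The RSS key and indirection table are cached in nic->rss_info; updates
 * are pushed to hardware via nicvf_set_rss_key() and nicvf_config_rss().
 */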
static int nicvf_get_rxfh(struct net_device *dev, u32 *indir, u8 *hkey,
			  u8 *hfunc)
{
	struct nicvf *nic = netdev_priv(dev);
	struct nicvf_rss_info *rss = &nic->rss_info;
	int idx;

	if (indir) {
		for (idx = 0; idx < rss->rss_size; idx++)
			indir[idx] = rss->ind_tbl[idx];
	}

	if (hkey)
		memcpy(hkey, rss->key, RSS_HASH_KEY_SIZE * sizeof(u64));

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;

	return 0;
}

static int nicvf_set_rxfh(struct net_device *dev, const u32 *indir,
			  const u8 *hkey, u8 hfunc)
{
	struct nicvf *nic = netdev_priv(dev);
	struct nicvf_rss_info *rss = &nic->rss_info;
	int idx;

	if ((nic->qs->rq_cnt <= 1) || (nic->cpi_alg != CPI_ALG_NONE)) {
		rss->enable = false;
		rss->hash_bits = 0;
		return -EIO;
	}

	/* We do not allow change in unsupported parameters */
	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	rss->enable = true;
	if (indir) {
		for (idx = 0; idx < rss->rss_size; idx++)
			rss->ind_tbl[idx] = indir[idx];
	}

	if (hkey) {
		memcpy(rss->key, hkey, RSS_HASH_KEY_SIZE * sizeof(u64));
		nicvf_set_rss_key(nic);
	}

	nicvf_config_rss(nic);
	return 0;
}

/* Get no of queues device supports and current queue count */
static void nicvf_get_channels(struct net_device *dev,
			       struct ethtool_channels *channel)
{
	struct nicvf *nic = netdev_priv(dev);

	memset(channel, 0, sizeof(*channel));

	channel->max_rx = MAX_RCV_QUEUES_PER_QS;
	channel->max_tx = MAX_SND_QUEUES_PER_QS;

	channel->rx_count = nic->qs->rq_cnt;
	channel->tx_count = nic->qs->sq_cnt;
}

/* Set no of Tx, Rx queues to be used */
static int nicvf_set_channels(struct net_device *dev,
			      struct ethtool_channels *channel)
{
	struct nicvf *nic = netdev_priv(dev);
	int err = 0;

	if (!channel->rx_count || !channel->tx_count)
		return -EINVAL;
	if (channel->rx_count > MAX_RCV_QUEUES_PER_QS)
		return -EINVAL;
	if (channel->tx_count > MAX_SND_QUEUES_PER_QS)
		return -EINVAL;

	nic->qs->rq_cnt = channel->rx_count;
	nic->qs->sq_cnt = channel->tx_count;
	nic->qs->cq_cnt = max(nic->qs->rq_cnt, nic->qs->sq_cnt);

	err = nicvf_set_real_num_queues(dev, nic->qs->sq_cnt, nic->qs->rq_cnt);
	if (err)
		return err;

	if (!netif_running(dev))
		return err;

	nicvf_stop(dev);
	nicvf_open(dev);
	netdev_info(dev, "Setting num Tx rings to %d, Rx rings to %d success\n",
		    nic->qs->sq_cnt, nic->qs->rq_cnt);

	return err;
}

static const struct ethtool_ops nicvf_ethtool_ops = {
	.get_settings = nicvf_get_settings,
	.get_link = ethtool_op_get_link,
	.get_drvinfo = nicvf_get_drvinfo,
	.get_msglevel = nicvf_get_msglevel,
	.set_msglevel = nicvf_set_msglevel,
	.get_strings = nicvf_get_strings,
	.get_sset_count = nicvf_get_sset_count,
	.get_ethtool_stats = nicvf_get_ethtool_stats,
	.get_regs_len = nicvf_get_regs_len,
	.get_regs = nicvf_get_regs,
	.get_coalesce = nicvf_get_coalesce,
	.get_ringparam = nicvf_get_ringparam,
	.get_rxnfc = nicvf_get_rxnfc,
	.set_rxnfc = nicvf_set_rxnfc,
	.get_rxfh_key_size = nicvf_get_rxfh_key_size,
	.get_rxfh_indir_size = nicvf_get_rxfh_indir_size,
	.get_rxfh = nicvf_get_rxfh,
	.set_rxfh = nicvf_set_rxfh,
	.get_channels = nicvf_get_channels,
	.set_channels = nicvf_set_channels,
	.get_ts_info = ethtool_op_get_ts_info,
};

void nicvf_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &nicvf_ethtool_ops;
}