/**
 * Copyright 2013 Cisco Systems, Inc. All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/netdevice.h>
#include <linux/ethtool.h>

#include "enic_res.h"
#include "enic.h"
#include "enic_dev.h"
#include "enic_clsf.h"
#include "vnic_rss.h"
#include "vnic_stats.h"

/* Maps an ethtool stat string to its u64 slot within one of the vNIC
 * firmware stats structs (tx/rx) or the driver's software gen stats.
 */
struct enic_stat {
	char name[ETH_GSTRING_LEN];	/* name reported via ETH_SS_STATS */
	unsigned int index;		/* u64-sized index into the stats struct */
};

/* Index is derived from the field offset, so the tables below stay in
 * sync with the struct layouts automatically.
 */
#define ENIC_TX_STAT(stat) { \
	.name = #stat, \
	.index = offsetof(struct vnic_tx_stats, stat) / sizeof(u64) \
}

#define ENIC_RX_STAT(stat) { \
	.name = #stat, \
	.index = offsetof(struct vnic_rx_stats, stat) / sizeof(u64) \
}

#define ENIC_GEN_STAT(stat) { \
	.name = #stat, \
	.index = offsetof(struct vnic_gen_stats, stat) / sizeof(u64)\
}

/* Firmware-provided TX counters exposed through ethtool -S */
static const struct enic_stat enic_tx_stats[] = {
	ENIC_TX_STAT(tx_frames_ok),
	ENIC_TX_STAT(tx_unicast_frames_ok),
	ENIC_TX_STAT(tx_multicast_frames_ok),
	ENIC_TX_STAT(tx_broadcast_frames_ok),
	ENIC_TX_STAT(tx_bytes_ok),
	ENIC_TX_STAT(tx_unicast_bytes_ok),
	ENIC_TX_STAT(tx_multicast_bytes_ok),
	ENIC_TX_STAT(tx_broadcast_bytes_ok),
	ENIC_TX_STAT(tx_drops),
	ENIC_TX_STAT(tx_errors),
	ENIC_TX_STAT(tx_tso),
};

/* Firmware-provided RX counters exposed through ethtool -S */
static const struct enic_stat enic_rx_stats[] = {
	ENIC_RX_STAT(rx_frames_ok),
	ENIC_RX_STAT(rx_frames_total),
	ENIC_RX_STAT(rx_unicast_frames_ok),
	ENIC_RX_STAT(rx_multicast_frames_ok),
	ENIC_RX_STAT(rx_broadcast_frames_ok),
	ENIC_RX_STAT(rx_bytes_ok),
	ENIC_RX_STAT(rx_unicast_bytes_ok),
	ENIC_RX_STAT(rx_multicast_bytes_ok),
	ENIC_RX_STAT(rx_broadcast_bytes_ok),
	ENIC_RX_STAT(rx_drop),
	ENIC_RX_STAT(rx_no_bufs),
	ENIC_RX_STAT(rx_errors),
	ENIC_RX_STAT(rx_rss),
	ENIC_RX_STAT(rx_crc_errors),
	ENIC_RX_STAT(rx_frames_64),
	ENIC_RX_STAT(rx_frames_127),
	ENIC_RX_STAT(rx_frames_255),
	ENIC_RX_STAT(rx_frames_511),
	ENIC_RX_STAT(rx_frames_1023),
	ENIC_RX_STAT(rx_frames_1518),
	ENIC_RX_STAT(rx_frames_to_max),
};

/* Driver software counters (not firmware-sourced) */
static const struct enic_stat enic_gen_stats[] = {
	ENIC_GEN_STAT(dma_map_error),
};

static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);
static const unsigned int enic_n_gen_stats = ARRAY_SIZE(enic_gen_stats);

/* Program the same coalescing timer on every RQ interrupt.
 * NOTE(review): enic_msix_rq_intr() is used unconditionally here;
 * callers appear to be responsible for only reaching this in MSI-X
 * mode (see enic_set_coalesce) -- confirm against other call sites.
 */
static void enic_intr_coal_set_rx(struct enic *enic, u32 timer)
{
	int i;
	int intr;

	for (i = 0; i < enic->rq_count; i++) {
		intr = enic_msix_rq_intr(enic, i);
		vnic_intr_coalescing_timer_set(&enic->intr[intr], timer);
	}
}

/* ethtool ->get_link_ksettings: the adapter is reported as a fixed
 * 10G full-duplex fibre port with autoneg disabled; actual speed is
 * read from the vNIC device only while carrier is up.
 */
static int enic_get_ksettings(struct net_device *netdev,
			      struct ethtool_link_ksettings *ecmd)
{
	struct enic *enic = netdev_priv(netdev);
	struct ethtool_link_settings *base = &ecmd->base;

	ethtool_link_ksettings_add_link_mode(ecmd, supported,
					     10000baseT_Full);
	ethtool_link_ksettings_add_link_mode(ecmd, supported, FIBRE);
	ethtool_link_ksettings_add_link_mode(ecmd, advertising,
					     10000baseT_Full);
	ethtool_link_ksettings_add_link_mode(ecmd, advertising, FIBRE);
	base->port = PORT_FIBRE;

	if (netif_carrier_ok(netdev)) {
		base->speed = vnic_dev_port_speed(enic->vdev);
		base->duplex = DUPLEX_FULL;
	} else {
		base->speed = SPEED_UNKNOWN;
		base->duplex = DUPLEX_UNKNOWN;
	}

	base->autoneg = AUTONEG_DISABLE;

	return 0;
}

/* ethtool ->get_drvinfo: driver name/version plus firmware version
 * queried from the device via enic_dev_fw_info().
 */
static void enic_get_drvinfo(struct net_device *netdev,
	struct ethtool_drvinfo *drvinfo)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_devcmd_fw_info *fw_info;
	int err;

	err = enic_dev_fw_info(enic, &fw_info);
	/* return only when pci_zalloc_consistent fails in vnic_dev_fw_info
	 * For other failures, like devcmd failure, we return previously
	 * recorded info.
	 */
	if (err == -ENOMEM)
		return;

	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
	strlcpy(drvinfo->fw_version, fw_info->fw_version,
		sizeof(drvinfo->fw_version));
	strlcpy(drvinfo->bus_info, pci_name(enic->pdev),
		sizeof(drvinfo->bus_info));
}

/* ethtool ->get_strings: emit stat names in the same tx/rx/gen order
 * used by enic_get_ethtool_stats() and enic_get_sset_count().
 */
static void enic_get_strings(struct net_device *netdev, u32 stringset,
	u8 *data)
{
	unsigned int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < enic_n_tx_stats; i++) {
			memcpy(data, enic_tx_stats[i].name, ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		for (i = 0; i < enic_n_rx_stats; i++) {
			memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		for (i = 0; i < enic_n_gen_stats; i++) {
			memcpy(data, enic_gen_stats[i].name, ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		break;
	}
}

/* ethtool ->get_ringparam: report configured and maximum RQ/WQ
 * descriptor counts from the cached vNIC config.
 */
static void enic_get_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *ring)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_enet_config *c = &enic->config;

	ring->rx_max_pending = ENIC_MAX_RQ_DESCS;
	ring->rx_pending = c->rq_desc_count;
	ring->tx_max_pending = ENIC_MAX_WQ_DESCS;
	ring->tx_pending = c->wq_desc_count;
}

/* ethtool ->set_ringparam: resize RQ/WQ descriptor rings.
 *
 * Rejects mini/jumbo params and out-of-range counts, then closes the
 * device (if running), reallocates vNIC resources with the new counts
 * and reopens.  On any failure the previous descriptor counts are
 * restored in the cached config.
 */
static int enic_set_ringparam(struct net_device *netdev,
			      struct ethtool_ringparam *ring)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_enet_config *c = &enic->config;
	int running = netif_running(netdev);
	unsigned int rx_pending;
	unsigned int tx_pending;
	int err = 0;

	if (ring->rx_mini_max_pending || ring->rx_mini_pending) {
		netdev_info(netdev,
			    "modifying mini ring params is not supported");
		return -EINVAL;
	}
	if (ring->rx_jumbo_max_pending || ring->rx_jumbo_pending) {
		netdev_info(netdev,
			    "modifying jumbo ring params is not supported");
		return -EINVAL;
	}
	/* remember current values so we can roll back on failure */
	rx_pending = c->rq_desc_count;
	tx_pending = c->wq_desc_count;
	if (ring->rx_pending > ENIC_MAX_RQ_DESCS ||
	    ring->rx_pending < ENIC_MIN_RQ_DESCS) {
		netdev_info(netdev, "rx pending (%u) not in range [%u,%u]",
			    ring->rx_pending, ENIC_MIN_RQ_DESCS,
			    ENIC_MAX_RQ_DESCS);
		return -EINVAL;
	}
	if (ring->tx_pending > ENIC_MAX_WQ_DESCS ||
	    ring->tx_pending < ENIC_MIN_WQ_DESCS) {
		netdev_info(netdev, "tx pending (%u) not in range [%u,%u]",
			    ring->tx_pending, ENIC_MIN_WQ_DESCS,
			    ENIC_MAX_WQ_DESCS);
		return -EINVAL;
	}
	if (running)
		dev_close(netdev);
	c->rq_desc_count =
		ring->rx_pending & 0xffffffe0; /* must be aligned to groups of 32 */
	c->wq_desc_count =
		ring->tx_pending & 0xffffffe0; /* must be aligned to groups of 32 */
	enic_free_vnic_resources(enic);
	err = enic_alloc_vnic_resources(enic);
	if (err) {
		netdev_err(netdev,
			   "Failed to alloc vNIC resources, aborting\n");
		enic_free_vnic_resources(enic);
		goto err_out;
	}
	enic_init_vnic_resources(enic);
	if (running) {
		err = dev_open(netdev);
		if (err)
			goto err_out;
	}
	return 0;
err_out:
	/* restore previous counts; device may remain closed on error */
	c->rq_desc_count = rx_pending;
	c->wq_desc_count = tx_pending;
	return err;
}

/* ethtool ->get_sset_count: total number of stat strings/values
 * (must match enic_get_strings/enic_get_ethtool_stats).
 */
static int enic_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return enic_n_tx_stats + enic_n_rx_stats + enic_n_gen_stats;
	default:
		return -EOPNOTSUPP;
	}
}

/* ethtool ->get_ethtool_stats: dump firmware stats then copy each
 * counter out using the precomputed u64 indices from the stat tables.
 */
static void enic_get_ethtool_stats(struct net_device *netdev,
	struct ethtool_stats *stats, u64 *data)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_stats *vstats;
	unsigned int i;
	int err;

	err = enic_dev_stats_dump(enic, &vstats);
	/* return only when pci_zalloc_consistent fails in vnic_dev_stats_dump
	 * For other failures, like devcmd failure, we return previously
	 * recorded stats.
	 */
	if (err == -ENOMEM)
		return;

	for (i = 0; i < enic_n_tx_stats; i++)
		*(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].index];
	for (i = 0; i < enic_n_rx_stats; i++)
		*(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].index];
	for (i = 0; i < enic_n_gen_stats; i++)
		*(data++) = ((u64 *)&enic->gen_stats)[enic_gen_stats[i].index];
}

/* ethtool ->get_msglevel */
static u32 enic_get_msglevel(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	return enic->msg_enable;
}

/* ethtool ->set_msglevel */
static void enic_set_msglevel(struct net_device *netdev, u32 value)
{
	struct enic *enic = netdev_priv(netdev);
	enic->msg_enable = value;
}

/* ethtool ->get_coalesce: tx_coalesce_usecs is only meaningful in
 * MSI-X mode; the low/high usecs expose the adaptive-coalescing
 * (AIC) range settings.
 */
static int enic_get_coalesce(struct net_device *netdev,
	struct ethtool_coalesce *ecmd)
{
	struct enic *enic = netdev_priv(netdev);
	struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting;

	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
		ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
	ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs;
	if (rxcoal->use_adaptive_rx_coalesce)
		ecmd->use_adaptive_rx_coalesce = 1;
	ecmd->rx_coalesce_usecs_low = rxcoal->small_pkt_range_start;
	ecmd->rx_coalesce_usecs_high = rxcoal->range_end;

	return 0;
}

/* Validate an ethtool_coalesce request:
 * - reject every coalesce field the hardware has no support for;
 * - tx coalescing is only allowed in MSI-X interrupt mode;
 * - values above the adapter maximum are allowed but logged (they are
 *   clamped later in enic_set_coalesce);
 * - if an AIC high bound is given, the (clamped) range must span at
 *   least ENIC_AIC_LARGE_PKT_DIFF usecs.
 */
static int enic_coalesce_valid(struct enic *enic,
			       struct ethtool_coalesce *ec)
{
	u32 coalesce_usecs_max = vnic_dev_get_intr_coal_timer_max(enic->vdev);
	u32 rx_coalesce_usecs_high = min_t(u32, coalesce_usecs_max,
					   ec->rx_coalesce_usecs_high);
	u32 rx_coalesce_usecs_low = min_t(u32, coalesce_usecs_max,
					  ec->rx_coalesce_usecs_low);

	if (ec->rx_max_coalesced_frames		||
	    ec->rx_coalesce_usecs_irq		||
	    ec->rx_max_coalesced_frames_irq	||
	    ec->tx_max_coalesced_frames		||
	    ec->tx_coalesce_usecs_irq		||
	    ec->tx_max_coalesced_frames_irq	||
	    ec->stats_block_coalesce_usecs	||
	    ec->use_adaptive_tx_coalesce	||
	    ec->pkt_rate_low			||
	    ec->rx_max_coalesced_frames_low	||
	    ec->tx_coalesce_usecs_low		||
	    ec->tx_max_coalesced_frames_low	||
	    ec->pkt_rate_high			||
	    ec->rx_max_coalesced_frames_high	||
	    ec->tx_coalesce_usecs_high		||
	    ec->tx_max_coalesced_frames_high	||
	    ec->rate_sample_interval)
		return -EINVAL;

	if ((vnic_dev_get_intr_mode(enic->vdev) != VNIC_DEV_INTR_MODE_MSIX) &&
	    ec->tx_coalesce_usecs)
		return -EINVAL;

	if ((ec->tx_coalesce_usecs > coalesce_usecs_max)	||
	    (ec->rx_coalesce_usecs > coalesce_usecs_max)	||
	    (ec->rx_coalesce_usecs_low > coalesce_usecs_max)	||
	    (ec->rx_coalesce_usecs_high > coalesce_usecs_max))
		netdev_info(enic->netdev, "ethtool_set_coalesce: adaptor supports max coalesce value of %d. Setting max value.\n",
			    coalesce_usecs_max);

	if (ec->rx_coalesce_usecs_high &&
	    (rx_coalesce_usecs_high <
	     rx_coalesce_usecs_low + ENIC_AIC_LARGE_PKT_DIFF))
		return -EINVAL;

	return 0;
}

/* ethtool ->set_coalesce: after validation, clamp all values to the
 * adapter maximum, program WQ timers (MSI-X only), and either program
 * fixed RQ timers or record the adaptive (AIC) range settings.
 */
static int enic_set_coalesce(struct net_device *netdev,
	struct ethtool_coalesce *ecmd)
{
	struct enic *enic = netdev_priv(netdev);
	u32 tx_coalesce_usecs;
	u32 rx_coalesce_usecs;
	u32 rx_coalesce_usecs_low;
	u32 rx_coalesce_usecs_high;
	u32 coalesce_usecs_max;
	unsigned int i, intr;
	int ret;
	struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting;

	ret = enic_coalesce_valid(enic, ecmd);
	if (ret)
		return ret;
	coalesce_usecs_max = vnic_dev_get_intr_coal_timer_max(enic->vdev);
	tx_coalesce_usecs = min_t(u32, ecmd->tx_coalesce_usecs,
				  coalesce_usecs_max);
	rx_coalesce_usecs = min_t(u32, ecmd->rx_coalesce_usecs,
				  coalesce_usecs_max);

	rx_coalesce_usecs_low = min_t(u32, ecmd->rx_coalesce_usecs_low,
				      coalesce_usecs_max);
	rx_coalesce_usecs_high = min_t(u32, ecmd->rx_coalesce_usecs_high,
				       coalesce_usecs_max);

	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) {
		for (i = 0; i < enic->wq_count; i++) {
			intr = enic_msix_wq_intr(enic, i);
			vnic_intr_coalescing_timer_set(&enic->intr[intr],
						       tx_coalesce_usecs);
		}
		enic->tx_coalesce_usecs = tx_coalesce_usecs;
	}
	rxcoal->use_adaptive_rx_coalesce = !!ecmd->use_adaptive_rx_coalesce;
	if (!rxcoal->use_adaptive_rx_coalesce)
		enic_intr_coal_set_rx(enic, rx_coalesce_usecs);
	if (ecmd->rx_coalesce_usecs_high) {
		rxcoal->range_end = rx_coalesce_usecs_high;
		rxcoal->small_pkt_range_start = rx_coalesce_usecs_low;
		rxcoal->large_pkt_range_start = rx_coalesce_usecs_low +
						ENIC_AIC_LARGE_PKT_DIFF;
	}

	enic->rx_coalesce_usecs = rx_coalesce_usecs;

	return 0;
}

/* Collect the IDs of all installed RX classifier filters into
 * rule_locs.  Returns -EMSGSIZE if there are more filters than
 * cmd->rule_cnt allows.  Caller holds enic->rfs_h.lock.
 */
static int enic_grxclsrlall(struct enic *enic, struct ethtool_rxnfc *cmd,
			    u32 *rule_locs)
{
	int j, ret = 0, cnt = 0;

	cmd->data = enic->rfs_h.max - enic->rfs_h.free;
	for (j = 0; j < (1 << ENIC_RFS_FLW_BITSHIFT); j++) {
		struct hlist_head *hhead;
		struct hlist_node *tmp;
		struct enic_rfs_fltr_node *n;

		hhead = &enic->rfs_h.ht_head[j];
		hlist_for_each_entry_safe(n, tmp, hhead, node) {
			if (cnt == cmd->rule_cnt)
				return -EMSGSIZE;
			rule_locs[cnt] = n->fltr_id;
			cnt++;
		}
	}
	cmd->rule_cnt = cnt;

	return ret;
}

/* Fill an ethtool_rx_flow_spec from the classifier filter at
 * fsp->location.  Only TCP/UDP over IPv4 filters exist; masks are
 * reported as exact-match.  Caller holds enic->rfs_h.lock.
 */
static int enic_grxclsrule(struct enic *enic, struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
				(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct enic_rfs_fltr_node *n;

	n = htbl_fltr_search(enic, (u16)fsp->location);
	if (!n)
		return -EINVAL;
	switch (n->keys.basic.ip_proto) {
	case IPPROTO_TCP:
		fsp->flow_type = TCP_V4_FLOW;
		break;
	case IPPROTO_UDP:
		fsp->flow_type = UDP_V4_FLOW;
		break;
	default:
		return -EINVAL;
		break;	/* NOTE(review): unreachable after return above */
	}

	fsp->h_u.tcp_ip4_spec.ip4src = flow_get_u32_src(&n->keys);
	fsp->m_u.tcp_ip4_spec.ip4src = (__u32)~0;

	fsp->h_u.tcp_ip4_spec.ip4dst = flow_get_u32_dst(&n->keys);
	fsp->m_u.tcp_ip4_spec.ip4dst = (__u32)~0;

	fsp->h_u.tcp_ip4_spec.psrc = n->keys.ports.src;
	fsp->m_u.tcp_ip4_spec.psrc = (__u16)~0;

	fsp->h_u.tcp_ip4_spec.pdst = n->keys.ports.dst;
	fsp->m_u.tcp_ip4_spec.pdst = (__u16)~0;

	fsp->ring_cookie = n->rq_id;

	return 0;
}

/* ethtool ->get_rxnfc: dispatch RX flow-classification queries; the
 * classifier-table operations run under rfs_h.lock.
 */
static int enic_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			  u32 *rule_locs)
{
	struct enic *enic = netdev_priv(dev);
	int ret = 0;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = enic->rq_count;
		break;
	case ETHTOOL_GRXCLSRLCNT:
		spin_lock_bh(&enic->rfs_h.lock);
		cmd->rule_cnt = enic->rfs_h.max - enic->rfs_h.free;
		cmd->data = enic->rfs_h.max;
		spin_unlock_bh(&enic->rfs_h.lock);
		break;
	case ETHTOOL_GRXCLSRLALL:
		spin_lock_bh(&enic->rfs_h.lock);
		ret = enic_grxclsrlall(enic, cmd, rule_locs);
		spin_unlock_bh(&enic->rfs_h.lock);
		break;
	case ETHTOOL_GRXCLSRULE:
		spin_lock_bh(&enic->rfs_h.lock);
		ret = enic_grxclsrule(enic, cmd);
		spin_unlock_bh(&enic->rfs_h.lock);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}

/* ethtool ->get_tunable: only RX_COPYBREAK is supported */
static int enic_get_tunable(struct net_device *dev,
			    const struct ethtool_tunable *tuna, void *data)
{
	struct enic *enic = netdev_priv(dev);
	int ret = 0;

	switch (tuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		*(u32 *)data = enic->rx_copybreak;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

/* ethtool ->set_tunable: only RX_COPYBREAK is supported */
static int enic_set_tunable(struct net_device *dev,
			    const struct ethtool_tunable *tuna,
			    const void *data)
{
	struct enic *enic = netdev_priv(dev);
	int ret = 0;

	switch (tuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		enic->rx_copybreak = *(u32 *)data;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

/* ethtool ->get_rxfh_key_size: fixed-size RSS hash key */
static u32 enic_get_rxfh_key_size(struct net_device *netdev)
{
	return ENIC_RSS_LEN;
}

/* ethtool ->get_rxfh: report the RSS key and hash function (Toeplitz).
 * No indirection table is exposed (indir is ignored).
 */
static int enic_get_rxfh(struct net_device *netdev, u32 *indir, u8 *hkey,
			 u8 *hfunc)
{
	struct enic *enic = netdev_priv(netdev);

	if (hkey)
		memcpy(hkey, enic->rss_key, ENIC_RSS_LEN);

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;

	return 0;
}

/* ethtool ->set_rxfh: only the RSS key may be changed; requests to set
 * an indirection table or a non-Toeplitz hash function are rejected.
 */
static int enic_set_rxfh(struct net_device *netdev, const u32 *indir,
			 const u8 *hkey, const u8 hfunc)
{
	struct enic *enic = netdev_priv(netdev);

	if ((hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) ||
	    indir)
		return -EINVAL;

	if (hkey)
		memcpy(enic->rss_key, hkey, ENIC_RSS_LEN);

	/* push the (possibly updated) key to the device */
	return __enic_set_rsskey(enic);
}

static const struct ethtool_ops enic_ethtool_ops = {
	.get_drvinfo = enic_get_drvinfo,
	.get_msglevel = enic_get_msglevel,
	.set_msglevel = enic_set_msglevel,
	.get_link = ethtool_op_get_link,
	.get_strings = enic_get_strings,
	.get_ringparam = enic_get_ringparam,
	.set_ringparam = enic_set_ringparam,
	.get_sset_count = enic_get_sset_count,
	.get_ethtool_stats = enic_get_ethtool_stats,
	.get_coalesce = enic_get_coalesce,
	.set_coalesce = enic_set_coalesce,
	.get_rxnfc = enic_get_rxnfc,
	.get_tunable = enic_get_tunable,
	.set_tunable = enic_set_tunable,
	.get_rxfh_key_size = enic_get_rxfh_key_size,
	.get_rxfh = enic_get_rxfh,
	.set_rxfh = enic_set_rxfh,
	.get_link_ksettings = enic_get_ksettings,
};

/* Attach the ethtool ops table to the net_device */
void enic_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &enic_ethtool_ops;
}