1 /* 2 * Linux driver for VMware's vmxnet3 ethernet NIC. 3 * 4 * Copyright (C) 2008-2022, VMware, Inc. All Rights Reserved. 5 * 6 * This program is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License as published by the 8 * Free Software Foundation; version 2 of the License and no later version. 9 * 10 * This program is distributed in the hope that it will be useful, but 11 * WITHOUT ANY WARRANTY; without even the implied warranty of 12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or 13 * NON INFRINGEMENT. See the GNU General Public License for more 14 * details. 15 * 16 * You should have received a copy of the GNU General Public License 17 * along with this program; if not, write to the Free Software 18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 19 * 20 * The full GNU General Public License is included in this distribution in 21 * the file called "COPYING". 22 * 23 * Maintained by: pv-drivers@vmware.com 24 * 25 */ 26 27 28 #include "vmxnet3_int.h" 29 #include <net/vxlan.h> 30 #include <net/geneve.h> 31 32 #define VXLAN_UDP_PORT 8472 33 34 struct vmxnet3_stat_desc { 35 char desc[ETH_GSTRING_LEN]; 36 int offset; 37 }; 38 39 40 /* per tq stats maintained by the device */ 41 static const struct vmxnet3_stat_desc 42 vmxnet3_tq_dev_stats[] = { 43 /* description, offset */ 44 { "Tx Queue#", 0 }, 45 { " TSO pkts tx", offsetof(struct UPT1_TxStats, TSOPktsTxOK) }, 46 { " TSO bytes tx", offsetof(struct UPT1_TxStats, TSOBytesTxOK) }, 47 { " ucast pkts tx", offsetof(struct UPT1_TxStats, ucastPktsTxOK) }, 48 { " ucast bytes tx", offsetof(struct UPT1_TxStats, ucastBytesTxOK) }, 49 { " mcast pkts tx", offsetof(struct UPT1_TxStats, mcastPktsTxOK) }, 50 { " mcast bytes tx", offsetof(struct UPT1_TxStats, mcastBytesTxOK) }, 51 { " bcast pkts tx", offsetof(struct UPT1_TxStats, bcastPktsTxOK) }, 52 { " bcast bytes tx", offsetof(struct UPT1_TxStats, bcastBytesTxOK) }, 53 { " pkts tx err", offsetof(struct UPT1_TxStats, pktsTxError) }, 54 { " pkts tx discard", offsetof(struct UPT1_TxStats, pktsTxDiscard) }, 55 }; 56 57 /* per tq stats maintained by the driver */ 58 static const struct vmxnet3_stat_desc 59 vmxnet3_tq_driver_stats[] = { 60 /* description, offset */ 61 {" drv dropped tx total", offsetof(struct vmxnet3_tq_driver_stats, 62 drop_total) }, 63 { " too many frags", offsetof(struct vmxnet3_tq_driver_stats, 64 drop_too_many_frags) }, 65 { " giant hdr", offsetof(struct vmxnet3_tq_driver_stats, 66 drop_oversized_hdr) }, 67 { " hdr err", offsetof(struct vmxnet3_tq_driver_stats, 68 drop_hdr_inspect_err) }, 69 { " tso", offsetof(struct vmxnet3_tq_driver_stats, 70 drop_tso) }, 71 { " ring full", offsetof(struct vmxnet3_tq_driver_stats, 72 tx_ring_full) }, 73 { " pkts linearized", offsetof(struct vmxnet3_tq_driver_stats, 74 linearized) }, 75 { " hdr cloned", offsetof(struct vmxnet3_tq_driver_stats, 76 copy_skb_header) }, 77 { " giant hdr", offsetof(struct vmxnet3_tq_driver_stats, 78 oversized_hdr) }, 79 }; 80 81 /* per rq stats maintained by the device */ 82 static const struct vmxnet3_stat_desc 83 vmxnet3_rq_dev_stats[] = { 84 { "Rx Queue#", 0 }, 85 { " LRO pkts rx", offsetof(struct UPT1_RxStats, LROPktsRxOK) }, 86 { " LRO byte rx", offsetof(struct UPT1_RxStats, LROBytesRxOK) }, 87 { " ucast pkts rx", offsetof(struct UPT1_RxStats, ucastPktsRxOK) }, 88 { " ucast bytes rx", offsetof(struct UPT1_RxStats, ucastBytesRxOK) }, 89 { " mcast pkts rx", offsetof(struct UPT1_RxStats, mcastPktsRxOK) }, 90 { " mcast bytes rx", offsetof(struct UPT1_RxStats, mcastBytesRxOK) }, 91 { " bcast pkts rx", offsetof(struct UPT1_RxStats, bcastPktsRxOK) }, 92 { " bcast bytes rx", offsetof(struct UPT1_RxStats, bcastBytesRxOK) }, 93 { " pkts rx OOB", offsetof(struct UPT1_RxStats, pktsRxOutOfBuf) }, 94 { " pkts rx err", offsetof(struct UPT1_RxStats, pktsRxError) }, 95 }; 96 97 /* per rq stats maintained by the driver */ 98 static const struct vmxnet3_stat_desc 99 vmxnet3_rq_driver_stats[] = { 100 /* description, offset */ 101 { " drv dropped rx total", offsetof(struct vmxnet3_rq_driver_stats, 102 drop_total) }, 103 { " err", offsetof(struct vmxnet3_rq_driver_stats, 104 drop_err) }, 105 { " fcs", offsetof(struct vmxnet3_rq_driver_stats, 106 drop_fcs) }, 107 { " rx buf alloc fail", offsetof(struct vmxnet3_rq_driver_stats, 108 rx_buf_alloc_failure) }, 109 }; 110 111 /* global stats maintained by the driver */ 112 static const struct vmxnet3_stat_desc 113 vmxnet3_global_stats[] = { 114 /* description, offset */ 115 { "tx timeout count", offsetof(struct vmxnet3_adapter, 116 tx_timeout_count) } 117 }; 118 119 120 void 121 vmxnet3_get_stats64(struct net_device *netdev, 122 struct rtnl_link_stats64 *stats) 123 { 124 struct vmxnet3_adapter *adapter; 125 struct vmxnet3_tq_driver_stats *drvTxStats; 126 struct vmxnet3_rq_driver_stats *drvRxStats; 127 struct UPT1_TxStats *devTxStats; 128 struct UPT1_RxStats *devRxStats; 129 unsigned long flags; 130 int i; 131 132 adapter = netdev_priv(netdev); 133 134 /* Collect the dev stats into the shared area */ 135 spin_lock_irqsave(&adapter->cmd_lock, flags); 136 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS); 137 spin_unlock_irqrestore(&adapter->cmd_lock, flags); 138 139 for (i = 0; i < adapter->num_tx_queues; i++) { 140 devTxStats = &adapter->tqd_start[i].stats; 141 drvTxStats = &adapter->tx_queue[i].stats; 142 stats->tx_packets += devTxStats->ucastPktsTxOK + 143 devTxStats->mcastPktsTxOK + 144 devTxStats->bcastPktsTxOK; 145 stats->tx_bytes += devTxStats->ucastBytesTxOK + 146 devTxStats->mcastBytesTxOK + 147 devTxStats->bcastBytesTxOK; 148 stats->tx_errors += devTxStats->pktsTxError; 149 stats->tx_dropped += drvTxStats->drop_total; 150 } 151 152 for (i = 0; i < adapter->num_rx_queues; i++) { 153 devRxStats = &adapter->rqd_start[i].stats; 154 drvRxStats = &adapter->rx_queue[i].stats; 155 stats->rx_packets += devRxStats->ucastPktsRxOK + 156 devRxStats->mcastPktsRxOK + 157 devRxStats->bcastPktsRxOK; 158 159 stats->rx_bytes += devRxStats->ucastBytesRxOK + 160 devRxStats->mcastBytesRxOK + 161 devRxStats->bcastBytesRxOK; 162 163 stats->rx_errors += devRxStats->pktsRxError; 164 stats->rx_dropped += drvRxStats->drop_total; 165 stats->multicast += devRxStats->mcastPktsRxOK; 166 } 167 } 168 169 static int 170 vmxnet3_get_sset_count(struct net_device *netdev, int sset) 171 { 172 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 173 switch (sset) { 174 case ETH_SS_STATS: 175 return (ARRAY_SIZE(vmxnet3_tq_dev_stats) + 176 ARRAY_SIZE(vmxnet3_tq_driver_stats)) * 177 adapter->num_tx_queues + 178 (ARRAY_SIZE(vmxnet3_rq_dev_stats) + 179 ARRAY_SIZE(vmxnet3_rq_driver_stats)) * 180 adapter->num_rx_queues + 181 ARRAY_SIZE(vmxnet3_global_stats); 182 default: 183 return -EOPNOTSUPP; 184 } 185 } 186 187 188 /* This is a version 2 of the vmxnet3 ethtool_regs which goes hand in hand with 189 * the version 2 of the vmxnet3 support for ethtool(8) --register-dump. 190 * Therefore, if any registers are added, removed or modified, then a version 191 * bump and a corresponding change in the vmxnet3 support for ethtool(8) 192 * --register-dump would be required. 193 */ 194 static int 195 vmxnet3_get_regs_len(struct net_device *netdev) 196 { 197 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 198 199 return ((9 /* BAR1 registers */ + 200 (1 + adapter->intr.num_intrs) + 201 (1 + adapter->num_tx_queues * 17 /* Tx queue registers */) + 202 (1 + adapter->num_rx_queues * 23 /* Rx queue registers */)) * 203 sizeof(u32)); 204 } 205 206 207 static void 208 vmxnet3_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) 209 { 210 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 211 212 strlcpy(drvinfo->driver, vmxnet3_driver_name, sizeof(drvinfo->driver)); 213 214 strlcpy(drvinfo->version, VMXNET3_DRIVER_VERSION_REPORT, 215 sizeof(drvinfo->version)); 216 217 strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 218 sizeof(drvinfo->bus_info)); 219 } 220 221 222 static void 223 vmxnet3_get_strings(struct net_device *netdev, u32 stringset, u8 *buf) 224 { 225 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 226 int i, j; 227 228 if (stringset != ETH_SS_STATS) 229 return; 230 231 for (j = 0; j < adapter->num_tx_queues; j++) { 232 for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) 233 ethtool_sprintf(&buf, vmxnet3_tq_dev_stats[i].desc); 234 for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++) 235 ethtool_sprintf(&buf, vmxnet3_tq_driver_stats[i].desc); 236 } 237 238 for (j = 0; j < adapter->num_rx_queues; j++) { 239 for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) 240 ethtool_sprintf(&buf, vmxnet3_rq_dev_stats[i].desc); 241 for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++) 242 ethtool_sprintf(&buf, vmxnet3_rq_driver_stats[i].desc); 243 } 244 245 for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++) 246 ethtool_sprintf(&buf, vmxnet3_global_stats[i].desc); 247 } 248 249 netdev_features_t vmxnet3_fix_features(struct net_device *netdev, 250 netdev_features_t features) 251 { 252 /* If Rx checksum is disabled, then LRO should also be disabled */ 253 if (!(features & NETIF_F_RXCSUM)) 254 features &= ~NETIF_F_LRO; 255 256 return features; 257 } 258 259 netdev_features_t vmxnet3_features_check(struct sk_buff *skb, 260 struct net_device *netdev, 261 netdev_features_t features) 262 { 263 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 264 265 /* Validate if the tunneled packet is being offloaded by the device */ 266 if (VMXNET3_VERSION_GE_4(adapter) && 267 skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL) { 268 u8 l4_proto = 0; 269 u16 port; 270 struct udphdr *udph; 271 272 switch (vlan_get_protocol(skb)) { 273 case htons(ETH_P_IP): 274 l4_proto = ip_hdr(skb)->protocol; 275 break; 276 case htons(ETH_P_IPV6): 277 l4_proto = ipv6_hdr(skb)->nexthdr; 278 break; 279 default: 280 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 281 } 282 283 switch (l4_proto) { 284 case IPPROTO_UDP: 285 udph = udp_hdr(skb); 286 port = be16_to_cpu(udph->dest); 287 /* Check if offloaded port is supported */ 288 if (port != GENEVE_UDP_PORT && 289 port != IANA_VXLAN_UDP_PORT && 290 port != VXLAN_UDP_PORT) { 291 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 292 } 293 break; 294 default: 295 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 296 } 297 } 298 return features; 299 } 300 301 static void vmxnet3_enable_encap_offloads(struct net_device *netdev, netdev_features_t features) 302 { 303 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 304 305 if (VMXNET3_VERSION_GE_4(adapter)) { 306 netdev->hw_enc_features |= NETIF_F_SG | NETIF_F_RXCSUM | 307 NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX | 308 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 | 309 NETIF_F_LRO; 310 if (features & NETIF_F_GSO_UDP_TUNNEL) 311 netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL; 312 if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM) 313 netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; 314 } 315 if (VMXNET3_VERSION_GE_7(adapter)) { 316 unsigned long flags; 317 318 if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0], 319 VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD)) { 320 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD; 321 } 322 if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0], 323 VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD)) { 324 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD; 325 } 326 if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0], 327 VMXNET3_CAP_GENEVE_TSO)) { 328 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_TSO; 329 } 330 if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0], 331 VMXNET3_CAP_VXLAN_TSO)) { 332 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_TSO; 333 } 334 if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0], 335 VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD)) { 336 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD; 337 } 338 if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0], 339 VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD)) { 340 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD; 341 } 342 343 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR, adapter->dev_caps[0]); 344 spin_lock_irqsave(&adapter->cmd_lock, flags); 345 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_DCR0_REG); 346 adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); 347 spin_unlock_irqrestore(&adapter->cmd_lock, flags); 348 349 if (!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD)) && 350 !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD))) { 351 netdev->hw_enc_features &= ~NETIF_F_GSO_UDP_TUNNEL_CSUM; 352 } 353 } 354 } 355 356 static void vmxnet3_disable_encap_offloads(struct net_device *netdev) 357 { 358 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 359 360 if (VMXNET3_VERSION_GE_4(adapter)) { 361 netdev->hw_enc_features &= ~(NETIF_F_SG | NETIF_F_RXCSUM | 362 NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX | 363 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 | 364 NETIF_F_LRO | NETIF_F_GSO_UDP_TUNNEL | 365 NETIF_F_GSO_UDP_TUNNEL_CSUM); 366 } 367 if (VMXNET3_VERSION_GE_7(adapter)) { 368 unsigned long flags; 369 370 adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD | 371 1UL << VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD | 372 1UL << VMXNET3_CAP_GENEVE_TSO | 373 1UL << VMXNET3_CAP_VXLAN_TSO | 374 1UL << VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD | 375 1UL << VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD); 376 377 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR, adapter->dev_caps[0]); 378 spin_lock_irqsave(&adapter->cmd_lock, flags); 379 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_DCR0_REG); 380 adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); 381 spin_unlock_irqrestore(&adapter->cmd_lock, flags); 382 } 383 } 384 385 int vmxnet3_set_features(struct net_device *netdev, netdev_features_t features) 386 { 387 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 388 unsigned long flags; 389 netdev_features_t changed = features ^ netdev->features; 390 netdev_features_t tun_offload_mask = NETIF_F_GSO_UDP_TUNNEL | 391 NETIF_F_GSO_UDP_TUNNEL_CSUM; 392 u8 udp_tun_enabled = (netdev->features & tun_offload_mask) != 0; 393 394 if (changed & (NETIF_F_RXCSUM | NETIF_F_LRO | 395 NETIF_F_HW_VLAN_CTAG_RX | tun_offload_mask)) { 396 if (features & NETIF_F_RXCSUM) 397 adapter->shared->devRead.misc.uptFeatures |= 398 UPT1_F_RXCSUM; 399 else 400 adapter->shared->devRead.misc.uptFeatures &= 401 ~UPT1_F_RXCSUM; 402 403 /* update hardware LRO capability accordingly */ 404 if (features & NETIF_F_LRO) 405 adapter->shared->devRead.misc.uptFeatures |= 406 UPT1_F_LRO; 407 else 408 adapter->shared->devRead.misc.uptFeatures &= 409 ~UPT1_F_LRO; 410 411 if (features & NETIF_F_HW_VLAN_CTAG_RX) 412 adapter->shared->devRead.misc.uptFeatures |= 413 UPT1_F_RXVLAN; 414 else 415 adapter->shared->devRead.misc.uptFeatures &= 416 ~UPT1_F_RXVLAN; 417 418 if ((features & tun_offload_mask) != 0) { 419 vmxnet3_enable_encap_offloads(netdev, features); 420 adapter->shared->devRead.misc.uptFeatures |= 421 UPT1_F_RXINNEROFLD; 422 } else if ((features & tun_offload_mask) == 0 && 423 udp_tun_enabled) { 424 vmxnet3_disable_encap_offloads(netdev); 425 adapter->shared->devRead.misc.uptFeatures &= 426 ~UPT1_F_RXINNEROFLD; 427 } 428 429 spin_lock_irqsave(&adapter->cmd_lock, flags); 430 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 431 VMXNET3_CMD_UPDATE_FEATURE); 432 spin_unlock_irqrestore(&adapter->cmd_lock, flags); 433 } 434 return 0; 435 } 436 437 static void 438 vmxnet3_get_ethtool_stats(struct net_device *netdev, 439 struct ethtool_stats *stats, u64 *buf) 440 { 441 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 442 unsigned long flags; 443 u8 *base; 444 int i; 445 int j = 0; 446 447 spin_lock_irqsave(&adapter->cmd_lock, flags); 448 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS); 449 spin_unlock_irqrestore(&adapter->cmd_lock, flags); 450 451 /* this does assume each counter is 64-bit wide */ 452 for (j = 0; j < adapter->num_tx_queues; j++) { 453 base = (u8 *)&adapter->tqd_start[j].stats; 454 *buf++ = (u64)j; 455 for (i = 1; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) 456 *buf++ = *(u64 *)(base + 457 vmxnet3_tq_dev_stats[i].offset); 458 459 base = (u8 *)&adapter->tx_queue[j].stats; 460 for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++) 461 *buf++ = *(u64 *)(base + 462 vmxnet3_tq_driver_stats[i].offset); 463 } 464 465 for (j = 0; j < adapter->num_rx_queues; j++) { 466 base = (u8 *)&adapter->rqd_start[j].stats; 467 *buf++ = (u64) j; 468 for (i = 1; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) 469 *buf++ = *(u64 *)(base + 470 vmxnet3_rq_dev_stats[i].offset); 471 472 base = (u8 *)&adapter->rx_queue[j].stats; 473 for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++) 474 *buf++ = *(u64 *)(base + 475 vmxnet3_rq_driver_stats[i].offset); 476 } 477 478 base = (u8 *)adapter; 479 for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++) 480 *buf++ = *(u64 *)(base + vmxnet3_global_stats[i].offset); 481 } 482 483 484 /* This is a version 2 of the vmxnet3 ethtool_regs which goes hand in hand with 485 * the version 2 of the vmxnet3 support for ethtool(8) --register-dump. 486 * Therefore, if any registers are added, removed or modified, then a version 487 * bump and a corresponding change in the vmxnet3 support for ethtool(8) 488 * --register-dump would be required. 489 */ 490 static void 491 vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p) 492 { 493 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 494 u32 *buf = p; 495 int i = 0, j = 0; 496 497 memset(p, 0, vmxnet3_get_regs_len(netdev)); 498 499 regs->version = 2; 500 501 /* Update vmxnet3_get_regs_len if we want to dump more registers */ 502 503 buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS); 504 buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS); 505 buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_DSAL); 506 buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_DSAH); 507 buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); 508 buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACL); 509 buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACH); 510 buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR); 511 buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ECR); 512 513 buf[j++] = adapter->intr.num_intrs; 514 for (i = 0; i < adapter->intr.num_intrs; i++) { 515 buf[j++] = VMXNET3_READ_BAR0_REG(adapter, VMXNET3_REG_IMR 516 + i * VMXNET3_REG_ALIGN); 517 } 518 519 buf[j++] = adapter->num_tx_queues; 520 for (i = 0; i < adapter->num_tx_queues; i++) { 521 struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i]; 522 523 buf[j++] = VMXNET3_READ_BAR0_REG(adapter, adapter->tx_prod_offset + 524 i * VMXNET3_REG_ALIGN); 525 526 buf[j++] = VMXNET3_GET_ADDR_LO(tq->tx_ring.basePA); 527 buf[j++] = VMXNET3_GET_ADDR_HI(tq->tx_ring.basePA); 528 buf[j++] = tq->tx_ring.size; 529 buf[j++] = tq->tx_ring.next2fill; 530 buf[j++] = tq->tx_ring.next2comp; 531 buf[j++] = tq->tx_ring.gen; 532 533 buf[j++] = VMXNET3_GET_ADDR_LO(tq->data_ring.basePA); 534 buf[j++] = VMXNET3_GET_ADDR_HI(tq->data_ring.basePA); 535 buf[j++] = tq->data_ring.size; 536 buf[j++] = tq->txdata_desc_size; 537 538 buf[j++] = VMXNET3_GET_ADDR_LO(tq->comp_ring.basePA); 539 buf[j++] = VMXNET3_GET_ADDR_HI(tq->comp_ring.basePA); 540 buf[j++] = tq->comp_ring.size; 541 buf[j++] = tq->comp_ring.next2proc; 542 buf[j++] = tq->comp_ring.gen; 543 544 buf[j++] = tq->stopped; 545 } 546 547 buf[j++] = adapter->num_rx_queues; 548 for (i = 0; i < adapter->num_rx_queues; i++) { 549 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i]; 550 551 buf[j++] = VMXNET3_READ_BAR0_REG(adapter, adapter->rx_prod_offset + 552 i * VMXNET3_REG_ALIGN); 553 buf[j++] = VMXNET3_READ_BAR0_REG(adapter, adapter->rx_prod2_offset + 554 i * VMXNET3_REG_ALIGN); 555 556 buf[j++] = VMXNET3_GET_ADDR_LO(rq->rx_ring[0].basePA); 557 buf[j++] = VMXNET3_GET_ADDR_HI(rq->rx_ring[0].basePA); 558 buf[j++] = rq->rx_ring[0].size; 559 buf[j++] = rq->rx_ring[0].next2fill; 560 buf[j++] = rq->rx_ring[0].next2comp; 561 buf[j++] = rq->rx_ring[0].gen; 562 563 buf[j++] = VMXNET3_GET_ADDR_LO(rq->rx_ring[1].basePA); 564 buf[j++] = VMXNET3_GET_ADDR_HI(rq->rx_ring[1].basePA); 565 buf[j++] = rq->rx_ring[1].size; 566 buf[j++] = rq->rx_ring[1].next2fill; 567 buf[j++] = rq->rx_ring[1].next2comp; 568 buf[j++] = rq->rx_ring[1].gen; 569 570 buf[j++] = VMXNET3_GET_ADDR_LO(rq->data_ring.basePA); 571 buf[j++] = VMXNET3_GET_ADDR_HI(rq->data_ring.basePA); 572 buf[j++] = rq->rx_ring[0].size; 573 buf[j++] = rq->data_ring.desc_size; 574 575 buf[j++] = VMXNET3_GET_ADDR_LO(rq->comp_ring.basePA); 576 buf[j++] = VMXNET3_GET_ADDR_HI(rq->comp_ring.basePA); 577 buf[j++] = rq->comp_ring.size; 578 buf[j++] = rq->comp_ring.next2proc; 579 buf[j++] = rq->comp_ring.gen; 580 } 581 } 582 583 584 static void 585 vmxnet3_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) 586 { 587 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 588 589 wol->supported = WAKE_UCAST | WAKE_ARP | WAKE_MAGIC; 590 wol->wolopts = adapter->wol; 591 } 592 593 594 static int 595 vmxnet3_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) 596 { 597 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 598 599 if (wol->wolopts & (WAKE_PHY | WAKE_MCAST | WAKE_BCAST | 600 WAKE_MAGICSECURE)) { 601 return -EOPNOTSUPP; 602 } 603 604 adapter->wol = wol->wolopts; 605 606 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); 607 608 return 0; 609 } 610 611 612 static int 613 vmxnet3_get_link_ksettings(struct net_device *netdev, 614 struct ethtool_link_ksettings *ecmd) 615 { 616 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 617 618 ethtool_link_ksettings_zero_link_mode(ecmd, supported); 619 ethtool_link_ksettings_add_link_mode(ecmd, supported, 10000baseT_Full); 620 ethtool_link_ksettings_add_link_mode(ecmd, supported, 1000baseT_Full); 621 ethtool_link_ksettings_add_link_mode(ecmd, supported, TP); 622 ethtool_link_ksettings_zero_link_mode(ecmd, advertising); 623 ethtool_link_ksettings_add_link_mode(ecmd, advertising, TP); 624 ecmd->base.port = PORT_TP; 625 626 if (adapter->link_speed) { 627 ecmd->base.speed = adapter->link_speed; 628 ecmd->base.duplex = DUPLEX_FULL; 629 } else { 630 ecmd->base.speed = SPEED_UNKNOWN; 631 ecmd->base.duplex = DUPLEX_UNKNOWN; 632 } 633 return 0; 634 } 635 636 static void 637 vmxnet3_get_ringparam(struct net_device *netdev, 638 struct ethtool_ringparam *param, 639 struct kernel_ethtool_ringparam *kernel_param, 640 struct netlink_ext_ack *extack) 641 { 642 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 643 644 param->rx_max_pending = VMXNET3_RX_RING_MAX_SIZE; 645 param->tx_max_pending = VMXNET3_TX_RING_MAX_SIZE; 646 param->rx_mini_max_pending = VMXNET3_VERSION_GE_3(adapter) ? 647 VMXNET3_RXDATA_DESC_MAX_SIZE : 0; 648 param->rx_jumbo_max_pending = VMXNET3_RX_RING2_MAX_SIZE; 649 650 param->rx_pending = adapter->rx_ring_size; 651 param->tx_pending = adapter->tx_ring_size; 652 param->rx_mini_pending = VMXNET3_VERSION_GE_3(adapter) ? 653 adapter->rxdata_desc_size : 0; 654 param->rx_jumbo_pending = adapter->rx_ring2_size; 655 } 656 657 static int 658 vmxnet3_set_ringparam(struct net_device *netdev, 659 struct ethtool_ringparam *param, 660 struct kernel_ethtool_ringparam *kernel_param, 661 struct netlink_ext_ack *extack) 662 { 663 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 664 u32 new_tx_ring_size, new_rx_ring_size, new_rx_ring2_size; 665 u16 new_rxdata_desc_size; 666 u32 sz; 667 int err = 0; 668 669 if (param->tx_pending == 0 || param->tx_pending > 670 VMXNET3_TX_RING_MAX_SIZE) 671 return -EINVAL; 672 673 if (param->rx_pending == 0 || param->rx_pending > 674 VMXNET3_RX_RING_MAX_SIZE) 675 return -EINVAL; 676 677 if (param->rx_jumbo_pending == 0 || 678 param->rx_jumbo_pending > VMXNET3_RX_RING2_MAX_SIZE) 679 return -EINVAL; 680 681 /* if adapter not yet initialized, do nothing */ 682 if (adapter->rx_buf_per_pkt == 0) { 683 netdev_err(netdev, "adapter not completely initialized, " 684 "ring size cannot be changed yet\n"); 685 return -EOPNOTSUPP; 686 } 687 688 if (VMXNET3_VERSION_GE_3(adapter)) { 689 if (param->rx_mini_pending > VMXNET3_RXDATA_DESC_MAX_SIZE) 690 return -EINVAL; 691 } else if (param->rx_mini_pending != 0) { 692 return -EINVAL; 693 } 694 695 /* round it up to a multiple of VMXNET3_RING_SIZE_ALIGN */ 696 new_tx_ring_size = (param->tx_pending + VMXNET3_RING_SIZE_MASK) & 697 ~VMXNET3_RING_SIZE_MASK; 698 new_tx_ring_size = min_t(u32, new_tx_ring_size, 699 VMXNET3_TX_RING_MAX_SIZE); 700 if (new_tx_ring_size > VMXNET3_TX_RING_MAX_SIZE || (new_tx_ring_size % 701 VMXNET3_RING_SIZE_ALIGN) != 0) 702 return -EINVAL; 703 704 /* ring0 has to be a multiple of 705 * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN 706 */ 707 sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN; 708 new_rx_ring_size = (param->rx_pending + sz - 1) / sz * sz; 709 new_rx_ring_size = min_t(u32, new_rx_ring_size, 710 VMXNET3_RX_RING_MAX_SIZE / sz * sz); 711 if (new_rx_ring_size > VMXNET3_RX_RING_MAX_SIZE || (new_rx_ring_size % 712 sz) != 0) 713 return -EINVAL; 714 715 /* ring2 has to be a multiple of VMXNET3_RING_SIZE_ALIGN */ 716 new_rx_ring2_size = (param->rx_jumbo_pending + VMXNET3_RING_SIZE_MASK) & 717 ~VMXNET3_RING_SIZE_MASK; 718 new_rx_ring2_size = min_t(u32, new_rx_ring2_size, 719 VMXNET3_RX_RING2_MAX_SIZE); 720 721 /* For v7 and later, keep ring size power of 2 for UPT */ 722 if (VMXNET3_VERSION_GE_7(adapter)) { 723 new_tx_ring_size = rounddown_pow_of_two(new_tx_ring_size); 724 new_rx_ring_size = rounddown_pow_of_two(new_rx_ring_size); 725 new_rx_ring2_size = rounddown_pow_of_two(new_rx_ring2_size); 726 } 727 728 /* rx data ring buffer size has to be a multiple of 729 * VMXNET3_RXDATA_DESC_SIZE_ALIGN 730 */ 731 new_rxdata_desc_size = 732 (param->rx_mini_pending + VMXNET3_RXDATA_DESC_SIZE_MASK) & 733 ~VMXNET3_RXDATA_DESC_SIZE_MASK; 734 new_rxdata_desc_size = min_t(u16, new_rxdata_desc_size, 735 VMXNET3_RXDATA_DESC_MAX_SIZE); 736 737 if (new_tx_ring_size == adapter->tx_ring_size && 738 new_rx_ring_size == adapter->rx_ring_size && 739 new_rx_ring2_size == adapter->rx_ring2_size && 740 new_rxdata_desc_size == adapter->rxdata_desc_size) { 741 return 0; 742 } 743 744 /* 745 * Reset_work may be in the middle of resetting the device, wait for its 746 * completion. 747 */ 748 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)) 749 usleep_range(1000, 2000); 750 751 if (netif_running(netdev)) { 752 vmxnet3_quiesce_dev(adapter); 753 vmxnet3_reset_dev(adapter); 754 755 /* recreate the rx queue and the tx queue based on the 756 * new sizes */ 757 vmxnet3_tq_destroy_all(adapter); 758 vmxnet3_rq_destroy_all(adapter); 759 760 err = vmxnet3_create_queues(adapter, new_tx_ring_size, 761 new_rx_ring_size, new_rx_ring2_size, 762 adapter->txdata_desc_size, 763 new_rxdata_desc_size); 764 if (err) { 765 /* failed, most likely because of OOM, try default 766 * size */ 767 netdev_err(netdev, "failed to apply new sizes, " 768 "try the default ones\n"); 769 new_rx_ring_size = VMXNET3_DEF_RX_RING_SIZE; 770 new_rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE; 771 new_tx_ring_size = VMXNET3_DEF_TX_RING_SIZE; 772 new_rxdata_desc_size = VMXNET3_VERSION_GE_3(adapter) ? 773 VMXNET3_DEF_RXDATA_DESC_SIZE : 0; 774 775 err = vmxnet3_create_queues(adapter, 776 new_tx_ring_size, 777 new_rx_ring_size, 778 new_rx_ring2_size, 779 adapter->txdata_desc_size, 780 new_rxdata_desc_size); 781 if (err) { 782 netdev_err(netdev, "failed to create queues " 783 "with default sizes. Closing it\n"); 784 goto out; 785 } 786 } 787 788 err = vmxnet3_activate_dev(adapter); 789 if (err) 790 netdev_err(netdev, "failed to re-activate, error %d." 791 " Closing it\n", err); 792 } 793 adapter->tx_ring_size = new_tx_ring_size; 794 adapter->rx_ring_size = new_rx_ring_size; 795 adapter->rx_ring2_size = new_rx_ring2_size; 796 adapter->rxdata_desc_size = new_rxdata_desc_size; 797 798 out: 799 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state); 800 if (err) 801 vmxnet3_force_close(adapter); 802 803 return err; 804 } 805 806 static int 807 vmxnet3_get_rss_hash_opts(struct vmxnet3_adapter *adapter, 808 struct ethtool_rxnfc *info) 809 { 810 enum Vmxnet3_RSSField rss_fields; 811 812 if (netif_running(adapter->netdev)) { 813 unsigned long flags; 814 815 spin_lock_irqsave(&adapter->cmd_lock, flags); 816 817 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 818 VMXNET3_CMD_GET_RSS_FIELDS); 819 rss_fields = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); 820 spin_unlock_irqrestore(&adapter->cmd_lock, flags); 821 } else { 822 rss_fields = adapter->rss_fields; 823 } 824 825 info->data = 0; 826 827 /* Report default options for RSS on vmxnet3 */ 828 switch (info->flow_type) { 829 case TCP_V4_FLOW: 830 case TCP_V6_FLOW: 831 info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3 | 832 RXH_IP_SRC | RXH_IP_DST; 833 break; 834 case UDP_V4_FLOW: 835 if (rss_fields & VMXNET3_RSS_FIELDS_UDPIP4) 836 info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; 837 info->data |= RXH_IP_SRC | RXH_IP_DST; 838 break; 839 case AH_ESP_V4_FLOW: 840 case AH_V4_FLOW: 841 case ESP_V4_FLOW: 842 if (rss_fields & VMXNET3_RSS_FIELDS_ESPIP4) 843 info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; 844 fallthrough; 845 case SCTP_V4_FLOW: 846 case IPV4_FLOW: 847 info->data |= RXH_IP_SRC | RXH_IP_DST; 848 break; 849 case UDP_V6_FLOW: 850 if (rss_fields & VMXNET3_RSS_FIELDS_UDPIP6) 851 info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; 852 info->data |= RXH_IP_SRC | RXH_IP_DST; 853 break; 854 case AH_ESP_V6_FLOW: 855 case AH_V6_FLOW: 856 case ESP_V6_FLOW: 857 if (VMXNET3_VERSION_GE_6(adapter) && 858 (rss_fields & VMXNET3_RSS_FIELDS_ESPIP6)) 859 info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; 860 fallthrough; 861 case SCTP_V6_FLOW: 862 case IPV6_FLOW: 863 info->data |= RXH_IP_SRC | RXH_IP_DST; 864 break; 865 default: 866 return -EINVAL; 867 } 868 869 return 0; 870 } 871 872 static int 873 vmxnet3_set_rss_hash_opt(struct net_device *netdev, 874 struct vmxnet3_adapter *adapter, 875 struct ethtool_rxnfc *nfc) 876 { 877 enum Vmxnet3_RSSField rss_fields = adapter->rss_fields; 878 879 /* RSS does not support anything other than hashing 880 * to queues on src and dst IPs and ports 881 */ 882 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | 883 RXH_L4_B_0_1 | RXH_L4_B_2_3)) 884 return -EINVAL; 885 886 switch (nfc->flow_type) { 887 case TCP_V4_FLOW: 888 case TCP_V6_FLOW: 889 if (!(nfc->data & RXH_IP_SRC) || 890 !(nfc->data & RXH_IP_DST) || 891 !(nfc->data & RXH_L4_B_0_1) || 892 !(nfc->data & RXH_L4_B_2_3)) 893 return -EINVAL; 894 break; 895 case UDP_V4_FLOW: 896 if (!(nfc->data & RXH_IP_SRC) || 897 !(nfc->data & RXH_IP_DST)) 898 return -EINVAL; 899 switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { 900 case 0: 901 rss_fields &= ~VMXNET3_RSS_FIELDS_UDPIP4; 902 break; 903 case (RXH_L4_B_0_1 | RXH_L4_B_2_3): 904 rss_fields |= VMXNET3_RSS_FIELDS_UDPIP4; 905 break; 906 default: 907 return -EINVAL; 908 } 909 break; 910 case UDP_V6_FLOW: 911 if (!(nfc->data & RXH_IP_SRC) || 912 !(nfc->data & RXH_IP_DST)) 913 return -EINVAL; 914 switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { 915 case 0: 916 rss_fields &= ~VMXNET3_RSS_FIELDS_UDPIP6; 917 break; 918 case (RXH_L4_B_0_1 | RXH_L4_B_2_3): 919 rss_fields |= VMXNET3_RSS_FIELDS_UDPIP6; 920 break; 921 default: 922 return -EINVAL; 923 } 924 break; 925 case ESP_V4_FLOW: 926 case AH_V4_FLOW: 927 case AH_ESP_V4_FLOW: 928 if (!(nfc->data & RXH_IP_SRC) || 929 !(nfc->data & RXH_IP_DST)) 930 return -EINVAL; 931 switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { 932 case 0: 933 rss_fields &= ~VMXNET3_RSS_FIELDS_ESPIP4; 934 break; 935 case (RXH_L4_B_0_1 | RXH_L4_B_2_3): 936 rss_fields |= VMXNET3_RSS_FIELDS_ESPIP4; 937 break; 938 default: 939 return -EINVAL; 940 } 941 break; 942 case ESP_V6_FLOW: 943 case AH_V6_FLOW: 944 case AH_ESP_V6_FLOW: 945 if (!VMXNET3_VERSION_GE_6(adapter)) 946 return -EOPNOTSUPP; 947 if (!(nfc->data & RXH_IP_SRC) || 948 !(nfc->data & RXH_IP_DST)) 949 return -EINVAL; 950 switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { 951 case 0: 952 rss_fields &= ~VMXNET3_RSS_FIELDS_ESPIP6; 953 break; 954 case (RXH_L4_B_0_1 | RXH_L4_B_2_3): 955 rss_fields |= VMXNET3_RSS_FIELDS_ESPIP6; 956 break; 957 default: 958 return -EINVAL; 959 } 960 break; 961 case SCTP_V4_FLOW: 962 case SCTP_V6_FLOW: 963 if (!(nfc->data & RXH_IP_SRC) || 964 !(nfc->data & RXH_IP_DST) || 965 (nfc->data & RXH_L4_B_0_1) || 966 (nfc->data & RXH_L4_B_2_3)) 967 return -EINVAL; 968 break; 969 default: 970 return -EINVAL; 971 } 972 973 /* if we changed something we need to update flags */ 974 if (rss_fields != adapter->rss_fields) { 975 adapter->default_rss_fields = false; 976 if (netif_running(netdev)) { 977 struct Vmxnet3_DriverShared *shared = adapter->shared; 978 union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo; 979 unsigned long flags; 980 981 if (VMXNET3_VERSION_GE_7(adapter)) { 982 if ((rss_fields & VMXNET3_RSS_FIELDS_UDPIP4 || 983 rss_fields & VMXNET3_RSS_FIELDS_UDPIP6) && 984 vmxnet3_check_ptcapability(adapter->ptcap_supported[0], 985 VMXNET3_CAP_UDP_RSS)) { 986 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_UDP_RSS; 987 } else { 988 adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_UDP_RSS); 989 } 990 if ((rss_fields & VMXNET3_RSS_FIELDS_ESPIP4) && 991 vmxnet3_check_ptcapability(adapter->ptcap_supported[0], 992 VMXNET3_CAP_ESP_RSS_IPV4)) { 993 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_ESP_RSS_IPV4; 994 } else { 995 adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_ESP_RSS_IPV4); 996 } 997 if ((rss_fields & VMXNET3_RSS_FIELDS_ESPIP6) && 998 vmxnet3_check_ptcapability(adapter->ptcap_supported[0], 999 VMXNET3_CAP_ESP_RSS_IPV6)) { 1000 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_ESP_RSS_IPV6; 1001 } else { 1002 adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_ESP_RSS_IPV6); 1003 } 1004 1005 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR, 1006 adapter->dev_caps[0]); 1007 spin_lock_irqsave(&adapter->cmd_lock, flags); 1008 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 1009 VMXNET3_CMD_GET_DCR0_REG); 1010 adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter, 1011 VMXNET3_REG_CMD); 1012 spin_unlock_irqrestore(&adapter->cmd_lock, flags); 1013 } 1014 spin_lock_irqsave(&adapter->cmd_lock, flags); 1015 cmdInfo->setRssFields = rss_fields; 1016 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 1017 VMXNET3_CMD_SET_RSS_FIELDS); 1018 1019 /* Not all requested RSS may get applied, so get and 1020 * cache what was actually applied. 1021 */ 1022 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 1023 VMXNET3_CMD_GET_RSS_FIELDS); 1024 adapter->rss_fields = 1025 VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); 1026 spin_unlock_irqrestore(&adapter->cmd_lock, flags); 1027 } else { 1028 /* When the device is activated, we will try to apply 1029 * these rules and cache the applied value later. 1030 */ 1031 adapter->rss_fields = rss_fields; 1032 } 1033 } 1034 return 0; 1035 } 1036 1037 static int 1038 vmxnet3_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info, 1039 u32 *rules) 1040 { 1041 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 1042 int err = 0; 1043 1044 switch (info->cmd) { 1045 case ETHTOOL_GRXRINGS: 1046 info->data = adapter->num_rx_queues; 1047 break; 1048 case ETHTOOL_GRXFH: 1049 if (!VMXNET3_VERSION_GE_4(adapter)) { 1050 err = -EOPNOTSUPP; 1051 break; 1052 } 1053 #ifdef VMXNET3_RSS 1054 if (!adapter->rss) { 1055 err = -EOPNOTSUPP; 1056 break; 1057 } 1058 #endif 1059 err = vmxnet3_get_rss_hash_opts(adapter, info); 1060 break; 1061 default: 1062 err = -EOPNOTSUPP; 1063 break; 1064 } 1065 1066 return err; 1067 } 1068 1069 static int 1070 vmxnet3_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info) 1071 { 1072 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 1073 int err = 0; 1074 1075 if (!VMXNET3_VERSION_GE_4(adapter)) { 1076 err = -EOPNOTSUPP; 1077 goto done; 1078 } 1079 #ifdef VMXNET3_RSS 1080 if (!adapter->rss) { 1081 err = -EOPNOTSUPP; 1082 goto done; 1083 } 1084 #endif 1085 1086 switch (info->cmd) { 1087 case ETHTOOL_SRXFH: 1088 err = vmxnet3_set_rss_hash_opt(netdev, adapter, info); 1089 break; 1090 default: 1091 err = -EOPNOTSUPP; 1092 break; 1093 } 1094 1095 done: 1096 return err; 1097 } 1098 1099 #ifdef VMXNET3_RSS 1100 static u32 1101 vmxnet3_get_rss_indir_size(struct net_device *netdev) 1102 { 1103 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 1104 struct UPT1_RSSConf *rssConf = adapter->rss_conf; 1105 1106 return rssConf->indTableSize; 1107 } 1108 1109 static int 1110 vmxnet3_get_rss(struct net_device *netdev, u32 *p, u8 *key, u8 *hfunc) 1111 { 1112 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 1113 struct UPT1_RSSConf *rssConf = adapter->rss_conf; 1114 unsigned int n = rssConf->indTableSize; 1115 1116 if (hfunc) 1117 *hfunc = ETH_RSS_HASH_TOP; 1118 if (!p) 1119 return 0; 1120 if (n > UPT1_RSS_MAX_IND_TABLE_SIZE) 1121 return 0; 1122 while (n--) 1123 p[n] = rssConf->indTable[n]; 1124 return 0; 1125 1126 } 1127 1128 static int 1129 vmxnet3_set_rss(struct net_device *netdev, const u32 *p, const u8 *key, 1130 const u8 hfunc) 1131 { 1132 unsigned int i; 1133 unsigned long flags; 1134 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 1135 struct UPT1_RSSConf *rssConf = adapter->rss_conf; 1136 1137 /* We do not allow change in unsupported parameters */ 1138 if (key || 1139 (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) 1140 return -EOPNOTSUPP; 1141 if (!p) 1142 return 0; 1143 for (i = 0; i < rssConf->indTableSize; i++) 1144 rssConf->indTable[i] = p[i]; 1145 1146 spin_lock_irqsave(&adapter->cmd_lock, flags); 1147 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 1148 VMXNET3_CMD_UPDATE_RSSIDT); 1149 spin_unlock_irqrestore(&adapter->cmd_lock, flags); 1150 1151 return 0; 1152 1153 } 1154 #endif 1155 1156 static int vmxnet3_get_coalesce(struct net_device *netdev, 1157 struct ethtool_coalesce *ec, 1158 struct kernel_ethtool_coalesce *kernel_coal, 1159 struct netlink_ext_ack *extack) 1160 { 1161 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 1162 1163 if (!VMXNET3_VERSION_GE_3(adapter)) 1164 return -EOPNOTSUPP; 1165 1166 switch (adapter->coal_conf->coalMode) { 1167 case VMXNET3_COALESCE_DISABLED: 1168 /* struct ethtool_coalesce is already initialized to 0 */ 1169 break; 1170 case VMXNET3_COALESCE_ADAPT: 1171 ec->use_adaptive_rx_coalesce = true; 1172 break; 1173 case VMXNET3_COALESCE_STATIC: 1174 ec->tx_max_coalesced_frames = 1175 adapter->coal_conf->coalPara.coalStatic.tx_comp_depth; 1176 ec->rx_max_coalesced_frames = 1177 adapter->coal_conf->coalPara.coalStatic.rx_depth; 1178 break; 1179 case VMXNET3_COALESCE_RBC: { 1180 u32 rbc_rate; 1181 1182 rbc_rate = adapter->coal_conf->coalPara.coalRbc.rbc_rate; 1183 ec->rx_coalesce_usecs = VMXNET3_COAL_RBC_USECS(rbc_rate); 1184 } 1185 break; 1186 default: 1187 return -EOPNOTSUPP; 1188 } 1189 1190 return 0; 1191 } 1192 1193 static int vmxnet3_set_coalesce(struct net_device *netdev, 1194 struct ethtool_coalesce *ec, 1195 struct kernel_ethtool_coalesce *kernel_coal, 1196 struct netlink_ext_ack *extack) 1197 { 1198 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 1199 struct Vmxnet3_DriverShared *shared = adapter->shared; 1200 union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo; 1201 unsigned long flags; 1202 1203 if (!VMXNET3_VERSION_GE_3(adapter)) 1204 return -EOPNOTSUPP; 1205 1206 if ((ec->rx_coalesce_usecs == 0) && 1207 (ec->use_adaptive_rx_coalesce == 0) && 1208 (ec->tx_max_coalesced_frames == 0) && 1209 (ec->rx_max_coalesced_frames == 0)) { 1210 memset(adapter->coal_conf, 0, sizeof(*adapter->coal_conf)); 1211 adapter->coal_conf->coalMode = VMXNET3_COALESCE_DISABLED; 1212 goto done; 1213 } 1214 1215 if (ec->rx_coalesce_usecs != 0) { 1216 u32 rbc_rate; 1217 1218 if ((ec->use_adaptive_rx_coalesce != 0) || 1219 (ec->tx_max_coalesced_frames != 0) || 1220 (ec->rx_max_coalesced_frames != 0)) { 1221 return -EINVAL; 1222 } 1223 1224 rbc_rate = VMXNET3_COAL_RBC_RATE(ec->rx_coalesce_usecs); 1225 if (rbc_rate < VMXNET3_COAL_RBC_MIN_RATE || 1226 rbc_rate > VMXNET3_COAL_RBC_MAX_RATE) { 1227 return -EINVAL; 1228 } 1229 1230 memset(adapter->coal_conf, 0, sizeof(*adapter->coal_conf)); 1231 adapter->coal_conf->coalMode = VMXNET3_COALESCE_RBC; 1232 adapter->coal_conf->coalPara.coalRbc.rbc_rate = rbc_rate; 1233 goto done; 1234 } 1235 1236 if (ec->use_adaptive_rx_coalesce != 0) { 1237 if (ec->tx_max_coalesced_frames != 0 || 1238 ec->rx_max_coalesced_frames != 0) { 1239 return -EINVAL; 1240 } 1241 memset(adapter->coal_conf, 0, sizeof(*adapter->coal_conf)); 1242 adapter->coal_conf->coalMode = VMXNET3_COALESCE_ADAPT; 1243 goto done; 1244 } 1245 1246 if ((ec->tx_max_coalesced_frames != 0) || 1247 (ec->rx_max_coalesced_frames != 0)) { 1248 if ((ec->tx_max_coalesced_frames > 1249 VMXNET3_COAL_STATIC_MAX_DEPTH) || 1250 (ec->rx_max_coalesced_frames > 1251 VMXNET3_COAL_STATIC_MAX_DEPTH)) { 1252 return -EINVAL; 1253 } 1254 1255 memset(adapter->coal_conf, 0, sizeof(*adapter->coal_conf)); 1256 adapter->coal_conf->coalMode = VMXNET3_COALESCE_STATIC; 1257 1258 adapter->coal_conf->coalPara.coalStatic.tx_comp_depth = 1259 (ec->tx_max_coalesced_frames ? 1260 ec->tx_max_coalesced_frames : 1261 VMXNET3_COAL_STATIC_DEFAULT_DEPTH); 1262 1263 adapter->coal_conf->coalPara.coalStatic.rx_depth = 1264 (ec->rx_max_coalesced_frames ? 1265 ec->rx_max_coalesced_frames : 1266 VMXNET3_COAL_STATIC_DEFAULT_DEPTH); 1267 1268 adapter->coal_conf->coalPara.coalStatic.tx_depth = 1269 VMXNET3_COAL_STATIC_DEFAULT_DEPTH; 1270 goto done; 1271 } 1272 1273 done: 1274 adapter->default_coal_mode = false; 1275 if (netif_running(netdev)) { 1276 spin_lock_irqsave(&adapter->cmd_lock, flags); 1277 cmdInfo->varConf.confVer = 1; 1278 cmdInfo->varConf.confLen = 1279 cpu_to_le32(sizeof(*adapter->coal_conf)); 1280 cmdInfo->varConf.confPA = cpu_to_le64(adapter->coal_conf_pa); 1281 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 1282 VMXNET3_CMD_SET_COALESCE); 1283 spin_unlock_irqrestore(&adapter->cmd_lock, flags); 1284 } 1285 1286 return 0; 1287 } 1288 1289 static const struct ethtool_ops vmxnet3_ethtool_ops = { 1290 .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS | 1291 ETHTOOL_COALESCE_MAX_FRAMES | 1292 ETHTOOL_COALESCE_USE_ADAPTIVE_RX, 1293 .get_drvinfo = vmxnet3_get_drvinfo, 1294 .get_regs_len = vmxnet3_get_regs_len, 1295 .get_regs = vmxnet3_get_regs, 1296 .get_wol = vmxnet3_get_wol, 1297 .set_wol = vmxnet3_set_wol, 1298 .get_link = ethtool_op_get_link, 1299 .get_coalesce = vmxnet3_get_coalesce, 1300 .set_coalesce = vmxnet3_set_coalesce, 1301 .get_strings = vmxnet3_get_strings, 1302 .get_sset_count = vmxnet3_get_sset_count, 1303 .get_ethtool_stats = vmxnet3_get_ethtool_stats, 1304 .get_ringparam = vmxnet3_get_ringparam, 1305 .set_ringparam = vmxnet3_set_ringparam, 1306 .get_rxnfc = vmxnet3_get_rxnfc, 1307 .set_rxnfc = vmxnet3_set_rxnfc, 1308 #ifdef VMXNET3_RSS 1309 .get_rxfh_indir_size = vmxnet3_get_rss_indir_size, 1310 .get_rxfh = vmxnet3_get_rss, 1311 .set_rxfh = vmxnet3_set_rss, 1312 #endif 1313 .get_link_ksettings = vmxnet3_get_link_ksettings, 1314 }; 1315 1316 void vmxnet3_set_ethtool_ops(struct net_device *netdev) 1317 { 1318 netdev->ethtool_ops = &vmxnet3_ethtool_ops; 1319 } 1320