/*
 * Linux driver for VMware's vmxnet3 ethernet NIC.
 *
 * Copyright (C) 2008-2022, VMware, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Maintained by: pv-drivers@vmware.com
 *
 */


#include "vmxnet3_int.h"
#include <net/vxlan.h>
#include <net/geneve.h>

#define VXLAN_UDP_PORT 8472

struct vmxnet3_stat_desc {
	char desc[ETH_GSTRING_LEN];
	int offset;
};
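
/* Each entry below pairs an ethtool -S label with the offsetof() byte
 * offset of a 64-bit counter in the corresponding stats structure.
 * Leading spaces in the labels indent per-queue counters under the
 * "Tx Queue#"/"Rx Queue#" header rows; those header rows use an offset
 * of 0 as a placeholder, and vmxnet3_get_ethtool_stats() emits the
 * queue index for them instead of reading through the offset.
 */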
/* per tq stats maintained by the device */
static const struct vmxnet3_stat_desc
vmxnet3_tq_dev_stats[] = {
	/* description,         offset */
	{ "Tx Queue#",          0 },
	{ "  TSO pkts tx",      offsetof(struct UPT1_TxStats, TSOPktsTxOK) },
	{ "  TSO bytes tx",     offsetof(struct UPT1_TxStats, TSOBytesTxOK) },
	{ "  ucast pkts tx",    offsetof(struct UPT1_TxStats, ucastPktsTxOK) },
	{ "  ucast bytes tx",   offsetof(struct UPT1_TxStats, ucastBytesTxOK) },
	{ "  mcast pkts tx",    offsetof(struct UPT1_TxStats, mcastPktsTxOK) },
	{ "  mcast bytes tx",   offsetof(struct UPT1_TxStats, mcastBytesTxOK) },
	{ "  bcast pkts tx",    offsetof(struct UPT1_TxStats, bcastPktsTxOK) },
	{ "  bcast bytes tx",   offsetof(struct UPT1_TxStats, bcastBytesTxOK) },
	{ "  pkts tx err",      offsetof(struct UPT1_TxStats, pktsTxError) },
	{ "  pkts tx discard",  offsetof(struct UPT1_TxStats, pktsTxDiscard) },
};

/* per tq stats maintained by the driver */
static const struct vmxnet3_stat_desc
vmxnet3_tq_driver_stats[] = {
	/* description,         offset */
	{ "  drv dropped tx total",  offsetof(struct vmxnet3_tq_driver_stats,
					      drop_total) },
	{ "     too many frags",     offsetof(struct vmxnet3_tq_driver_stats,
					      drop_too_many_frags) },
	{ "     giant hdr",          offsetof(struct vmxnet3_tq_driver_stats,
					      drop_oversized_hdr) },
	{ "     hdr err",            offsetof(struct vmxnet3_tq_driver_stats,
					      drop_hdr_inspect_err) },
	{ "     tso",                offsetof(struct vmxnet3_tq_driver_stats,
					      drop_tso) },
	{ "  ring full",             offsetof(struct vmxnet3_tq_driver_stats,
					      tx_ring_full) },
	{ "  pkts linearized",       offsetof(struct vmxnet3_tq_driver_stats,
					      linearized) },
	{ "  hdr cloned",            offsetof(struct vmxnet3_tq_driver_stats,
					      copy_skb_header) },
	{ "  giant hdr",             offsetof(struct vmxnet3_tq_driver_stats,
					      oversized_hdr) },
};

/* per rq stats maintained by the device */
static const struct vmxnet3_stat_desc
vmxnet3_rq_dev_stats[] = {
	{ "Rx Queue#",          0 },
	{ "  LRO pkts rx",      offsetof(struct UPT1_RxStats, LROPktsRxOK) },
	{ "  LRO byte rx",      offsetof(struct UPT1_RxStats, LROBytesRxOK) },
	{ "  ucast pkts rx",    offsetof(struct UPT1_RxStats, ucastPktsRxOK) },
	{ "  ucast bytes rx",   offsetof(struct UPT1_RxStats, ucastBytesRxOK) },
	{ "  mcast pkts rx",    offsetof(struct UPT1_RxStats, mcastPktsRxOK) },
	{ "  mcast bytes rx",   offsetof(struct UPT1_RxStats, mcastBytesRxOK) },
	{ "  bcast pkts rx",    offsetof(struct UPT1_RxStats, bcastPktsRxOK) },
	{ "  bcast bytes rx",   offsetof(struct UPT1_RxStats, bcastBytesRxOK) },
	{ "  pkts rx OOB",      offsetof(struct UPT1_RxStats, pktsRxOutOfBuf) },
	{ "  pkts rx err",      offsetof(struct UPT1_RxStats, pktsRxError) },
};

/* per rq stats maintained by the driver */
static const struct vmxnet3_stat_desc
vmxnet3_rq_driver_stats[] = {
	/* description,         offset */
	{ "  drv dropped rx total",  offsetof(struct vmxnet3_rq_driver_stats,
					      drop_total) },
	{ "     err",                offsetof(struct vmxnet3_rq_driver_stats,
					      drop_err) },
	{ "     fcs",                offsetof(struct vmxnet3_rq_driver_stats,
					      drop_fcs) },
	{ "  rx buf alloc fail",     offsetof(struct vmxnet3_rq_driver_stats,
					      rx_buf_alloc_failure) },
};

/* global stats maintained by the driver */
static const struct vmxnet3_stat_desc
vmxnet3_global_stats[] = {
	/* description,         offset */
	{ "tx timeout count",   offsetof(struct vmxnet3_adapter,
					 tx_timeout_count) }
};


void
vmxnet3_get_stats64(struct net_device *netdev,
		    struct rtnl_link_stats64 *stats)
{
	struct vmxnet3_adapter *adapter;
	struct vmxnet3_tq_driver_stats *drvTxStats;
	struct vmxnet3_rq_driver_stats *drvRxStats;
	struct UPT1_TxStats *devTxStats;
	struct UPT1_RxStats *devRxStats;
	unsigned long flags;
	int i;

	adapter = netdev_priv(netdev);

	/* Collect the dev stats into the shared area */
	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);

	for (i = 0; i < adapter->num_tx_queues; i++) {
		devTxStats = &adapter->tqd_start[i].stats;
		drvTxStats = &adapter->tx_queue[i].stats;
		stats->tx_packets += devTxStats->ucastPktsTxOK +
				     devTxStats->mcastPktsTxOK +
				     devTxStats->bcastPktsTxOK;
		stats->tx_bytes += devTxStats->ucastBytesTxOK +
				   devTxStats->mcastBytesTxOK +
				   devTxStats->bcastBytesTxOK;
		stats->tx_errors += devTxStats->pktsTxError;
		stats->tx_dropped += drvTxStats->drop_total;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		devRxStats = &adapter->rqd_start[i].stats;
		drvRxStats = &adapter->rx_queue[i].stats;
		stats->rx_packets += devRxStats->ucastPktsRxOK +
				     devRxStats->mcastPktsRxOK +
				     devRxStats->bcastPktsRxOK;

		stats->rx_bytes += devRxStats->ucastBytesRxOK +
				   devRxStats->mcastBytesRxOK +
				   devRxStats->bcastBytesRxOK;

		stats->rx_errors += devRxStats->pktsRxError;
		stats->rx_dropped += drvRxStats->drop_total;
		stats->multicast += devRxStats->mcastPktsRxOK;
	}
}

static int
vmxnet3_get_sset_count(struct net_device *netdev, int sset)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	switch (sset) {
	case ETH_SS_STATS:
		return (ARRAY_SIZE(vmxnet3_tq_dev_stats) +
			ARRAY_SIZE(vmxnet3_tq_driver_stats)) *
		       adapter->num_tx_queues +
		       (ARRAY_SIZE(vmxnet3_rq_dev_stats) +
			ARRAY_SIZE(vmxnet3_rq_driver_stats)) *
		       adapter->num_rx_queues +
		       ARRAY_SIZE(vmxnet3_global_stats);
	default:
		return -EOPNOTSUPP;
	}
}
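
/* The register dump below consists of 9 BAR1 registers, the interrupt
 * vector count followed by one IMR word per vector, and a count plus
 * 17 (Tx) or 23 (Rx) words per queue. As an illustration, a device with
 * 4 Tx queues, 4 Rx queues and 9 interrupt vectors would dump
 * (9 + 10 + 69 + 93) * 4 = 724 bytes.
 */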
/* This is a version 2 of the vmxnet3 ethtool_regs which goes hand in hand
 * with the version 2 of the vmxnet3 support for ethtool(8) --register-dump.
 * Therefore, if any registers are added, removed or modified, then a version
 * bump and a corresponding change in the vmxnet3 support for ethtool(8)
 * --register-dump would be required.
 */
static int
vmxnet3_get_regs_len(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	return ((9 /* BAR1 registers */ +
		(1 + adapter->intr.num_intrs) +
		(1 + adapter->num_tx_queues * 17 /* Tx queue registers */) +
		(1 + adapter->num_rx_queues * 23 /* Rx queue registers */)) *
		sizeof(u32));
}


static void
vmxnet3_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	strscpy(drvinfo->driver, vmxnet3_driver_name, sizeof(drvinfo->driver));

	strscpy(drvinfo->version, VMXNET3_DRIVER_VERSION_REPORT,
		sizeof(drvinfo->version));

	strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
		sizeof(drvinfo->bus_info));
}


static void
vmxnet3_get_strings(struct net_device *netdev, u32 stringset, u8 *buf)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	int i, j;

	if (stringset != ETH_SS_STATS)
		return;

	for (j = 0; j < adapter->num_tx_queues; j++) {
		for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++)
			ethtool_sprintf(&buf, vmxnet3_tq_dev_stats[i].desc);
		for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++)
			ethtool_sprintf(&buf, vmxnet3_tq_driver_stats[i].desc);
	}

	for (j = 0; j < adapter->num_rx_queues; j++) {
		for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++)
			ethtool_sprintf(&buf, vmxnet3_rq_dev_stats[i].desc);
		for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++)
			ethtool_sprintf(&buf, vmxnet3_rq_driver_stats[i].desc);
	}

	for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++)
		ethtool_sprintf(&buf, vmxnet3_global_stats[i].desc);
}

netdev_features_t vmxnet3_fix_features(struct net_device *netdev,
				       netdev_features_t features)
{
	/* If Rx checksum is disabled, then LRO should also be disabled */
	if (!(features & NETIF_F_RXCSUM))
		features &= ~NETIF_F_LRO;

	return features;
}

netdev_features_t vmxnet3_features_check(struct sk_buff *skb,
					 struct net_device *netdev,
					 netdev_features_t features)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	/* Validate if the tunneled packet is being offloaded by the device */
	if (VMXNET3_VERSION_GE_4(adapter) &&
	    skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL) {
		u8 l4_proto = 0;
		u16 port;
		struct udphdr *udph;

		switch (vlan_get_protocol(skb)) {
		case htons(ETH_P_IP):
			l4_proto = ip_hdr(skb)->protocol;
			break;
		case htons(ETH_P_IPV6):
			l4_proto = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
		}

		switch (l4_proto) {
		case IPPROTO_UDP:
			udph = udp_hdr(skb);
			port = be16_to_cpu(udph->dest);
			/* Check if offloaded port is supported */
			if (port != GENEVE_UDP_PORT &&
			    port != IANA_VXLAN_UDP_PORT &&
			    port != VXLAN_UDP_PORT) {
				return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
			}
			break;
		default:
			return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
		}
	}
	return features;
}
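
/* For version 7 and later devices, offload capabilities are negotiated
 * through DCR0: the driver writes the capabilities it wants into
 * VMXNET3_REG_DCR, issues VMXNET3_CMD_GET_DCR0_REG, and reads back the
 * set the device actually granted; tunnel GSO/checksum bits that were
 * not granted are then cleared from hw_enc_features again.
 */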
static void vmxnet3_enable_encap_offloads(struct net_device *netdev,
					  netdev_features_t features)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	if (VMXNET3_VERSION_GE_4(adapter)) {
		netdev->hw_enc_features |= NETIF_F_SG | NETIF_F_RXCSUM |
			NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
			NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 |
			NETIF_F_LRO;
		if (features & NETIF_F_GSO_UDP_TUNNEL)
			netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
		if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM)
			netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
	}
	if (VMXNET3_VERSION_GE_7(adapter)) {
		unsigned long flags;

		if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
					       VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD)) {
			adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD;
		}
		if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
					       VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD)) {
			adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD;
		}
		if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
					       VMXNET3_CAP_GENEVE_TSO)) {
			adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_TSO;
		}
		if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
					       VMXNET3_CAP_VXLAN_TSO)) {
			adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_TSO;
		}
		if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
					       VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD)) {
			adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD;
		}
		if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
					       VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD)) {
			adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD;
		}

		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR, adapter->dev_caps[0]);
		spin_lock_irqsave(&adapter->cmd_lock, flags);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_DCR0_REG);
		adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
		spin_unlock_irqrestore(&adapter->cmd_lock, flags);

		if (!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD)) &&
		    !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD)) &&
		    !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_GENEVE_TSO)) &&
		    !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_VXLAN_TSO))) {
			netdev->hw_enc_features &= ~NETIF_F_GSO_UDP_TUNNEL;
		}
		if (!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD)) &&
		    !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD))) {
			netdev->hw_enc_features &= ~NETIF_F_GSO_UDP_TUNNEL_CSUM;
		}
	}
}

static void vmxnet3_disable_encap_offloads(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	if (VMXNET3_VERSION_GE_4(adapter)) {
		netdev->hw_enc_features &= ~(NETIF_F_SG | NETIF_F_RXCSUM |
			NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
			NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 |
			NETIF_F_LRO | NETIF_F_GSO_UDP_TUNNEL |
			NETIF_F_GSO_UDP_TUNNEL_CSUM);
	}
	if (VMXNET3_VERSION_GE_7(adapter)) {
		unsigned long flags;

		adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD |
					  1UL << VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD |
					  1UL << VMXNET3_CAP_GENEVE_TSO |
					  1UL << VMXNET3_CAP_VXLAN_TSO |
					  1UL << VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD |
					  1UL << VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD);

		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR, adapter->dev_caps[0]);
		spin_lock_irqsave(&adapter->cmd_lock, flags);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_DCR0_REG);
		adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	}
}
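
/* Apply feature toggles: mirror the requested RXCSUM/LRO/VLAN-strip and
 * UDP-tunnel offload state into the uptFeatures bits of the shared area,
 * then notify the device with VMXNET3_CMD_UPDATE_FEATURE under cmd_lock.
 */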
int vmxnet3_set_features(struct net_device *netdev, netdev_features_t features)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	unsigned long flags;
	netdev_features_t changed = features ^ netdev->features;
	netdev_features_t tun_offload_mask = NETIF_F_GSO_UDP_TUNNEL |
					     NETIF_F_GSO_UDP_TUNNEL_CSUM;
	u8 udp_tun_enabled = (netdev->features & tun_offload_mask) != 0;

	if (changed & (NETIF_F_RXCSUM | NETIF_F_LRO |
		       NETIF_F_HW_VLAN_CTAG_RX | tun_offload_mask)) {
		if (features & NETIF_F_RXCSUM)
			adapter->shared->devRead.misc.uptFeatures |=
				UPT1_F_RXCSUM;
		else
			adapter->shared->devRead.misc.uptFeatures &=
				~UPT1_F_RXCSUM;

		/* update hardware LRO capability accordingly */
		if (features & NETIF_F_LRO)
			adapter->shared->devRead.misc.uptFeatures |=
				UPT1_F_LRO;
		else
			adapter->shared->devRead.misc.uptFeatures &=
				~UPT1_F_LRO;

		if (features & NETIF_F_HW_VLAN_CTAG_RX)
			adapter->shared->devRead.misc.uptFeatures |=
				UPT1_F_RXVLAN;
		else
			adapter->shared->devRead.misc.uptFeatures &=
				~UPT1_F_RXVLAN;

		if ((features & tun_offload_mask) != 0) {
			vmxnet3_enable_encap_offloads(netdev, features);
			adapter->shared->devRead.misc.uptFeatures |=
				UPT1_F_RXINNEROFLD;
		} else if ((features & tun_offload_mask) == 0 &&
			   udp_tun_enabled) {
			vmxnet3_disable_encap_offloads(netdev);
			adapter->shared->devRead.misc.uptFeatures &=
				~UPT1_F_RXINNEROFLD;
		}

		spin_lock_irqsave(&adapter->cmd_lock, flags);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_UPDATE_FEATURE);
		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	}
	return 0;
}
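
/* The values written here must match, in number and order, the strings
 * laid down by vmxnet3_get_strings() and the count returned by
 * vmxnet3_get_sset_count(). The "Queue#" header rows carry the queue
 * index itself, which is why the device-stat loops start at i = 1.
 */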
static void
vmxnet3_get_ethtool_stats(struct net_device *netdev,
			  struct ethtool_stats *stats, u64 *buf)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	unsigned long flags;
	u8 *base;
	int i;
	int j = 0;

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);

	/* this does assume each counter is 64-bit wide */
	for (j = 0; j < adapter->num_tx_queues; j++) {
		base = (u8 *)&adapter->tqd_start[j].stats;
		*buf++ = (u64)j;
		for (i = 1; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++)
			*buf++ = *(u64 *)(base +
					  vmxnet3_tq_dev_stats[i].offset);

		base = (u8 *)&adapter->tx_queue[j].stats;
		for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++)
			*buf++ = *(u64 *)(base +
					  vmxnet3_tq_driver_stats[i].offset);
	}

	for (j = 0; j < adapter->num_rx_queues; j++) {
		base = (u8 *)&adapter->rqd_start[j].stats;
		*buf++ = (u64)j;
		for (i = 1; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++)
			*buf++ = *(u64 *)(base +
					  vmxnet3_rq_dev_stats[i].offset);

		base = (u8 *)&adapter->rx_queue[j].stats;
		for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++)
			*buf++ = *(u64 *)(base +
					  vmxnet3_rq_driver_stats[i].offset);
	}

	base = (u8 *)adapter;
	for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++)
		*buf++ = *(u64 *)(base + vmxnet3_global_stats[i].offset);
}


/* This is a version 2 of the vmxnet3 ethtool_regs which goes hand in hand
 * with the version 2 of the vmxnet3 support for ethtool(8) --register-dump.
 * Therefore, if any registers are added, removed or modified, then a version
 * bump and a corresponding change in the vmxnet3 support for ethtool(8)
 * --register-dump would be required.
 */
static void
vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	u32 *buf = p;
	int i = 0, j = 0;

	memset(p, 0, vmxnet3_get_regs_len(netdev));

	regs->version = 2;

	/* Update vmxnet3_get_regs_len if we want to dump more registers */

	buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
	buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS);
	buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_DSAL);
	buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_DSAH);
	buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
	buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACL);
	buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACH);
	buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR);
	buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ECR);

	buf[j++] = adapter->intr.num_intrs;
	for (i = 0; i < adapter->intr.num_intrs; i++) {
		buf[j++] = VMXNET3_READ_BAR0_REG(adapter, VMXNET3_REG_IMR +
						 i * VMXNET3_REG_ALIGN);
	}

	buf[j++] = adapter->num_tx_queues;
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];

		buf[j++] = VMXNET3_READ_BAR0_REG(adapter, adapter->tx_prod_offset +
						 i * VMXNET3_REG_ALIGN);

		buf[j++] = VMXNET3_GET_ADDR_LO(tq->tx_ring.basePA);
		buf[j++] = VMXNET3_GET_ADDR_HI(tq->tx_ring.basePA);
		buf[j++] = tq->tx_ring.size;
		buf[j++] = tq->tx_ring.next2fill;
		buf[j++] = tq->tx_ring.next2comp;
		buf[j++] = tq->tx_ring.gen;

		buf[j++] = VMXNET3_GET_ADDR_LO(tq->data_ring.basePA);
		buf[j++] = VMXNET3_GET_ADDR_HI(tq->data_ring.basePA);
		buf[j++] = tq->data_ring.size;
		buf[j++] = tq->txdata_desc_size;

		buf[j++] = VMXNET3_GET_ADDR_LO(tq->comp_ring.basePA);
		buf[j++] = VMXNET3_GET_ADDR_HI(tq->comp_ring.basePA);
		buf[j++] = tq->comp_ring.size;
		buf[j++] = tq->comp_ring.next2proc;
		buf[j++] = tq->comp_ring.gen;

		buf[j++] = tq->stopped;
	}

	buf[j++] = adapter->num_rx_queues;
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];

		buf[j++] = VMXNET3_READ_BAR0_REG(adapter, adapter->rx_prod_offset +
						 i * VMXNET3_REG_ALIGN);
		buf[j++] = VMXNET3_READ_BAR0_REG(adapter, adapter->rx_prod2_offset +
						 i * VMXNET3_REG_ALIGN);

		buf[j++] = VMXNET3_GET_ADDR_LO(rq->rx_ring[0].basePA);
		buf[j++] = VMXNET3_GET_ADDR_HI(rq->rx_ring[0].basePA);
		buf[j++] = rq->rx_ring[0].size;
		buf[j++] = rq->rx_ring[0].next2fill;
		buf[j++] = rq->rx_ring[0].next2comp;
		buf[j++] = rq->rx_ring[0].gen;

		buf[j++] = VMXNET3_GET_ADDR_LO(rq->rx_ring[1].basePA);
		buf[j++] = VMXNET3_GET_ADDR_HI(rq->rx_ring[1].basePA);
		buf[j++] = rq->rx_ring[1].size;
		buf[j++] = rq->rx_ring[1].next2fill;
		buf[j++] = rq->rx_ring[1].next2comp;
		buf[j++] = rq->rx_ring[1].gen;

		buf[j++] = VMXNET3_GET_ADDR_LO(rq->data_ring.basePA);
		buf[j++] = VMXNET3_GET_ADDR_HI(rq->data_ring.basePA);
		buf[j++] = rq->rx_ring[0].size;
		buf[j++] = rq->data_ring.desc_size;

		buf[j++] = VMXNET3_GET_ADDR_LO(rq->comp_ring.basePA);
		buf[j++] = VMXNET3_GET_ADDR_HI(rq->comp_ring.basePA);
		buf[j++] = rq->comp_ring.size;
		buf[j++] = rq->comp_ring.next2proc;
		buf[j++] = rq->comp_ring.gen;
	}
}
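
/* Wake-on-LAN: the device wakes on unicast, ARP and magic packets;
 * vmxnet3_set_wol() rejects any other WAKE_* option.
 */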
static void
vmxnet3_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	wol->supported = WAKE_UCAST | WAKE_ARP | WAKE_MAGIC;
	wol->wolopts = adapter->wol;
}


static int
vmxnet3_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	if (wol->wolopts & (WAKE_PHY | WAKE_MCAST | WAKE_BCAST |
			    WAKE_MAGICSECURE)) {
		return -EOPNOTSUPP;
	}

	adapter->wol = wol->wolopts;

	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	return 0;
}


static int
vmxnet3_get_link_ksettings(struct net_device *netdev,
			   struct ethtool_link_ksettings *ecmd)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	ethtool_link_ksettings_zero_link_mode(ecmd, supported);
	ethtool_link_ksettings_add_link_mode(ecmd, supported, 10000baseT_Full);
	ethtool_link_ksettings_add_link_mode(ecmd, supported, 1000baseT_Full);
	ethtool_link_ksettings_add_link_mode(ecmd, supported, TP);
	ethtool_link_ksettings_zero_link_mode(ecmd, advertising);
	ethtool_link_ksettings_add_link_mode(ecmd, advertising, TP);
	ecmd->base.port = PORT_TP;

	if (adapter->link_speed) {
		ecmd->base.speed = adapter->link_speed;
		ecmd->base.duplex = DUPLEX_FULL;
	} else {
		ecmd->base.speed = SPEED_UNKNOWN;
		ecmd->base.duplex = DUPLEX_UNKNOWN;
	}
	return 0;
}

static void
vmxnet3_get_ringparam(struct net_device *netdev,
		      struct ethtool_ringparam *param,
		      struct kernel_ethtool_ringparam *kernel_param,
		      struct netlink_ext_ack *extack)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	param->rx_max_pending = VMXNET3_RX_RING_MAX_SIZE;
	param->tx_max_pending = VMXNET3_TX_RING_MAX_SIZE;
	param->rx_mini_max_pending = VMXNET3_VERSION_GE_3(adapter) ?
		VMXNET3_RXDATA_DESC_MAX_SIZE : 0;
	param->rx_jumbo_max_pending = VMXNET3_RX_RING2_MAX_SIZE;

	param->rx_pending = adapter->rx_ring_size;
	param->tx_pending = adapter->tx_ring_size;
	param->rx_mini_pending = VMXNET3_VERSION_GE_3(adapter) ?
		adapter->rxdata_desc_size : 0;
	param->rx_jumbo_pending = adapter->rx_ring2_size;
}
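
/* Requested ring sizes are rounded, not rejected: the Tx ring and Rx
 * ring2 round up to a multiple of VMXNET3_RING_SIZE_ALIGN, and Rx ring0
 * to a multiple of rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN. As an
 * illustration, with rx_buf_per_pkt == 3 and an alignment of 32
 * (so sz == 96), a requested rx_pending of 1000 becomes 1056.
 */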
static int
vmxnet3_set_ringparam(struct net_device *netdev,
		      struct ethtool_ringparam *param,
		      struct kernel_ethtool_ringparam *kernel_param,
		      struct netlink_ext_ack *extack)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	u32 new_tx_ring_size, new_rx_ring_size, new_rx_ring2_size;
	u16 new_rxdata_desc_size;
	u32 sz;
	int err = 0;

	if (param->tx_pending == 0 ||
	    param->tx_pending > VMXNET3_TX_RING_MAX_SIZE)
		return -EINVAL;

	if (param->rx_pending == 0 ||
	    param->rx_pending > VMXNET3_RX_RING_MAX_SIZE)
		return -EINVAL;

	if (param->rx_jumbo_pending == 0 ||
	    param->rx_jumbo_pending > VMXNET3_RX_RING2_MAX_SIZE)
		return -EINVAL;

	/* if adapter not yet initialized, do nothing */
	if (adapter->rx_buf_per_pkt == 0) {
		netdev_err(netdev, "adapter not completely initialized, ring size cannot be changed yet\n");
		return -EOPNOTSUPP;
	}

	if (VMXNET3_VERSION_GE_3(adapter)) {
		if (param->rx_mini_pending > VMXNET3_RXDATA_DESC_MAX_SIZE)
			return -EINVAL;
	} else if (param->rx_mini_pending != 0) {
		return -EINVAL;
	}

	/* round it up to a multiple of VMXNET3_RING_SIZE_ALIGN */
	new_tx_ring_size = (param->tx_pending + VMXNET3_RING_SIZE_MASK) &
			   ~VMXNET3_RING_SIZE_MASK;
	new_tx_ring_size = min_t(u32, new_tx_ring_size,
				 VMXNET3_TX_RING_MAX_SIZE);
	if (new_tx_ring_size > VMXNET3_TX_RING_MAX_SIZE ||
	    (new_tx_ring_size % VMXNET3_RING_SIZE_ALIGN) != 0)
		return -EINVAL;

	/* ring0 has to be a multiple of
	 * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
	 */
	sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
	new_rx_ring_size = (param->rx_pending + sz - 1) / sz * sz;
	new_rx_ring_size = min_t(u32, new_rx_ring_size,
				 VMXNET3_RX_RING_MAX_SIZE / sz * sz);
	if (new_rx_ring_size > VMXNET3_RX_RING_MAX_SIZE ||
	    (new_rx_ring_size % sz) != 0)
		return -EINVAL;

	/* ring2 has to be a multiple of VMXNET3_RING_SIZE_ALIGN */
	new_rx_ring2_size = (param->rx_jumbo_pending + VMXNET3_RING_SIZE_MASK) &
			    ~VMXNET3_RING_SIZE_MASK;
	new_rx_ring2_size = min_t(u32, new_rx_ring2_size,
				  VMXNET3_RX_RING2_MAX_SIZE);

	/* For v7 and later, keep ring size power of 2 for UPT */
	if (VMXNET3_VERSION_GE_7(adapter)) {
		new_tx_ring_size = rounddown_pow_of_two(new_tx_ring_size);
		new_rx_ring_size = rounddown_pow_of_two(new_rx_ring_size);
		new_rx_ring2_size = rounddown_pow_of_two(new_rx_ring2_size);
	}

	/* rx data ring buffer size has to be a multiple of
	 * VMXNET3_RXDATA_DESC_SIZE_ALIGN
	 */
	new_rxdata_desc_size =
		(param->rx_mini_pending + VMXNET3_RXDATA_DESC_SIZE_MASK) &
		~VMXNET3_RXDATA_DESC_SIZE_MASK;
	new_rxdata_desc_size = min_t(u16, new_rxdata_desc_size,
				     VMXNET3_RXDATA_DESC_MAX_SIZE);

	if (new_tx_ring_size == adapter->tx_ring_size &&
	    new_rx_ring_size == adapter->rx_ring_size &&
	    new_rx_ring2_size == adapter->rx_ring2_size &&
	    new_rxdata_desc_size == adapter->rxdata_desc_size) {
		return 0;
	}

	/*
	 * Reset_work may be in the middle of resetting the device, wait for
	 * its completion.
	 */
	while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
		usleep_range(1000, 2000);

	if (netif_running(netdev)) {
		vmxnet3_quiesce_dev(adapter);
		vmxnet3_reset_dev(adapter);

		/* recreate the rx queue and the tx queue based on the
		 * new sizes
		 */
		vmxnet3_tq_destroy_all(adapter);
		vmxnet3_rq_destroy_all(adapter);

		err = vmxnet3_create_queues(adapter, new_tx_ring_size,
					    new_rx_ring_size, new_rx_ring2_size,
					    adapter->txdata_desc_size,
					    new_rxdata_desc_size);
		if (err) {
			/* failed, most likely because of OOM, try the default
			 * sizes
			 */
			netdev_err(netdev, "failed to apply new sizes, try the default ones\n");
			new_rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
			new_rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE;
			new_tx_ring_size = VMXNET3_DEF_TX_RING_SIZE;
			new_rxdata_desc_size = VMXNET3_VERSION_GE_3(adapter) ?
				VMXNET3_DEF_RXDATA_DESC_SIZE : 0;

			err = vmxnet3_create_queues(adapter,
						    new_tx_ring_size,
						    new_rx_ring_size,
						    new_rx_ring2_size,
						    adapter->txdata_desc_size,
						    new_rxdata_desc_size);
			if (err) {
				netdev_err(netdev, "failed to create queues with default sizes. Closing it\n");
				goto out;
			}
		}

		err = vmxnet3_activate_dev(adapter);
		if (err)
			netdev_err(netdev, "failed to re-activate, error %d. Closing it\n",
				   err);
	}
	adapter->tx_ring_size = new_tx_ring_size;
	adapter->rx_ring_size = new_rx_ring_size;
	adapter->rx_ring2_size = new_rx_ring2_size;
	adapter->rxdata_desc_size = new_rxdata_desc_size;

out:
	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
	if (err)
		vmxnet3_force_close(adapter);

	return err;
}
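
/* Report which header fields feed the RSS hash for each flow type:
 * TCP always hashes the full 4-tuple; UDP and ESP/AH include the L4
 * ports only when the matching VMXNET3_RSS_FIELDS_* bit is set; all
 * remaining flow types hash on src/dst IP only. While the device is
 * running, the current value is fetched via VMXNET3_CMD_GET_RSS_FIELDS
 * instead of trusting the cached copy.
 */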
static int
vmxnet3_get_rss_hash_opts(struct vmxnet3_adapter *adapter,
			  struct ethtool_rxnfc *info)
{
	enum Vmxnet3_RSSField rss_fields;

	if (netif_running(adapter->netdev)) {
		unsigned long flags;

		spin_lock_irqsave(&adapter->cmd_lock, flags);

		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_GET_RSS_FIELDS);
		rss_fields = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	} else {
		rss_fields = adapter->rss_fields;
	}

	info->data = 0;

	/* Report default options for RSS on vmxnet3 */
	switch (info->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3 |
			      RXH_IP_SRC | RXH_IP_DST;
		break;
	case UDP_V4_FLOW:
		if (rss_fields & VMXNET3_RSS_FIELDS_UDPIP4)
			info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		info->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
		if (rss_fields & VMXNET3_RSS_FIELDS_ESPIP4)
			info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		fallthrough;
	case SCTP_V4_FLOW:
	case IPV4_FLOW:
		info->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	case UDP_V6_FLOW:
		if (rss_fields & VMXNET3_RSS_FIELDS_UDPIP6)
			info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		info->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
		if (VMXNET3_VERSION_GE_6(adapter) &&
		    (rss_fields & VMXNET3_RSS_FIELDS_ESPIP6))
			info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		fallthrough;
	case SCTP_V6_FLOW:
	case IPV6_FLOW:
		info->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
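
/* L4 port hashing can only be toggled as a pair (RXH_L4_B_0_1 together
 * with RXH_L4_B_2_3), and src/dst IP hashing cannot be turned off. The
 * device may still decline part of the request, so the applied value is
 * read back and cached after VMXNET3_CMD_SET_RSS_FIELDS.
 */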
static int
vmxnet3_set_rss_hash_opt(struct net_device *netdev,
			 struct vmxnet3_adapter *adapter,
			 struct ethtool_rxnfc *nfc)
{
	enum Vmxnet3_RSSField rss_fields = adapter->rss_fields;

	/* RSS does not support anything other than hashing
	 * to queues on src and dst IPs and ports
	 */
	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST) ||
		    !(nfc->data & RXH_L4_B_0_1) ||
		    !(nfc->data & RXH_L4_B_2_3))
			return -EINVAL;
		break;
	case UDP_V4_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST))
			return -EINVAL;
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			rss_fields &= ~VMXNET3_RSS_FIELDS_UDPIP4;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			rss_fields |= VMXNET3_RSS_FIELDS_UDPIP4;
			break;
		default:
			return -EINVAL;
		}
		break;
	case UDP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST))
			return -EINVAL;
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			rss_fields &= ~VMXNET3_RSS_FIELDS_UDPIP6;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			rss_fields |= VMXNET3_RSS_FIELDS_UDPIP6;
			break;
		default:
			return -EINVAL;
		}
		break;
	case ESP_V4_FLOW:
	case AH_V4_FLOW:
	case AH_ESP_V4_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST))
			return -EINVAL;
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			rss_fields &= ~VMXNET3_RSS_FIELDS_ESPIP4;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			rss_fields |= VMXNET3_RSS_FIELDS_ESPIP4;
			break;
		default:
			return -EINVAL;
		}
		break;
	case ESP_V6_FLOW:
	case AH_V6_FLOW:
	case AH_ESP_V6_FLOW:
		if (!VMXNET3_VERSION_GE_6(adapter))
			return -EOPNOTSUPP;
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST))
			return -EINVAL;
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			rss_fields &= ~VMXNET3_RSS_FIELDS_ESPIP6;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			rss_fields |= VMXNET3_RSS_FIELDS_ESPIP6;
			break;
		default:
			return -EINVAL;
		}
		break;
	case SCTP_V4_FLOW:
	case SCTP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST) ||
		    (nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	/* if we changed something we need to update flags */
	if (rss_fields != adapter->rss_fields) {
		adapter->default_rss_fields = false;
		if (netif_running(netdev)) {
			struct Vmxnet3_DriverShared *shared = adapter->shared;
			union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
			unsigned long flags;

			if (VMXNET3_VERSION_GE_7(adapter)) {
				if ((rss_fields & VMXNET3_RSS_FIELDS_UDPIP4 ||
				     rss_fields & VMXNET3_RSS_FIELDS_UDPIP6) &&
				    vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
							       VMXNET3_CAP_UDP_RSS)) {
					adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_UDP_RSS;
				} else {
					adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_UDP_RSS);
				}
				if ((rss_fields & VMXNET3_RSS_FIELDS_ESPIP4) &&
				    vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
							       VMXNET3_CAP_ESP_RSS_IPV4)) {
					adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_ESP_RSS_IPV4;
				} else {
					adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_ESP_RSS_IPV4);
				}
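
/* ethtool -n/-N entry points: only ETHTOOL_GRXRINGS (report the Rx
 * queue count) and ETHTOOL_GRXFH/ETHTOOL_SRXFH (RSS hash fields) are
 * implemented; the hash-field ops additionally require a version 4+
 * device with RSS compiled in.
 */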
				if ((rss_fields & VMXNET3_RSS_FIELDS_ESPIP6) &&
				    vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
							       VMXNET3_CAP_ESP_RSS_IPV6)) {
					adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_ESP_RSS_IPV6;
				} else {
					adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_ESP_RSS_IPV6);
				}

				VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR,
						       adapter->dev_caps[0]);
				spin_lock_irqsave(&adapter->cmd_lock, flags);
				VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
						       VMXNET3_CMD_GET_DCR0_REG);
				adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter,
									     VMXNET3_REG_CMD);
				spin_unlock_irqrestore(&adapter->cmd_lock, flags);
			}
			spin_lock_irqsave(&adapter->cmd_lock, flags);
			cmdInfo->setRssFields = rss_fields;
			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
					       VMXNET3_CMD_SET_RSS_FIELDS);

			/* Not all requested RSS may get applied, so get and
			 * cache what was actually applied.
			 */
			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
					       VMXNET3_CMD_GET_RSS_FIELDS);
			adapter->rss_fields =
				VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
			spin_unlock_irqrestore(&adapter->cmd_lock, flags);
		} else {
			/* When the device is activated, we will try to apply
			 * these rules and cache the applied value later.
			 */
			adapter->rss_fields = rss_fields;
		}
	}
	return 0;
}

static int
vmxnet3_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info,
		  u32 *rules)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	int err = 0;

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		info->data = adapter->num_rx_queues;
		break;
	case ETHTOOL_GRXFH:
		if (!VMXNET3_VERSION_GE_4(adapter)) {
			err = -EOPNOTSUPP;
			break;
		}
#ifdef VMXNET3_RSS
		if (!adapter->rss) {
			err = -EOPNOTSUPP;
			break;
		}
#endif
		err = vmxnet3_get_rss_hash_opts(adapter, info);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int
vmxnet3_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	int err = 0;

	if (!VMXNET3_VERSION_GE_4(adapter)) {
		err = -EOPNOTSUPP;
		goto done;
	}
#ifdef VMXNET3_RSS
	if (!adapter->rss) {
		err = -EOPNOTSUPP;
		goto done;
	}
#endif

	switch (info->cmd) {
	case ETHTOOL_SRXFH:
		err = vmxnet3_set_rss_hash_opt(netdev, adapter, info);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

done:
	return err;
}

#ifdef VMXNET3_RSS
static u32
vmxnet3_get_rss_indir_size(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	struct UPT1_RSSConf *rssConf = adapter->rss_conf;

	return rssConf->indTableSize;
}

static int
vmxnet3_get_rss(struct net_device *netdev, u32 *p, u8 *key, u8 *hfunc)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	struct UPT1_RSSConf *rssConf = adapter->rss_conf;
	unsigned int n = rssConf->indTableSize;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;
	if (!p)
		return 0;
	if (n > UPT1_RSS_MAX_IND_TABLE_SIZE)
		return 0;
	while (n--)
		p[n] = rssConf->indTable[n];
	return 0;
}
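
/* Interrupt coalescing (version 3+ devices) has three mutually
 * exclusive modes: rate-based (RBC, derived from rx-usecs), adaptive
 * (adaptive-rx on) and static frame counts (rx-frames/tx-frames).
 * Setting every parameter to zero disables coalescing altogether.
 */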
static int
vmxnet3_set_rss(struct net_device *netdev, const u32 *p, const u8 *key,
		const u8 hfunc)
{
	unsigned int i;
	unsigned long flags;
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	struct UPT1_RSSConf *rssConf = adapter->rss_conf;

	/* We do not allow change in unsupported parameters */
	if (key ||
	    (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
		return -EOPNOTSUPP;
	if (!p)
		return 0;
	for (i = 0; i < rssConf->indTableSize; i++)
		rssConf->indTable[i] = p[i];

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_RSSIDT);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);

	return 0;
}
#endif

static int vmxnet3_get_coalesce(struct net_device *netdev,
				struct ethtool_coalesce *ec,
				struct kernel_ethtool_coalesce *kernel_coal,
				struct netlink_ext_ack *extack)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	if (!VMXNET3_VERSION_GE_3(adapter))
		return -EOPNOTSUPP;

	switch (adapter->coal_conf->coalMode) {
	case VMXNET3_COALESCE_DISABLED:
		/* struct ethtool_coalesce is already initialized to 0 */
		break;
	case VMXNET3_COALESCE_ADAPT:
		ec->use_adaptive_rx_coalesce = true;
		break;
	case VMXNET3_COALESCE_STATIC:
		ec->tx_max_coalesced_frames =
			adapter->coal_conf->coalPara.coalStatic.tx_comp_depth;
		ec->rx_max_coalesced_frames =
			adapter->coal_conf->coalPara.coalStatic.rx_depth;
		break;
	case VMXNET3_COALESCE_RBC: {
		u32 rbc_rate;

		rbc_rate = adapter->coal_conf->coalPara.coalRbc.rbc_rate;
		ec->rx_coalesce_usecs = VMXNET3_COAL_RBC_USECS(rbc_rate);
		break;
	}
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int vmxnet3_set_coalesce(struct net_device *netdev,
				struct ethtool_coalesce *ec,
				struct kernel_ethtool_coalesce *kernel_coal,
				struct netlink_ext_ack *extack)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	struct Vmxnet3_DriverShared *shared = adapter->shared;
	union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
	unsigned long flags;

	if (!VMXNET3_VERSION_GE_3(adapter))
		return -EOPNOTSUPP;

	if ((ec->rx_coalesce_usecs == 0) &&
	    (ec->use_adaptive_rx_coalesce == 0) &&
	    (ec->tx_max_coalesced_frames == 0) &&
	    (ec->rx_max_coalesced_frames == 0)) {
		memset(adapter->coal_conf, 0, sizeof(*adapter->coal_conf));
		adapter->coal_conf->coalMode = VMXNET3_COALESCE_DISABLED;
		goto done;
	}

	if (ec->rx_coalesce_usecs != 0) {
		u32 rbc_rate;

		if ((ec->use_adaptive_rx_coalesce != 0) ||
		    (ec->tx_max_coalesced_frames != 0) ||
		    (ec->rx_max_coalesced_frames != 0)) {
			return -EINVAL;
		}

		rbc_rate = VMXNET3_COAL_RBC_RATE(ec->rx_coalesce_usecs);
		if (rbc_rate < VMXNET3_COAL_RBC_MIN_RATE ||
		    rbc_rate > VMXNET3_COAL_RBC_MAX_RATE) {
			return -EINVAL;
		}

		memset(adapter->coal_conf, 0, sizeof(*adapter->coal_conf));
		adapter->coal_conf->coalMode = VMXNET3_COALESCE_RBC;
		adapter->coal_conf->coalPara.coalRbc.rbc_rate = rbc_rate;
		goto done;
	}

	if (ec->use_adaptive_rx_coalesce != 0) {
		if (ec->tx_max_coalesced_frames != 0 ||
		    ec->rx_max_coalesced_frames != 0) {
			return -EINVAL;
		}
		memset(adapter->coal_conf, 0, sizeof(*adapter->coal_conf));
		adapter->coal_conf->coalMode = VMXNET3_COALESCE_ADAPT;
		goto done;
	}
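
/* Channel counts are fixed at probe time, so the maximums simply mirror
 * the current values. With MSI-X, BUDDYSHARE mode shares one vector
 * between each Tx/Rx queue pair (reported as combined channels), while
 * TXSHARE funnels all Tx queues onto a single vector.
 */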
	if ((ec->tx_max_coalesced_frames != 0) ||
	    (ec->rx_max_coalesced_frames != 0)) {
		if ((ec->tx_max_coalesced_frames >
		     VMXNET3_COAL_STATIC_MAX_DEPTH) ||
		    (ec->rx_max_coalesced_frames >
		     VMXNET3_COAL_STATIC_MAX_DEPTH)) {
			return -EINVAL;
		}

		memset(adapter->coal_conf, 0, sizeof(*adapter->coal_conf));
		adapter->coal_conf->coalMode = VMXNET3_COALESCE_STATIC;

		adapter->coal_conf->coalPara.coalStatic.tx_comp_depth =
			(ec->tx_max_coalesced_frames ?
			 ec->tx_max_coalesced_frames :
			 VMXNET3_COAL_STATIC_DEFAULT_DEPTH);

		adapter->coal_conf->coalPara.coalStatic.rx_depth =
			(ec->rx_max_coalesced_frames ?
			 ec->rx_max_coalesced_frames :
			 VMXNET3_COAL_STATIC_DEFAULT_DEPTH);

		adapter->coal_conf->coalPara.coalStatic.tx_depth =
			VMXNET3_COAL_STATIC_DEFAULT_DEPTH;
		goto done;
	}

done:
	adapter->default_coal_mode = false;
	if (netif_running(netdev)) {
		spin_lock_irqsave(&adapter->cmd_lock, flags);
		cmdInfo->varConf.confVer = 1;
		cmdInfo->varConf.confLen =
			cpu_to_le32(sizeof(*adapter->coal_conf));
		cmdInfo->varConf.confPA = cpu_to_le64(adapter->coal_conf_pa);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_SET_COALESCE);
		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	}

	return 0;
}

static void vmxnet3_get_channels(struct net_device *netdev,
				 struct ethtool_channels *ec)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	if (IS_ENABLED(CONFIG_PCI_MSI) && adapter->intr.type == VMXNET3_IT_MSIX) {
		if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE) {
			ec->combined_count = adapter->num_tx_queues;
		} else {
			ec->rx_count = adapter->num_rx_queues;
			ec->tx_count =
				adapter->share_intr == VMXNET3_INTR_TXSHARE ?
				1 : adapter->num_tx_queues;
		}
	} else {
		ec->combined_count = 1;
	}

	ec->other_count = 1;

	/* Number of interrupts cannot be changed on the fly */
	/* Just set maximums to actual values */
	ec->max_rx = ec->rx_count;
	ec->max_tx = ec->tx_count;
	ec->max_combined = ec->combined_count;
	ec->max_other = ec->other_count;
}

static const struct ethtool_ops vmxnet3_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES |
				     ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
	.get_drvinfo       = vmxnet3_get_drvinfo,
	.get_regs_len      = vmxnet3_get_regs_len,
	.get_regs          = vmxnet3_get_regs,
	.get_wol           = vmxnet3_get_wol,
	.set_wol           = vmxnet3_set_wol,
	.get_link          = ethtool_op_get_link,
	.get_coalesce      = vmxnet3_get_coalesce,
	.set_coalesce      = vmxnet3_set_coalesce,
	.get_strings       = vmxnet3_get_strings,
	.get_sset_count    = vmxnet3_get_sset_count,
	.get_ethtool_stats = vmxnet3_get_ethtool_stats,
	.get_ringparam     = vmxnet3_get_ringparam,
	.set_ringparam     = vmxnet3_set_ringparam,
	.get_rxnfc         = vmxnet3_get_rxnfc,
	.set_rxnfc         = vmxnet3_set_rxnfc,
#ifdef VMXNET3_RSS
	.get_rxfh_indir_size = vmxnet3_get_rss_indir_size,
	.get_rxfh          = vmxnet3_get_rss,
	.set_rxfh          = vmxnet3_set_rss,
#endif
	.get_link_ksettings = vmxnet3_get_link_ksettings,
	.get_channels      = vmxnet3_get_channels,
};

void vmxnet3_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &vmxnet3_ethtool_ops;
}