1d1a890faSShreyas Bhatewara /* 2d1a890faSShreyas Bhatewara * Linux driver for VMware's vmxnet3 ethernet NIC. 3d1a890faSShreyas Bhatewara * 4d1a890faSShreyas Bhatewara * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved. 5d1a890faSShreyas Bhatewara * 6d1a890faSShreyas Bhatewara * This program is free software; you can redistribute it and/or modify it 7d1a890faSShreyas Bhatewara * under the terms of the GNU General Public License as published by the 8d1a890faSShreyas Bhatewara * Free Software Foundation; version 2 of the License and no later version. 9d1a890faSShreyas Bhatewara * 10d1a890faSShreyas Bhatewara * This program is distributed in the hope that it will be useful, but 11d1a890faSShreyas Bhatewara * WITHOUT ANY WARRANTY; without even the implied warranty of 12d1a890faSShreyas Bhatewara * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or 13d1a890faSShreyas Bhatewara * NON INFRINGEMENT. See the GNU General Public License for more 14d1a890faSShreyas Bhatewara * details. 15d1a890faSShreyas Bhatewara * 16d1a890faSShreyas Bhatewara * You should have received a copy of the GNU General Public License 17d1a890faSShreyas Bhatewara * along with this program; if not, write to the Free Software 18d1a890faSShreyas Bhatewara * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 19d1a890faSShreyas Bhatewara * 20d1a890faSShreyas Bhatewara * The full GNU General Public License is included in this distribution in 21d1a890faSShreyas Bhatewara * the file called "COPYING". 22d1a890faSShreyas Bhatewara * 23d1a890faSShreyas Bhatewara * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com> 24d1a890faSShreyas Bhatewara * 25d1a890faSShreyas Bhatewara */ 26d1a890faSShreyas Bhatewara 279d9779e7SPaul Gortmaker #include <linux/module.h> 28b038b040SStephen Rothwell #include <net/ip6_checksum.h> 29b038b040SStephen Rothwell 30d1a890faSShreyas Bhatewara #include "vmxnet3_int.h" 31d1a890faSShreyas Bhatewara 32d1a890faSShreyas Bhatewara char vmxnet3_driver_name[] = "vmxnet3"; 33d1a890faSShreyas Bhatewara #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver" 34d1a890faSShreyas Bhatewara 35d1a890faSShreyas Bhatewara /* 36d1a890faSShreyas Bhatewara * PCI Device ID Table 37d1a890faSShreyas Bhatewara * Last entry must be all 0s 38d1a890faSShreyas Bhatewara */ 399baa3c34SBenoit Taine static const struct pci_device_id vmxnet3_pciid_table[] = { 40d1a890faSShreyas Bhatewara {PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_VMXNET3)}, 41d1a890faSShreyas Bhatewara {0} 42d1a890faSShreyas Bhatewara }; 43d1a890faSShreyas Bhatewara 44d1a890faSShreyas Bhatewara MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table); 45d1a890faSShreyas Bhatewara 4609c5088eSShreyas Bhatewara static int enable_mq = 1; 47d1a890faSShreyas Bhatewara 48f9f25026SShreyas Bhatewara static void 49f9f25026SShreyas Bhatewara vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac); 50f9f25026SShreyas Bhatewara 51d1a890faSShreyas Bhatewara /* 52d1a890faSShreyas Bhatewara * Enable/Disable the given intr 53d1a890faSShreyas Bhatewara */ 54d1a890faSShreyas Bhatewara static void 55d1a890faSShreyas Bhatewara vmxnet3_enable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx) 56d1a890faSShreyas Bhatewara { 57d1a890faSShreyas Bhatewara VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 0); 58d1a890faSShreyas Bhatewara } 59d1a890faSShreyas Bhatewara 60d1a890faSShreyas Bhatewara 61d1a890faSShreyas Bhatewara static void 62d1a890faSShreyas Bhatewara vmxnet3_disable_intr(struct vmxnet3_adapter *adapter, unsigned 
intr_idx) 63d1a890faSShreyas Bhatewara { 64d1a890faSShreyas Bhatewara VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 1); 65d1a890faSShreyas Bhatewara } 66d1a890faSShreyas Bhatewara 67d1a890faSShreyas Bhatewara 68d1a890faSShreyas Bhatewara /* 69d1a890faSShreyas Bhatewara * Enable/Disable all intrs used by the device 70d1a890faSShreyas Bhatewara */ 71d1a890faSShreyas Bhatewara static void 72d1a890faSShreyas Bhatewara vmxnet3_enable_all_intrs(struct vmxnet3_adapter *adapter) 73d1a890faSShreyas Bhatewara { 74d1a890faSShreyas Bhatewara int i; 75d1a890faSShreyas Bhatewara 76d1a890faSShreyas Bhatewara for (i = 0; i < adapter->intr.num_intrs; i++) 77d1a890faSShreyas Bhatewara vmxnet3_enable_intr(adapter, i); 786929fe8aSRonghua Zang adapter->shared->devRead.intrConf.intrCtrl &= 796929fe8aSRonghua Zang cpu_to_le32(~VMXNET3_IC_DISABLE_ALL); 80d1a890faSShreyas Bhatewara } 81d1a890faSShreyas Bhatewara 82d1a890faSShreyas Bhatewara 83d1a890faSShreyas Bhatewara static void 84d1a890faSShreyas Bhatewara vmxnet3_disable_all_intrs(struct vmxnet3_adapter *adapter) 85d1a890faSShreyas Bhatewara { 86d1a890faSShreyas Bhatewara int i; 87d1a890faSShreyas Bhatewara 886929fe8aSRonghua Zang adapter->shared->devRead.intrConf.intrCtrl |= 896929fe8aSRonghua Zang cpu_to_le32(VMXNET3_IC_DISABLE_ALL); 90d1a890faSShreyas Bhatewara for (i = 0; i < adapter->intr.num_intrs; i++) 91d1a890faSShreyas Bhatewara vmxnet3_disable_intr(adapter, i); 92d1a890faSShreyas Bhatewara } 93d1a890faSShreyas Bhatewara 94d1a890faSShreyas Bhatewara 95d1a890faSShreyas Bhatewara static void 96d1a890faSShreyas Bhatewara vmxnet3_ack_events(struct vmxnet3_adapter *adapter, u32 events) 97d1a890faSShreyas Bhatewara { 98d1a890faSShreyas Bhatewara VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_ECR, events); 99d1a890faSShreyas Bhatewara } 100d1a890faSShreyas Bhatewara 101d1a890faSShreyas Bhatewara 102d1a890faSShreyas Bhatewara static bool 103d1a890faSShreyas Bhatewara vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter) 104d1a890faSShreyas Bhatewara { 10509c5088eSShreyas Bhatewara return tq->stopped; 106d1a890faSShreyas Bhatewara } 107d1a890faSShreyas Bhatewara 108d1a890faSShreyas Bhatewara 109d1a890faSShreyas Bhatewara static void 110d1a890faSShreyas Bhatewara vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter) 111d1a890faSShreyas Bhatewara { 112d1a890faSShreyas Bhatewara tq->stopped = false; 11309c5088eSShreyas Bhatewara netif_start_subqueue(adapter->netdev, tq - adapter->tx_queue); 114d1a890faSShreyas Bhatewara } 115d1a890faSShreyas Bhatewara 116d1a890faSShreyas Bhatewara 117d1a890faSShreyas Bhatewara static void 118d1a890faSShreyas Bhatewara vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter) 119d1a890faSShreyas Bhatewara { 120d1a890faSShreyas Bhatewara tq->stopped = false; 12109c5088eSShreyas Bhatewara netif_wake_subqueue(adapter->netdev, (tq - adapter->tx_queue)); 122d1a890faSShreyas Bhatewara } 123d1a890faSShreyas Bhatewara 124d1a890faSShreyas Bhatewara 125d1a890faSShreyas Bhatewara static void 126d1a890faSShreyas Bhatewara vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter) 127d1a890faSShreyas Bhatewara { 128d1a890faSShreyas Bhatewara tq->stopped = true; 129d1a890faSShreyas Bhatewara tq->num_stop++; 13009c5088eSShreyas Bhatewara netif_stop_subqueue(adapter->netdev, (tq - adapter->tx_queue)); 131d1a890faSShreyas Bhatewara } 132d1a890faSShreyas Bhatewara 133d1a890faSShreyas Bhatewara 134d1a890faSShreyas Bhatewara /* 
135d1a890faSShreyas Bhatewara * Check the link state. This may start or stop the tx queue. 136d1a890faSShreyas Bhatewara */ 137d1a890faSShreyas Bhatewara static void 1384a1745fcSShreyas Bhatewara vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue) 139d1a890faSShreyas Bhatewara { 140d1a890faSShreyas Bhatewara u32 ret; 14109c5088eSShreyas Bhatewara int i; 14283d0feffSShreyas Bhatewara unsigned long flags; 143d1a890faSShreyas Bhatewara 14483d0feffSShreyas Bhatewara spin_lock_irqsave(&adapter->cmd_lock, flags); 145d1a890faSShreyas Bhatewara VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK); 146d1a890faSShreyas Bhatewara ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); 14783d0feffSShreyas Bhatewara spin_unlock_irqrestore(&adapter->cmd_lock, flags); 14883d0feffSShreyas Bhatewara 149d1a890faSShreyas Bhatewara adapter->link_speed = ret >> 16; 150d1a890faSShreyas Bhatewara if (ret & 1) { /* Link is up. */ 151204a6e65SStephen Hemminger netdev_info(adapter->netdev, "NIC Link is Up %d Mbps\n", 152204a6e65SStephen Hemminger adapter->link_speed); 153d1a890faSShreyas Bhatewara netif_carrier_on(adapter->netdev); 154d1a890faSShreyas Bhatewara 15509c5088eSShreyas Bhatewara if (affectTxQueue) { 15609c5088eSShreyas Bhatewara for (i = 0; i < adapter->num_tx_queues; i++) 15709c5088eSShreyas Bhatewara vmxnet3_tq_start(&adapter->tx_queue[i], 15809c5088eSShreyas Bhatewara adapter); 15909c5088eSShreyas Bhatewara } 160d1a890faSShreyas Bhatewara } else { 161204a6e65SStephen Hemminger netdev_info(adapter->netdev, "NIC Link is Down\n"); 162d1a890faSShreyas Bhatewara netif_carrier_off(adapter->netdev); 163d1a890faSShreyas Bhatewara 16409c5088eSShreyas Bhatewara if (affectTxQueue) { 16509c5088eSShreyas Bhatewara for (i = 0; i < adapter->num_tx_queues; i++) 16609c5088eSShreyas Bhatewara vmxnet3_tq_stop(&adapter->tx_queue[i], adapter); 16709c5088eSShreyas Bhatewara } 168d1a890faSShreyas Bhatewara } 169d1a890faSShreyas Bhatewara } 170d1a890faSShreyas Bhatewara 171d1a890faSShreyas Bhatewara static void 172d1a890faSShreyas Bhatewara vmxnet3_process_events(struct vmxnet3_adapter *adapter) 173d1a890faSShreyas Bhatewara { 17409c5088eSShreyas Bhatewara int i; 175e328d410SRoland Dreier unsigned long flags; 176115924b6SShreyas Bhatewara u32 events = le32_to_cpu(adapter->shared->ecr); 177d1a890faSShreyas Bhatewara if (!events) 178d1a890faSShreyas Bhatewara return; 179d1a890faSShreyas Bhatewara 180d1a890faSShreyas Bhatewara vmxnet3_ack_events(adapter, events); 181d1a890faSShreyas Bhatewara 182d1a890faSShreyas Bhatewara /* Check if link state has changed */ 183d1a890faSShreyas Bhatewara if (events & VMXNET3_ECR_LINK) 1844a1745fcSShreyas Bhatewara vmxnet3_check_link(adapter, true); 185d1a890faSShreyas Bhatewara 186d1a890faSShreyas Bhatewara /* Check if there is an error on xmit/recv queues */ 187d1a890faSShreyas Bhatewara if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) { 188e328d410SRoland Dreier spin_lock_irqsave(&adapter->cmd_lock, flags); 189d1a890faSShreyas Bhatewara VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 190d1a890faSShreyas Bhatewara VMXNET3_CMD_GET_QUEUE_STATUS); 191e328d410SRoland Dreier spin_unlock_irqrestore(&adapter->cmd_lock, flags); 192d1a890faSShreyas Bhatewara 19309c5088eSShreyas Bhatewara for (i = 0; i < adapter->num_tx_queues; i++) 19409c5088eSShreyas Bhatewara if (adapter->tqd_start[i].status.stopped) 19509c5088eSShreyas Bhatewara dev_err(&adapter->netdev->dev, 19609c5088eSShreyas Bhatewara "%s: tq[%d] error 0x%x\n", 19709c5088eSShreyas Bhatewara 
adapter->netdev->name, i, le32_to_cpu( 19809c5088eSShreyas Bhatewara adapter->tqd_start[i].status.error)); 19909c5088eSShreyas Bhatewara for (i = 0; i < adapter->num_rx_queues; i++) 20009c5088eSShreyas Bhatewara if (adapter->rqd_start[i].status.stopped) 20109c5088eSShreyas Bhatewara dev_err(&adapter->netdev->dev, 20209c5088eSShreyas Bhatewara "%s: rq[%d] error 0x%x\n", 20309c5088eSShreyas Bhatewara adapter->netdev->name, i, 20409c5088eSShreyas Bhatewara adapter->rqd_start[i].status.error); 205d1a890faSShreyas Bhatewara 206d1a890faSShreyas Bhatewara schedule_work(&adapter->work); 207d1a890faSShreyas Bhatewara } 208d1a890faSShreyas Bhatewara } 209d1a890faSShreyas Bhatewara 210115924b6SShreyas Bhatewara #ifdef __BIG_ENDIAN_BITFIELD 211115924b6SShreyas Bhatewara /* 212115924b6SShreyas Bhatewara * The device expects the bitfields in shared structures to be written in 213115924b6SShreyas Bhatewara * little endian. When CPU is big endian, the following routines are used to 214115924b6SShreyas Bhatewara * correctly read and write into ABI. 215115924b6SShreyas Bhatewara * The general technique used here is : double word bitfields are defined in 216115924b6SShreyas Bhatewara * opposite order for big endian architecture. Then before reading them in 217115924b6SShreyas Bhatewara * driver the complete double word is translated using le32_to_cpu. Similarly 218115924b6SShreyas Bhatewara * After the driver writes into bitfields, cpu_to_le32 is used to translate the 219115924b6SShreyas Bhatewara * double words into required format. 220115924b6SShreyas Bhatewara * In order to avoid touching bits in shared structure more than once, temporary 221115924b6SShreyas Bhatewara * descriptors are used. These are passed as srcDesc to following functions. 222115924b6SShreyas Bhatewara */ 223115924b6SShreyas Bhatewara static void vmxnet3_RxDescToCPU(const struct Vmxnet3_RxDesc *srcDesc, 224115924b6SShreyas Bhatewara struct Vmxnet3_RxDesc *dstDesc) 225115924b6SShreyas Bhatewara { 226115924b6SShreyas Bhatewara u32 *src = (u32 *)srcDesc + 2; 227115924b6SShreyas Bhatewara u32 *dst = (u32 *)dstDesc + 2; 228115924b6SShreyas Bhatewara dstDesc->addr = le64_to_cpu(srcDesc->addr); 229115924b6SShreyas Bhatewara *dst = le32_to_cpu(*src); 230115924b6SShreyas Bhatewara dstDesc->ext1 = le32_to_cpu(srcDesc->ext1); 231115924b6SShreyas Bhatewara } 232115924b6SShreyas Bhatewara 233115924b6SShreyas Bhatewara static void vmxnet3_TxDescToLe(const struct Vmxnet3_TxDesc *srcDesc, 234115924b6SShreyas Bhatewara struct Vmxnet3_TxDesc *dstDesc) 235115924b6SShreyas Bhatewara { 236115924b6SShreyas Bhatewara int i; 237115924b6SShreyas Bhatewara u32 *src = (u32 *)(srcDesc + 1); 238115924b6SShreyas Bhatewara u32 *dst = (u32 *)(dstDesc + 1); 239115924b6SShreyas Bhatewara 240115924b6SShreyas Bhatewara /* Working backwards so that the gen bit is set at the end. 
*/ 241115924b6SShreyas Bhatewara for (i = 2; i > 0; i--) { 242115924b6SShreyas Bhatewara src--; 243115924b6SShreyas Bhatewara dst--; 244115924b6SShreyas Bhatewara *dst = cpu_to_le32(*src); 245115924b6SShreyas Bhatewara } 246115924b6SShreyas Bhatewara } 247115924b6SShreyas Bhatewara 248115924b6SShreyas Bhatewara 249115924b6SShreyas Bhatewara static void vmxnet3_RxCompToCPU(const struct Vmxnet3_RxCompDesc *srcDesc, 250115924b6SShreyas Bhatewara struct Vmxnet3_RxCompDesc *dstDesc) 251115924b6SShreyas Bhatewara { 252115924b6SShreyas Bhatewara int i = 0; 253115924b6SShreyas Bhatewara u32 *src = (u32 *)srcDesc; 254115924b6SShreyas Bhatewara u32 *dst = (u32 *)dstDesc; 255115924b6SShreyas Bhatewara for (i = 0; i < sizeof(struct Vmxnet3_RxCompDesc) / sizeof(u32); i++) { 256115924b6SShreyas Bhatewara *dst = le32_to_cpu(*src); 257115924b6SShreyas Bhatewara src++; 258115924b6SShreyas Bhatewara dst++; 259115924b6SShreyas Bhatewara } 260115924b6SShreyas Bhatewara } 261115924b6SShreyas Bhatewara 262115924b6SShreyas Bhatewara 263115924b6SShreyas Bhatewara /* Used to read bitfield values from double words. */ 264115924b6SShreyas Bhatewara static u32 get_bitfield32(const __le32 *bitfield, u32 pos, u32 size) 265115924b6SShreyas Bhatewara { 266115924b6SShreyas Bhatewara u32 temp = le32_to_cpu(*bitfield); 267115924b6SShreyas Bhatewara u32 mask = ((1 << size) - 1) << pos; 268115924b6SShreyas Bhatewara temp &= mask; 269115924b6SShreyas Bhatewara temp >>= pos; 270115924b6SShreyas Bhatewara return temp; 271115924b6SShreyas Bhatewara } 272115924b6SShreyas Bhatewara 273115924b6SShreyas Bhatewara 274115924b6SShreyas Bhatewara 275115924b6SShreyas Bhatewara #endif /* __BIG_ENDIAN_BITFIELD */ 276115924b6SShreyas Bhatewara 277115924b6SShreyas Bhatewara #ifdef __BIG_ENDIAN_BITFIELD 278115924b6SShreyas Bhatewara 279115924b6SShreyas Bhatewara # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \ 280115924b6SShreyas Bhatewara txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \ 281115924b6SShreyas Bhatewara VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE) 282115924b6SShreyas Bhatewara # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \ 283115924b6SShreyas Bhatewara txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \ 284115924b6SShreyas Bhatewara VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE) 285115924b6SShreyas Bhatewara # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \ 286115924b6SShreyas Bhatewara VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \ 287115924b6SShreyas Bhatewara VMXNET3_TCD_GEN_SIZE) 288115924b6SShreyas Bhatewara # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \ 289115924b6SShreyas Bhatewara VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE) 290115924b6SShreyas Bhatewara # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \ 291115924b6SShreyas Bhatewara (dstrcd) = (tmp); \ 292115924b6SShreyas Bhatewara vmxnet3_RxCompToCPU((rcd), (tmp)); \ 293115924b6SShreyas Bhatewara } while (0) 294115924b6SShreyas Bhatewara # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \ 295115924b6SShreyas Bhatewara (dstrxd) = (tmp); \ 296115924b6SShreyas Bhatewara vmxnet3_RxDescToCPU((rxd), (tmp)); \ 297115924b6SShreyas Bhatewara } while (0) 298115924b6SShreyas Bhatewara 299115924b6SShreyas Bhatewara #else 300115924b6SShreyas Bhatewara 301115924b6SShreyas Bhatewara # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen) 302115924b6SShreyas Bhatewara # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop) 303115924b6SShreyas Bhatewara # define VMXNET3_TCD_GET_GEN(tcd) 
((tcd)->gen) 304115924b6SShreyas Bhatewara # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx) 305115924b6SShreyas Bhatewara # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd) 306115924b6SShreyas Bhatewara # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd) 307115924b6SShreyas Bhatewara 308115924b6SShreyas Bhatewara #endif /* __BIG_ENDIAN_BITFIELD */ 309115924b6SShreyas Bhatewara 310d1a890faSShreyas Bhatewara 311d1a890faSShreyas Bhatewara static void 312d1a890faSShreyas Bhatewara vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi, 313d1a890faSShreyas Bhatewara struct pci_dev *pdev) 314d1a890faSShreyas Bhatewara { 315d1a890faSShreyas Bhatewara if (tbi->map_type == VMXNET3_MAP_SINGLE) 316b0eb57cbSAndy King dma_unmap_single(&pdev->dev, tbi->dma_addr, tbi->len, 317d1a890faSShreyas Bhatewara PCI_DMA_TODEVICE); 318d1a890faSShreyas Bhatewara else if (tbi->map_type == VMXNET3_MAP_PAGE) 319b0eb57cbSAndy King dma_unmap_page(&pdev->dev, tbi->dma_addr, tbi->len, 320d1a890faSShreyas Bhatewara PCI_DMA_TODEVICE); 321d1a890faSShreyas Bhatewara else 322d1a890faSShreyas Bhatewara BUG_ON(tbi->map_type != VMXNET3_MAP_NONE); 323d1a890faSShreyas Bhatewara 324d1a890faSShreyas Bhatewara tbi->map_type = VMXNET3_MAP_NONE; /* to help debugging */ 325d1a890faSShreyas Bhatewara } 326d1a890faSShreyas Bhatewara 327d1a890faSShreyas Bhatewara 328d1a890faSShreyas Bhatewara static int 329d1a890faSShreyas Bhatewara vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq, 330d1a890faSShreyas Bhatewara struct pci_dev *pdev, struct vmxnet3_adapter *adapter) 331d1a890faSShreyas Bhatewara { 332d1a890faSShreyas Bhatewara struct sk_buff *skb; 333d1a890faSShreyas Bhatewara int entries = 0; 334d1a890faSShreyas Bhatewara 335d1a890faSShreyas Bhatewara /* no out of order completion */ 336d1a890faSShreyas Bhatewara BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp); 337115924b6SShreyas Bhatewara BUG_ON(VMXNET3_TXDESC_GET_EOP(&(tq->tx_ring.base[eop_idx].txd)) != 1); 338d1a890faSShreyas Bhatewara 339d1a890faSShreyas Bhatewara skb = tq->buf_info[eop_idx].skb; 340d1a890faSShreyas Bhatewara BUG_ON(skb == NULL); 341d1a890faSShreyas Bhatewara tq->buf_info[eop_idx].skb = NULL; 342d1a890faSShreyas Bhatewara 343d1a890faSShreyas Bhatewara VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size); 344d1a890faSShreyas Bhatewara 345d1a890faSShreyas Bhatewara while (tq->tx_ring.next2comp != eop_idx) { 346d1a890faSShreyas Bhatewara vmxnet3_unmap_tx_buf(tq->buf_info + tq->tx_ring.next2comp, 347d1a890faSShreyas Bhatewara pdev); 348d1a890faSShreyas Bhatewara 349d1a890faSShreyas Bhatewara /* update next2comp w/o tx_lock. Since we are marking more, 350d1a890faSShreyas Bhatewara * instead of less, tx ring entries avail, the worst case is 351d1a890faSShreyas Bhatewara * that the tx routine incorrectly re-queues a pkt due to 352d1a890faSShreyas Bhatewara * insufficient tx ring entries. 
353d1a890faSShreyas Bhatewara */ 354d1a890faSShreyas Bhatewara vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring); 355d1a890faSShreyas Bhatewara entries++; 356d1a890faSShreyas Bhatewara } 357d1a890faSShreyas Bhatewara 358d1a890faSShreyas Bhatewara dev_kfree_skb_any(skb); 359d1a890faSShreyas Bhatewara return entries; 360d1a890faSShreyas Bhatewara } 361d1a890faSShreyas Bhatewara 362d1a890faSShreyas Bhatewara 363d1a890faSShreyas Bhatewara static int 364d1a890faSShreyas Bhatewara vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq, 365d1a890faSShreyas Bhatewara struct vmxnet3_adapter *adapter) 366d1a890faSShreyas Bhatewara { 367d1a890faSShreyas Bhatewara int completed = 0; 368d1a890faSShreyas Bhatewara union Vmxnet3_GenericDesc *gdesc; 369d1a890faSShreyas Bhatewara 370d1a890faSShreyas Bhatewara gdesc = tq->comp_ring.base + tq->comp_ring.next2proc; 371115924b6SShreyas Bhatewara while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) { 372115924b6SShreyas Bhatewara completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX( 373115924b6SShreyas Bhatewara &gdesc->tcd), tq, adapter->pdev, 374115924b6SShreyas Bhatewara adapter); 375d1a890faSShreyas Bhatewara 376d1a890faSShreyas Bhatewara vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring); 377d1a890faSShreyas Bhatewara gdesc = tq->comp_ring.base + tq->comp_ring.next2proc; 378d1a890faSShreyas Bhatewara } 379d1a890faSShreyas Bhatewara 380d1a890faSShreyas Bhatewara if (completed) { 381d1a890faSShreyas Bhatewara spin_lock(&tq->tx_lock); 382d1a890faSShreyas Bhatewara if (unlikely(vmxnet3_tq_stopped(tq, adapter) && 383d1a890faSShreyas Bhatewara vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) > 384d1a890faSShreyas Bhatewara VMXNET3_WAKE_QUEUE_THRESHOLD(tq) && 385d1a890faSShreyas Bhatewara netif_carrier_ok(adapter->netdev))) { 386d1a890faSShreyas Bhatewara vmxnet3_tq_wake(tq, adapter); 387d1a890faSShreyas Bhatewara } 388d1a890faSShreyas Bhatewara spin_unlock(&tq->tx_lock); 389d1a890faSShreyas Bhatewara } 390d1a890faSShreyas Bhatewara return completed; 391d1a890faSShreyas Bhatewara } 392d1a890faSShreyas Bhatewara 393d1a890faSShreyas Bhatewara 394d1a890faSShreyas Bhatewara static void 395d1a890faSShreyas Bhatewara vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq, 396d1a890faSShreyas Bhatewara struct vmxnet3_adapter *adapter) 397d1a890faSShreyas Bhatewara { 398d1a890faSShreyas Bhatewara int i; 399d1a890faSShreyas Bhatewara 400d1a890faSShreyas Bhatewara while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) { 401d1a890faSShreyas Bhatewara struct vmxnet3_tx_buf_info *tbi; 402d1a890faSShreyas Bhatewara 403d1a890faSShreyas Bhatewara tbi = tq->buf_info + tq->tx_ring.next2comp; 404d1a890faSShreyas Bhatewara 405d1a890faSShreyas Bhatewara vmxnet3_unmap_tx_buf(tbi, adapter->pdev); 406d1a890faSShreyas Bhatewara if (tbi->skb) { 407d1a890faSShreyas Bhatewara dev_kfree_skb_any(tbi->skb); 408d1a890faSShreyas Bhatewara tbi->skb = NULL; 409d1a890faSShreyas Bhatewara } 410d1a890faSShreyas Bhatewara vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring); 411d1a890faSShreyas Bhatewara } 412d1a890faSShreyas Bhatewara 413d1a890faSShreyas Bhatewara /* sanity check, verify all buffers are indeed unmapped and freed */ 414d1a890faSShreyas Bhatewara for (i = 0; i < tq->tx_ring.size; i++) { 415d1a890faSShreyas Bhatewara BUG_ON(tq->buf_info[i].skb != NULL || 416d1a890faSShreyas Bhatewara tq->buf_info[i].map_type != VMXNET3_MAP_NONE); 417d1a890faSShreyas Bhatewara } 418d1a890faSShreyas Bhatewara 419d1a890faSShreyas Bhatewara tq->tx_ring.gen = VMXNET3_INIT_GEN; 420d1a890faSShreyas Bhatewara tq->tx_ring.next2fill = 
tq->tx_ring.next2comp = 0; 421d1a890faSShreyas Bhatewara 422d1a890faSShreyas Bhatewara tq->comp_ring.gen = VMXNET3_INIT_GEN; 423d1a890faSShreyas Bhatewara tq->comp_ring.next2proc = 0; 424d1a890faSShreyas Bhatewara } 425d1a890faSShreyas Bhatewara 426d1a890faSShreyas Bhatewara 42709c5088eSShreyas Bhatewara static void 428d1a890faSShreyas Bhatewara vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq, 429d1a890faSShreyas Bhatewara struct vmxnet3_adapter *adapter) 430d1a890faSShreyas Bhatewara { 431d1a890faSShreyas Bhatewara if (tq->tx_ring.base) { 432b0eb57cbSAndy King dma_free_coherent(&adapter->pdev->dev, tq->tx_ring.size * 433d1a890faSShreyas Bhatewara sizeof(struct Vmxnet3_TxDesc), 434d1a890faSShreyas Bhatewara tq->tx_ring.base, tq->tx_ring.basePA); 435d1a890faSShreyas Bhatewara tq->tx_ring.base = NULL; 436d1a890faSShreyas Bhatewara } 437d1a890faSShreyas Bhatewara if (tq->data_ring.base) { 438b0eb57cbSAndy King dma_free_coherent(&adapter->pdev->dev, tq->data_ring.size * 439d1a890faSShreyas Bhatewara sizeof(struct Vmxnet3_TxDataDesc), 440d1a890faSShreyas Bhatewara tq->data_ring.base, tq->data_ring.basePA); 441d1a890faSShreyas Bhatewara tq->data_ring.base = NULL; 442d1a890faSShreyas Bhatewara } 443d1a890faSShreyas Bhatewara if (tq->comp_ring.base) { 444b0eb57cbSAndy King dma_free_coherent(&adapter->pdev->dev, tq->comp_ring.size * 445d1a890faSShreyas Bhatewara sizeof(struct Vmxnet3_TxCompDesc), 446d1a890faSShreyas Bhatewara tq->comp_ring.base, tq->comp_ring.basePA); 447d1a890faSShreyas Bhatewara tq->comp_ring.base = NULL; 448d1a890faSShreyas Bhatewara } 449b0eb57cbSAndy King if (tq->buf_info) { 450b0eb57cbSAndy King dma_free_coherent(&adapter->pdev->dev, 451b0eb57cbSAndy King tq->tx_ring.size * sizeof(tq->buf_info[0]), 452b0eb57cbSAndy King tq->buf_info, tq->buf_info_pa); 453d1a890faSShreyas Bhatewara tq->buf_info = NULL; 454d1a890faSShreyas Bhatewara } 455b0eb57cbSAndy King } 456d1a890faSShreyas Bhatewara 457d1a890faSShreyas Bhatewara 45809c5088eSShreyas Bhatewara /* Destroy all tx queues */ 45909c5088eSShreyas Bhatewara void 46009c5088eSShreyas Bhatewara vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter) 46109c5088eSShreyas Bhatewara { 46209c5088eSShreyas Bhatewara int i; 46309c5088eSShreyas Bhatewara 46409c5088eSShreyas Bhatewara for (i = 0; i < adapter->num_tx_queues; i++) 46509c5088eSShreyas Bhatewara vmxnet3_tq_destroy(&adapter->tx_queue[i], adapter); 46609c5088eSShreyas Bhatewara } 46709c5088eSShreyas Bhatewara 46809c5088eSShreyas Bhatewara 469d1a890faSShreyas Bhatewara static void 470d1a890faSShreyas Bhatewara vmxnet3_tq_init(struct vmxnet3_tx_queue *tq, 471d1a890faSShreyas Bhatewara struct vmxnet3_adapter *adapter) 472d1a890faSShreyas Bhatewara { 473d1a890faSShreyas Bhatewara int i; 474d1a890faSShreyas Bhatewara 475d1a890faSShreyas Bhatewara /* reset the tx ring contents to 0 and reset the tx ring states */ 476d1a890faSShreyas Bhatewara memset(tq->tx_ring.base, 0, tq->tx_ring.size * 477d1a890faSShreyas Bhatewara sizeof(struct Vmxnet3_TxDesc)); 478d1a890faSShreyas Bhatewara tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0; 479d1a890faSShreyas Bhatewara tq->tx_ring.gen = VMXNET3_INIT_GEN; 480d1a890faSShreyas Bhatewara 481d1a890faSShreyas Bhatewara memset(tq->data_ring.base, 0, tq->data_ring.size * 482d1a890faSShreyas Bhatewara sizeof(struct Vmxnet3_TxDataDesc)); 483d1a890faSShreyas Bhatewara 484d1a890faSShreyas Bhatewara /* reset the tx comp ring contents to 0 and reset comp ring states */ 485d1a890faSShreyas Bhatewara memset(tq->comp_ring.base, 0, tq->comp_ring.size * 
486d1a890faSShreyas Bhatewara sizeof(struct Vmxnet3_TxCompDesc)); 487d1a890faSShreyas Bhatewara tq->comp_ring.next2proc = 0; 488d1a890faSShreyas Bhatewara tq->comp_ring.gen = VMXNET3_INIT_GEN; 489d1a890faSShreyas Bhatewara 490d1a890faSShreyas Bhatewara /* reset the bookkeeping data */ 491d1a890faSShreyas Bhatewara memset(tq->buf_info, 0, sizeof(tq->buf_info[0]) * tq->tx_ring.size); 492d1a890faSShreyas Bhatewara for (i = 0; i < tq->tx_ring.size; i++) 493d1a890faSShreyas Bhatewara tq->buf_info[i].map_type = VMXNET3_MAP_NONE; 494d1a890faSShreyas Bhatewara 495d1a890faSShreyas Bhatewara /* stats are not reset */ 496d1a890faSShreyas Bhatewara } 497d1a890faSShreyas Bhatewara 498d1a890faSShreyas Bhatewara 499d1a890faSShreyas Bhatewara static int 500d1a890faSShreyas Bhatewara vmxnet3_tq_create(struct vmxnet3_tx_queue *tq, 501d1a890faSShreyas Bhatewara struct vmxnet3_adapter *adapter) 502d1a890faSShreyas Bhatewara { 503b0eb57cbSAndy King size_t sz; 504b0eb57cbSAndy King 505d1a890faSShreyas Bhatewara BUG_ON(tq->tx_ring.base || tq->data_ring.base || 506d1a890faSShreyas Bhatewara tq->comp_ring.base || tq->buf_info); 507d1a890faSShreyas Bhatewara 508b0eb57cbSAndy King tq->tx_ring.base = dma_alloc_coherent(&adapter->pdev->dev, 509b0eb57cbSAndy King tq->tx_ring.size * sizeof(struct Vmxnet3_TxDesc), 510b0eb57cbSAndy King &tq->tx_ring.basePA, GFP_KERNEL); 511d1a890faSShreyas Bhatewara if (!tq->tx_ring.base) { 512204a6e65SStephen Hemminger netdev_err(adapter->netdev, "failed to allocate tx ring\n"); 513d1a890faSShreyas Bhatewara goto err; 514d1a890faSShreyas Bhatewara } 515d1a890faSShreyas Bhatewara 516b0eb57cbSAndy King tq->data_ring.base = dma_alloc_coherent(&adapter->pdev->dev, 517b0eb57cbSAndy King tq->data_ring.size * sizeof(struct Vmxnet3_TxDataDesc), 518b0eb57cbSAndy King &tq->data_ring.basePA, GFP_KERNEL); 519d1a890faSShreyas Bhatewara if (!tq->data_ring.base) { 520204a6e65SStephen Hemminger netdev_err(adapter->netdev, "failed to allocate data ring\n"); 521d1a890faSShreyas Bhatewara goto err; 522d1a890faSShreyas Bhatewara } 523d1a890faSShreyas Bhatewara 524b0eb57cbSAndy King tq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev, 525b0eb57cbSAndy King tq->comp_ring.size * sizeof(struct Vmxnet3_TxCompDesc), 526b0eb57cbSAndy King &tq->comp_ring.basePA, GFP_KERNEL); 527d1a890faSShreyas Bhatewara if (!tq->comp_ring.base) { 528204a6e65SStephen Hemminger netdev_err(adapter->netdev, "failed to allocate tx comp ring\n"); 529d1a890faSShreyas Bhatewara goto err; 530d1a890faSShreyas Bhatewara } 531d1a890faSShreyas Bhatewara 532b0eb57cbSAndy King sz = tq->tx_ring.size * sizeof(tq->buf_info[0]); 533b0eb57cbSAndy King tq->buf_info = dma_zalloc_coherent(&adapter->pdev->dev, sz, 534b0eb57cbSAndy King &tq->buf_info_pa, GFP_KERNEL); 535e404decbSJoe Perches if (!tq->buf_info) 536d1a890faSShreyas Bhatewara goto err; 537d1a890faSShreyas Bhatewara 538d1a890faSShreyas Bhatewara return 0; 539d1a890faSShreyas Bhatewara 540d1a890faSShreyas Bhatewara err: 541d1a890faSShreyas Bhatewara vmxnet3_tq_destroy(tq, adapter); 542d1a890faSShreyas Bhatewara return -ENOMEM; 543d1a890faSShreyas Bhatewara } 544d1a890faSShreyas Bhatewara 54509c5088eSShreyas Bhatewara static void 54609c5088eSShreyas Bhatewara vmxnet3_tq_cleanup_all(struct vmxnet3_adapter *adapter) 54709c5088eSShreyas Bhatewara { 54809c5088eSShreyas Bhatewara int i; 54909c5088eSShreyas Bhatewara 55009c5088eSShreyas Bhatewara for (i = 0; i < adapter->num_tx_queues; i++) 55109c5088eSShreyas Bhatewara vmxnet3_tq_cleanup(&adapter->tx_queue[i], adapter); 55209c5088eSShreyas 
Bhatewara } 553d1a890faSShreyas Bhatewara 554d1a890faSShreyas Bhatewara /* 555d1a890faSShreyas Bhatewara * starting from ring->next2fill, allocate rx buffers for the given ring 556d1a890faSShreyas Bhatewara * of the rx queue and update the rx desc. stop after @num_to_alloc buffers 557d1a890faSShreyas Bhatewara * are allocated or allocation fails 558d1a890faSShreyas Bhatewara */ 559d1a890faSShreyas Bhatewara 560d1a890faSShreyas Bhatewara static int 561d1a890faSShreyas Bhatewara vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx, 562d1a890faSShreyas Bhatewara int num_to_alloc, struct vmxnet3_adapter *adapter) 563d1a890faSShreyas Bhatewara { 564d1a890faSShreyas Bhatewara int num_allocated = 0; 565d1a890faSShreyas Bhatewara struct vmxnet3_rx_buf_info *rbi_base = rq->buf_info[ring_idx]; 566d1a890faSShreyas Bhatewara struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx]; 567d1a890faSShreyas Bhatewara u32 val; 568d1a890faSShreyas Bhatewara 5695318d809SShreyas Bhatewara while (num_allocated <= num_to_alloc) { 570d1a890faSShreyas Bhatewara struct vmxnet3_rx_buf_info *rbi; 571d1a890faSShreyas Bhatewara union Vmxnet3_GenericDesc *gd; 572d1a890faSShreyas Bhatewara 573d1a890faSShreyas Bhatewara rbi = rbi_base + ring->next2fill; 574d1a890faSShreyas Bhatewara gd = ring->base + ring->next2fill; 575d1a890faSShreyas Bhatewara 576d1a890faSShreyas Bhatewara if (rbi->buf_type == VMXNET3_RX_BUF_SKB) { 577d1a890faSShreyas Bhatewara if (rbi->skb == NULL) { 5780d735f13SStephen Hemminger rbi->skb = __netdev_alloc_skb_ip_align(adapter->netdev, 5790d735f13SStephen Hemminger rbi->len, 5800d735f13SStephen Hemminger GFP_KERNEL); 581d1a890faSShreyas Bhatewara if (unlikely(rbi->skb == NULL)) { 582d1a890faSShreyas Bhatewara rq->stats.rx_buf_alloc_failure++; 583d1a890faSShreyas Bhatewara break; 584d1a890faSShreyas Bhatewara } 585d1a890faSShreyas Bhatewara 586b0eb57cbSAndy King rbi->dma_addr = dma_map_single( 587b0eb57cbSAndy King &adapter->pdev->dev, 588d1a890faSShreyas Bhatewara rbi->skb->data, rbi->len, 589d1a890faSShreyas Bhatewara PCI_DMA_FROMDEVICE); 5905738a09dSAlexey Khoroshilov if (dma_mapping_error(&adapter->pdev->dev, 5915738a09dSAlexey Khoroshilov rbi->dma_addr)) { 5925738a09dSAlexey Khoroshilov dev_kfree_skb_any(rbi->skb); 5935738a09dSAlexey Khoroshilov rq->stats.rx_buf_alloc_failure++; 5945738a09dSAlexey Khoroshilov break; 5955738a09dSAlexey Khoroshilov } 596d1a890faSShreyas Bhatewara } else { 597d1a890faSShreyas Bhatewara /* rx buffer skipped by the device */ 598d1a890faSShreyas Bhatewara } 599d1a890faSShreyas Bhatewara val = VMXNET3_RXD_BTYPE_HEAD << VMXNET3_RXD_BTYPE_SHIFT; 600d1a890faSShreyas Bhatewara } else { 601d1a890faSShreyas Bhatewara BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE || 602d1a890faSShreyas Bhatewara rbi->len != PAGE_SIZE); 603d1a890faSShreyas Bhatewara 604d1a890faSShreyas Bhatewara if (rbi->page == NULL) { 605d1a890faSShreyas Bhatewara rbi->page = alloc_page(GFP_ATOMIC); 606d1a890faSShreyas Bhatewara if (unlikely(rbi->page == NULL)) { 607d1a890faSShreyas Bhatewara rq->stats.rx_buf_alloc_failure++; 608d1a890faSShreyas Bhatewara break; 609d1a890faSShreyas Bhatewara } 610b0eb57cbSAndy King rbi->dma_addr = dma_map_page( 611b0eb57cbSAndy King &adapter->pdev->dev, 612d1a890faSShreyas Bhatewara rbi->page, 0, PAGE_SIZE, 613d1a890faSShreyas Bhatewara PCI_DMA_FROMDEVICE); 6145738a09dSAlexey Khoroshilov if (dma_mapping_error(&adapter->pdev->dev, 6155738a09dSAlexey Khoroshilov rbi->dma_addr)) { 6165738a09dSAlexey Khoroshilov put_page(rbi->page); 6175738a09dSAlexey Khoroshilov 
rq->stats.rx_buf_alloc_failure++; 6185738a09dSAlexey Khoroshilov break; 6195738a09dSAlexey Khoroshilov } 620d1a890faSShreyas Bhatewara } else { 621d1a890faSShreyas Bhatewara /* rx buffers skipped by the device */ 622d1a890faSShreyas Bhatewara } 623d1a890faSShreyas Bhatewara val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT; 624d1a890faSShreyas Bhatewara } 625d1a890faSShreyas Bhatewara 626115924b6SShreyas Bhatewara gd->rxd.addr = cpu_to_le64(rbi->dma_addr); 6275318d809SShreyas Bhatewara gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT) 628115924b6SShreyas Bhatewara | val | rbi->len); 629d1a890faSShreyas Bhatewara 6305318d809SShreyas Bhatewara /* Fill the last buffer but dont mark it ready, or else the 6315318d809SShreyas Bhatewara * device will think that the queue is full */ 6325318d809SShreyas Bhatewara if (num_allocated == num_to_alloc) 6335318d809SShreyas Bhatewara break; 6345318d809SShreyas Bhatewara 6355318d809SShreyas Bhatewara gd->dword[2] |= cpu_to_le32(ring->gen << VMXNET3_RXD_GEN_SHIFT); 636d1a890faSShreyas Bhatewara num_allocated++; 637d1a890faSShreyas Bhatewara vmxnet3_cmd_ring_adv_next2fill(ring); 638d1a890faSShreyas Bhatewara } 639d1a890faSShreyas Bhatewara 640fdcd79b9SStephen Hemminger netdev_dbg(adapter->netdev, 64169b9a712SStephen Hemminger "alloc_rx_buf: %d allocated, next2fill %u, next2comp %u\n", 64269b9a712SStephen Hemminger num_allocated, ring->next2fill, ring->next2comp); 643d1a890faSShreyas Bhatewara 644d1a890faSShreyas Bhatewara /* so that the device can distinguish a full ring and an empty ring */ 645d1a890faSShreyas Bhatewara BUG_ON(num_allocated != 0 && ring->next2fill == ring->next2comp); 646d1a890faSShreyas Bhatewara 647d1a890faSShreyas Bhatewara return num_allocated; 648d1a890faSShreyas Bhatewara } 649d1a890faSShreyas Bhatewara 650d1a890faSShreyas Bhatewara 651d1a890faSShreyas Bhatewara static void 652d1a890faSShreyas Bhatewara vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd, 653d1a890faSShreyas Bhatewara struct vmxnet3_rx_buf_info *rbi) 654d1a890faSShreyas Bhatewara { 655d1a890faSShreyas Bhatewara struct skb_frag_struct *frag = skb_shinfo(skb)->frags + 656d1a890faSShreyas Bhatewara skb_shinfo(skb)->nr_frags; 657d1a890faSShreyas Bhatewara 658d1a890faSShreyas Bhatewara BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS); 659d1a890faSShreyas Bhatewara 6600e0634d2SIan Campbell __skb_frag_set_page(frag, rbi->page); 661d1a890faSShreyas Bhatewara frag->page_offset = 0; 6629e903e08SEric Dumazet skb_frag_size_set(frag, rcd->len); 6639e903e08SEric Dumazet skb->data_len += rcd->len; 6645e6c355cSEric Dumazet skb->truesize += PAGE_SIZE; 665d1a890faSShreyas Bhatewara skb_shinfo(skb)->nr_frags++; 666d1a890faSShreyas Bhatewara } 667d1a890faSShreyas Bhatewara 668d1a890faSShreyas Bhatewara 6695738a09dSAlexey Khoroshilov static int 670d1a890faSShreyas Bhatewara vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx, 671d1a890faSShreyas Bhatewara struct vmxnet3_tx_queue *tq, struct pci_dev *pdev, 672d1a890faSShreyas Bhatewara struct vmxnet3_adapter *adapter) 673d1a890faSShreyas Bhatewara { 674d1a890faSShreyas Bhatewara u32 dw2, len; 675d1a890faSShreyas Bhatewara unsigned long buf_offset; 676d1a890faSShreyas Bhatewara int i; 677d1a890faSShreyas Bhatewara union Vmxnet3_GenericDesc *gdesc; 678d1a890faSShreyas Bhatewara struct vmxnet3_tx_buf_info *tbi = NULL; 679d1a890faSShreyas Bhatewara 680d1a890faSShreyas Bhatewara BUG_ON(ctx->copy_size > skb_headlen(skb)); 681d1a890faSShreyas Bhatewara 682d1a890faSShreyas Bhatewara /* use the 
previous gen bit for the SOP desc */ 683d1a890faSShreyas Bhatewara dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT; 684d1a890faSShreyas Bhatewara 685d1a890faSShreyas Bhatewara ctx->sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill; 686d1a890faSShreyas Bhatewara gdesc = ctx->sop_txd; /* both loops below can be skipped */ 687d1a890faSShreyas Bhatewara 688d1a890faSShreyas Bhatewara /* no need to map the buffer if headers are copied */ 689d1a890faSShreyas Bhatewara if (ctx->copy_size) { 690115924b6SShreyas Bhatewara ctx->sop_txd->txd.addr = cpu_to_le64(tq->data_ring.basePA + 691d1a890faSShreyas Bhatewara tq->tx_ring.next2fill * 692115924b6SShreyas Bhatewara sizeof(struct Vmxnet3_TxDataDesc)); 693115924b6SShreyas Bhatewara ctx->sop_txd->dword[2] = cpu_to_le32(dw2 | ctx->copy_size); 694d1a890faSShreyas Bhatewara ctx->sop_txd->dword[3] = 0; 695d1a890faSShreyas Bhatewara 696d1a890faSShreyas Bhatewara tbi = tq->buf_info + tq->tx_ring.next2fill; 697d1a890faSShreyas Bhatewara tbi->map_type = VMXNET3_MAP_NONE; 698d1a890faSShreyas Bhatewara 699fdcd79b9SStephen Hemminger netdev_dbg(adapter->netdev, 700f6965582SRandy Dunlap "txd[%u]: 0x%Lx 0x%x 0x%x\n", 701115924b6SShreyas Bhatewara tq->tx_ring.next2fill, 702115924b6SShreyas Bhatewara le64_to_cpu(ctx->sop_txd->txd.addr), 703d1a890faSShreyas Bhatewara ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]); 704d1a890faSShreyas Bhatewara vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring); 705d1a890faSShreyas Bhatewara 706d1a890faSShreyas Bhatewara /* use the right gen for non-SOP desc */ 707d1a890faSShreyas Bhatewara dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT; 708d1a890faSShreyas Bhatewara } 709d1a890faSShreyas Bhatewara 710d1a890faSShreyas Bhatewara /* linear part can use multiple tx desc if it's big */ 711d1a890faSShreyas Bhatewara len = skb_headlen(skb) - ctx->copy_size; 712d1a890faSShreyas Bhatewara buf_offset = ctx->copy_size; 713d1a890faSShreyas Bhatewara while (len) { 714d1a890faSShreyas Bhatewara u32 buf_size; 715d1a890faSShreyas Bhatewara 7161f4b1612SBhavesh Davda if (len < VMXNET3_MAX_TX_BUF_SIZE) { 7171f4b1612SBhavesh Davda buf_size = len; 7181f4b1612SBhavesh Davda dw2 |= len; 7191f4b1612SBhavesh Davda } else { 7201f4b1612SBhavesh Davda buf_size = VMXNET3_MAX_TX_BUF_SIZE; 7211f4b1612SBhavesh Davda /* spec says that for TxDesc.len, 0 == 2^14 */ 7221f4b1612SBhavesh Davda } 723d1a890faSShreyas Bhatewara 724d1a890faSShreyas Bhatewara tbi = tq->buf_info + tq->tx_ring.next2fill; 725d1a890faSShreyas Bhatewara tbi->map_type = VMXNET3_MAP_SINGLE; 726b0eb57cbSAndy King tbi->dma_addr = dma_map_single(&adapter->pdev->dev, 727d1a890faSShreyas Bhatewara skb->data + buf_offset, buf_size, 728d1a890faSShreyas Bhatewara PCI_DMA_TODEVICE); 7295738a09dSAlexey Khoroshilov if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr)) 7305738a09dSAlexey Khoroshilov return -EFAULT; 731d1a890faSShreyas Bhatewara 7321f4b1612SBhavesh Davda tbi->len = buf_size; 733d1a890faSShreyas Bhatewara 734d1a890faSShreyas Bhatewara gdesc = tq->tx_ring.base + tq->tx_ring.next2fill; 735d1a890faSShreyas Bhatewara BUG_ON(gdesc->txd.gen == tq->tx_ring.gen); 736d1a890faSShreyas Bhatewara 737115924b6SShreyas Bhatewara gdesc->txd.addr = cpu_to_le64(tbi->dma_addr); 7381f4b1612SBhavesh Davda gdesc->dword[2] = cpu_to_le32(dw2); 739d1a890faSShreyas Bhatewara gdesc->dword[3] = 0; 740d1a890faSShreyas Bhatewara 741fdcd79b9SStephen Hemminger netdev_dbg(adapter->netdev, 742f6965582SRandy Dunlap "txd[%u]: 0x%Lx 0x%x 0x%x\n", 743115924b6SShreyas Bhatewara tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr), 
744115924b6SShreyas Bhatewara le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]); 745d1a890faSShreyas Bhatewara vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring); 746d1a890faSShreyas Bhatewara dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT; 747d1a890faSShreyas Bhatewara 748d1a890faSShreyas Bhatewara len -= buf_size; 749d1a890faSShreyas Bhatewara buf_offset += buf_size; 750d1a890faSShreyas Bhatewara } 751d1a890faSShreyas Bhatewara 752d1a890faSShreyas Bhatewara for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 7539e903e08SEric Dumazet const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; 754a4d7e485SEric Dumazet u32 buf_size; 755d1a890faSShreyas Bhatewara 756a4d7e485SEric Dumazet buf_offset = 0; 757a4d7e485SEric Dumazet len = skb_frag_size(frag); 758a4d7e485SEric Dumazet while (len) { 759d1a890faSShreyas Bhatewara tbi = tq->buf_info + tq->tx_ring.next2fill; 760a4d7e485SEric Dumazet if (len < VMXNET3_MAX_TX_BUF_SIZE) { 761a4d7e485SEric Dumazet buf_size = len; 762a4d7e485SEric Dumazet dw2 |= len; 763a4d7e485SEric Dumazet } else { 764a4d7e485SEric Dumazet buf_size = VMXNET3_MAX_TX_BUF_SIZE; 765a4d7e485SEric Dumazet /* spec says that for TxDesc.len, 0 == 2^14 */ 766a4d7e485SEric Dumazet } 767d1a890faSShreyas Bhatewara tbi->map_type = VMXNET3_MAP_PAGE; 7680e0634d2SIan Campbell tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag, 769a4d7e485SEric Dumazet buf_offset, buf_size, 7705d6bcdfeSIan Campbell DMA_TO_DEVICE); 7715738a09dSAlexey Khoroshilov if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr)) 7725738a09dSAlexey Khoroshilov return -EFAULT; 773d1a890faSShreyas Bhatewara 774a4d7e485SEric Dumazet tbi->len = buf_size; 775d1a890faSShreyas Bhatewara 776d1a890faSShreyas Bhatewara gdesc = tq->tx_ring.base + tq->tx_ring.next2fill; 777d1a890faSShreyas Bhatewara BUG_ON(gdesc->txd.gen == tq->tx_ring.gen); 778d1a890faSShreyas Bhatewara 779115924b6SShreyas Bhatewara gdesc->txd.addr = cpu_to_le64(tbi->dma_addr); 780a4d7e485SEric Dumazet gdesc->dword[2] = cpu_to_le32(dw2); 781d1a890faSShreyas Bhatewara gdesc->dword[3] = 0; 782d1a890faSShreyas Bhatewara 783fdcd79b9SStephen Hemminger netdev_dbg(adapter->netdev, 7848b429468SHans Wennborg "txd[%u]: 0x%llx %u %u\n", 785115924b6SShreyas Bhatewara tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr), 786115924b6SShreyas Bhatewara le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]); 787d1a890faSShreyas Bhatewara vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring); 788d1a890faSShreyas Bhatewara dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT; 789a4d7e485SEric Dumazet 790a4d7e485SEric Dumazet len -= buf_size; 791a4d7e485SEric Dumazet buf_offset += buf_size; 792a4d7e485SEric Dumazet } 793d1a890faSShreyas Bhatewara } 794d1a890faSShreyas Bhatewara 795d1a890faSShreyas Bhatewara ctx->eop_txd = gdesc; 796d1a890faSShreyas Bhatewara 797d1a890faSShreyas Bhatewara /* set the last buf_info for the pkt */ 798d1a890faSShreyas Bhatewara tbi->skb = skb; 799d1a890faSShreyas Bhatewara tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base; 8005738a09dSAlexey Khoroshilov 8015738a09dSAlexey Khoroshilov return 0; 802d1a890faSShreyas Bhatewara } 803d1a890faSShreyas Bhatewara 804d1a890faSShreyas Bhatewara 80509c5088eSShreyas Bhatewara /* Init all tx queues */ 80609c5088eSShreyas Bhatewara static void 80709c5088eSShreyas Bhatewara vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter) 80809c5088eSShreyas Bhatewara { 80909c5088eSShreyas Bhatewara int i; 81009c5088eSShreyas Bhatewara 81109c5088eSShreyas Bhatewara for (i = 0; i < adapter->num_tx_queues; i++) 81209c5088eSShreyas Bhatewara 
vmxnet3_tq_init(&adapter->tx_queue[i], adapter); 81309c5088eSShreyas Bhatewara } 81409c5088eSShreyas Bhatewara 81509c5088eSShreyas Bhatewara 816d1a890faSShreyas Bhatewara /* 817cec05562SNeil Horman * parse relevant protocol headers: 818d1a890faSShreyas Bhatewara * For a tso pkt, relevant headers are L2/3/4 including options 819d1a890faSShreyas Bhatewara * For a pkt requesting csum offloading, they are L2/3 and may include L4 820d1a890faSShreyas Bhatewara * if it's a TCP/UDP pkt 821d1a890faSShreyas Bhatewara * 822d1a890faSShreyas Bhatewara * Returns: 823d1a890faSShreyas Bhatewara * -1: error happens during parsing 824d1a890faSShreyas Bhatewara * 0: protocol headers parsed, but too big to be copied 825d1a890faSShreyas Bhatewara * 1: protocol headers parsed and copied 826d1a890faSShreyas Bhatewara * 827d1a890faSShreyas Bhatewara * Other effects: 828d1a890faSShreyas Bhatewara * 1. related *ctx fields are updated. 829d1a890faSShreyas Bhatewara * 2. ctx->copy_size is # of bytes copied 830cec05562SNeil Horman * 3. the portion to be copied is guaranteed to be in the linear part 831d1a890faSShreyas Bhatewara * 832d1a890faSShreyas Bhatewara */ 833d1a890faSShreyas Bhatewara static int 834cec05562SNeil Horman vmxnet3_parse_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, 835d1a890faSShreyas Bhatewara struct vmxnet3_tx_ctx *ctx, 836d1a890faSShreyas Bhatewara struct vmxnet3_adapter *adapter) 837d1a890faSShreyas Bhatewara { 838759c9359SShrikrishna Khare u8 protocol = 0; 839d1a890faSShreyas Bhatewara 8400d0b1672SMichał Mirosław if (ctx->mss) { /* TSO */ 841d1a890faSShreyas Bhatewara ctx->eth_ip_hdr_size = skb_transport_offset(skb); 8428bca5d1eSEric Dumazet ctx->l4_hdr_size = tcp_hdrlen(skb); 843d1a890faSShreyas Bhatewara ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size; 844d1a890faSShreyas Bhatewara } else { 845d1a890faSShreyas Bhatewara if (skb->ip_summed == CHECKSUM_PARTIAL) { 8460d0b1672SMichał Mirosław ctx->eth_ip_hdr_size = skb_checksum_start_offset(skb); 847d1a890faSShreyas Bhatewara 848d1a890faSShreyas Bhatewara if (ctx->ipv4) { 8498bca5d1eSEric Dumazet const struct iphdr *iph = ip_hdr(skb); 8508bca5d1eSEric Dumazet 851759c9359SShrikrishna Khare protocol = iph->protocol; 852759c9359SShrikrishna Khare } else if (ctx->ipv6) { 853759c9359SShrikrishna Khare const struct ipv6hdr *ipv6h = ipv6_hdr(skb); 854759c9359SShrikrishna Khare 855759c9359SShrikrishna Khare protocol = ipv6h->nexthdr; 856d1a890faSShreyas Bhatewara } 857759c9359SShrikrishna Khare 858759c9359SShrikrishna Khare switch (protocol) { 859759c9359SShrikrishna Khare case IPPROTO_TCP: 860759c9359SShrikrishna Khare ctx->l4_hdr_size = tcp_hdrlen(skb); 861759c9359SShrikrishna Khare break; 862759c9359SShrikrishna Khare case IPPROTO_UDP: 863759c9359SShrikrishna Khare ctx->l4_hdr_size = sizeof(struct udphdr); 864759c9359SShrikrishna Khare break; 865759c9359SShrikrishna Khare default: 866759c9359SShrikrishna Khare ctx->l4_hdr_size = 0; 867759c9359SShrikrishna Khare break; 868759c9359SShrikrishna Khare } 869759c9359SShrikrishna Khare 870b203262dSNeil Horman ctx->copy_size = min(ctx->eth_ip_hdr_size + 871b203262dSNeil Horman ctx->l4_hdr_size, skb->len); 872d1a890faSShreyas Bhatewara } else { 873d1a890faSShreyas Bhatewara ctx->eth_ip_hdr_size = 0; 874d1a890faSShreyas Bhatewara ctx->l4_hdr_size = 0; 875d1a890faSShreyas Bhatewara /* copy as much as allowed */ 876d1a890faSShreyas Bhatewara ctx->copy_size = min((unsigned int)VMXNET3_HDR_COPY_SIZE 877d1a890faSShreyas Bhatewara , skb_headlen(skb)); 878d1a890faSShreyas Bhatewara } 
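	/* Editor's illustrative note, not part of the original source: in this
	 * non-TSO path copy_size holds at most VMXNET3_HDR_COPY_SIZE bytes of
	 * headers (assumed here to be 128, per vmxnet3_int.h of this era). For
	 * example, a checksum-offloaded TCP/IPv4 frame with standard headers
	 * copies 14 + 20 + 20 = 54 bytes into the data ring, while the check
	 * just below instead copies the whole frame whenever skb->len itself
	 * fits within VMXNET3_HDR_COPY_SIZE.
	 */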
879d1a890faSShreyas Bhatewara 880c41fcce9SShreyas Bhatewara if (skb->len <= VMXNET3_HDR_COPY_SIZE) 881c41fcce9SShreyas Bhatewara ctx->copy_size = skb->len; 882c41fcce9SShreyas Bhatewara 883d1a890faSShreyas Bhatewara /* make sure headers are accessible directly */ 884d1a890faSShreyas Bhatewara if (unlikely(!pskb_may_pull(skb, ctx->copy_size))) 885d1a890faSShreyas Bhatewara goto err; 886d1a890faSShreyas Bhatewara } 887d1a890faSShreyas Bhatewara 888d1a890faSShreyas Bhatewara if (unlikely(ctx->copy_size > VMXNET3_HDR_COPY_SIZE)) { 889d1a890faSShreyas Bhatewara tq->stats.oversized_hdr++; 890d1a890faSShreyas Bhatewara ctx->copy_size = 0; 891d1a890faSShreyas Bhatewara return 0; 892d1a890faSShreyas Bhatewara } 893d1a890faSShreyas Bhatewara 894cec05562SNeil Horman return 1; 895cec05562SNeil Horman err: 896cec05562SNeil Horman return -1; 897cec05562SNeil Horman } 898cec05562SNeil Horman 899cec05562SNeil Horman /* 900cec05562SNeil Horman * copy relevant protocol headers to the transmit ring: 901cec05562SNeil Horman * For a tso pkt, relevant headers are L2/3/4 including options 902cec05562SNeil Horman * For a pkt requesting csum offloading, they are L2/3 and may include L4 903cec05562SNeil Horman * if it's a TCP/UDP pkt 904cec05562SNeil Horman * 905cec05562SNeil Horman * 906cec05562SNeil Horman * Note that this requires that vmxnet3_parse_hdr be called first to set the 907cec05562SNeil Horman * appropriate bits in ctx first 908cec05562SNeil Horman */ 909cec05562SNeil Horman static void 910cec05562SNeil Horman vmxnet3_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, 911cec05562SNeil Horman struct vmxnet3_tx_ctx *ctx, 912cec05562SNeil Horman struct vmxnet3_adapter *adapter) 913cec05562SNeil Horman { 914cec05562SNeil Horman struct Vmxnet3_TxDataDesc *tdd; 915cec05562SNeil Horman 916d1a890faSShreyas Bhatewara tdd = tq->data_ring.base + tq->tx_ring.next2fill; 917d1a890faSShreyas Bhatewara 918d1a890faSShreyas Bhatewara memcpy(tdd->data, skb->data, ctx->copy_size); 919fdcd79b9SStephen Hemminger netdev_dbg(adapter->netdev, 920f6965582SRandy Dunlap "copy %u bytes to dataRing[%u]\n", 921d1a890faSShreyas Bhatewara ctx->copy_size, tq->tx_ring.next2fill); 922d1a890faSShreyas Bhatewara } 923d1a890faSShreyas Bhatewara 924d1a890faSShreyas Bhatewara 925d1a890faSShreyas Bhatewara static void 926d1a890faSShreyas Bhatewara vmxnet3_prepare_tso(struct sk_buff *skb, 927d1a890faSShreyas Bhatewara struct vmxnet3_tx_ctx *ctx) 928d1a890faSShreyas Bhatewara { 9298bca5d1eSEric Dumazet struct tcphdr *tcph = tcp_hdr(skb); 9308bca5d1eSEric Dumazet 931d1a890faSShreyas Bhatewara if (ctx->ipv4) { 9328bca5d1eSEric Dumazet struct iphdr *iph = ip_hdr(skb); 9338bca5d1eSEric Dumazet 934d1a890faSShreyas Bhatewara iph->check = 0; 935d1a890faSShreyas Bhatewara tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0, 936d1a890faSShreyas Bhatewara IPPROTO_TCP, 0); 937759c9359SShrikrishna Khare } else if (ctx->ipv6) { 9388bca5d1eSEric Dumazet struct ipv6hdr *iph = ipv6_hdr(skb); 9398bca5d1eSEric Dumazet 940d1a890faSShreyas Bhatewara tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0, 941d1a890faSShreyas Bhatewara IPPROTO_TCP, 0); 942d1a890faSShreyas Bhatewara } 943d1a890faSShreyas Bhatewara } 944d1a890faSShreyas Bhatewara 945a4d7e485SEric Dumazet static int txd_estimate(const struct sk_buff *skb) 946a4d7e485SEric Dumazet { 947a4d7e485SEric Dumazet int count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1; 948a4d7e485SEric Dumazet int i; 949a4d7e485SEric Dumazet 950a4d7e485SEric Dumazet for (i = 0; i < skb_shinfo(skb)->nr_frags; 
i++) { 951a4d7e485SEric Dumazet const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; 952a4d7e485SEric Dumazet 953a4d7e485SEric Dumazet count += VMXNET3_TXD_NEEDED(skb_frag_size(frag)); 954a4d7e485SEric Dumazet } 955a4d7e485SEric Dumazet return count; 956a4d7e485SEric Dumazet } 957d1a890faSShreyas Bhatewara 958d1a890faSShreyas Bhatewara /* 959d1a890faSShreyas Bhatewara * Transmits a pkt thru a given tq 960d1a890faSShreyas Bhatewara * Returns: 961d1a890faSShreyas Bhatewara * NETDEV_TX_OK: descriptors are setup successfully 96225985edcSLucas De Marchi * NETDEV_TX_OK: error occurred, the pkt is dropped 963d1a890faSShreyas Bhatewara * NETDEV_TX_BUSY: tx ring is full, queue is stopped 964d1a890faSShreyas Bhatewara * 965d1a890faSShreyas Bhatewara * Side-effects: 966d1a890faSShreyas Bhatewara * 1. tx ring may be changed 967d1a890faSShreyas Bhatewara * 2. tq stats may be updated accordingly 968d1a890faSShreyas Bhatewara * 3. shared->txNumDeferred may be updated 969d1a890faSShreyas Bhatewara */ 970d1a890faSShreyas Bhatewara 971d1a890faSShreyas Bhatewara static int 972d1a890faSShreyas Bhatewara vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, 973d1a890faSShreyas Bhatewara struct vmxnet3_adapter *adapter, struct net_device *netdev) 974d1a890faSShreyas Bhatewara { 975d1a890faSShreyas Bhatewara int ret; 976d1a890faSShreyas Bhatewara u32 count; 977d1a890faSShreyas Bhatewara unsigned long flags; 978d1a890faSShreyas Bhatewara struct vmxnet3_tx_ctx ctx; 979d1a890faSShreyas Bhatewara union Vmxnet3_GenericDesc *gdesc; 980115924b6SShreyas Bhatewara #ifdef __BIG_ENDIAN_BITFIELD 981115924b6SShreyas Bhatewara /* Use temporary descriptor to avoid touching bits multiple times */ 982115924b6SShreyas Bhatewara union Vmxnet3_GenericDesc tempTxDesc; 983115924b6SShreyas Bhatewara #endif 984d1a890faSShreyas Bhatewara 985a4d7e485SEric Dumazet count = txd_estimate(skb); 986d1a890faSShreyas Bhatewara 98772e85c45SJesse Gross ctx.ipv4 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IP)); 988759c9359SShrikrishna Khare ctx.ipv6 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IPV6)); 989d1a890faSShreyas Bhatewara 990d1a890faSShreyas Bhatewara ctx.mss = skb_shinfo(skb)->gso_size; 991d1a890faSShreyas Bhatewara if (ctx.mss) { 992d1a890faSShreyas Bhatewara if (skb_header_cloned(skb)) { 993d1a890faSShreyas Bhatewara if (unlikely(pskb_expand_head(skb, 0, 0, 994d1a890faSShreyas Bhatewara GFP_ATOMIC) != 0)) { 995d1a890faSShreyas Bhatewara tq->stats.drop_tso++; 996d1a890faSShreyas Bhatewara goto drop_pkt; 997d1a890faSShreyas Bhatewara } 998d1a890faSShreyas Bhatewara tq->stats.copy_skb_header++; 999d1a890faSShreyas Bhatewara } 1000d1a890faSShreyas Bhatewara vmxnet3_prepare_tso(skb, &ctx); 1001d1a890faSShreyas Bhatewara } else { 1002d1a890faSShreyas Bhatewara if (unlikely(count > VMXNET3_MAX_TXD_PER_PKT)) { 1003d1a890faSShreyas Bhatewara 1004d1a890faSShreyas Bhatewara /* non-tso pkts must not use more than 1005d1a890faSShreyas Bhatewara * VMXNET3_MAX_TXD_PER_PKT entries 1006d1a890faSShreyas Bhatewara */ 1007d1a890faSShreyas Bhatewara if (skb_linearize(skb) != 0) { 1008d1a890faSShreyas Bhatewara tq->stats.drop_too_many_frags++; 1009d1a890faSShreyas Bhatewara goto drop_pkt; 1010d1a890faSShreyas Bhatewara } 1011d1a890faSShreyas Bhatewara tq->stats.linearized++; 1012d1a890faSShreyas Bhatewara 1013d1a890faSShreyas Bhatewara /* recalculate the # of descriptors to use */ 1014d1a890faSShreyas Bhatewara count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1; 1015d1a890faSShreyas Bhatewara } 1016d1a890faSShreyas Bhatewara } 
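	/* Editor's illustrative note, not part of the original source:
	 * txd_estimate() above charges one descriptor per
	 * VMXNET3_MAX_TX_BUF_SIZE chunk (assumed 16 KB from the vmxnet3
	 * headers) of the linear area, plus one per page fragment, plus one
	 * spare. A non-TSO skb with a small linear head and 20 page fragments
	 * therefore estimates about 22 descriptors, exceeds
	 * VMXNET3_MAX_TXD_PER_PKT (assumed 16), is linearized above, and
	 * count is then recalculated as VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1.
	 */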
1017d1a890faSShreyas Bhatewara 1018cec05562SNeil Horman ret = vmxnet3_parse_hdr(skb, tq, &ctx, adapter); 1019d1a890faSShreyas Bhatewara if (ret >= 0) { 1020d1a890faSShreyas Bhatewara BUG_ON(ret <= 0 && ctx.copy_size != 0); 1021d1a890faSShreyas Bhatewara /* hdrs parsed, check against other limits */ 1022d1a890faSShreyas Bhatewara if (ctx.mss) { 1023d1a890faSShreyas Bhatewara if (unlikely(ctx.eth_ip_hdr_size + ctx.l4_hdr_size > 1024d1a890faSShreyas Bhatewara VMXNET3_MAX_TX_BUF_SIZE)) { 1025efc21d95SArnd Bergmann tq->stats.drop_oversized_hdr++; 1026efc21d95SArnd Bergmann goto drop_pkt; 1027d1a890faSShreyas Bhatewara } 1028d1a890faSShreyas Bhatewara } else { 1029d1a890faSShreyas Bhatewara if (skb->ip_summed == CHECKSUM_PARTIAL) { 1030d1a890faSShreyas Bhatewara if (unlikely(ctx.eth_ip_hdr_size + 1031d1a890faSShreyas Bhatewara skb->csum_offset > 1032d1a890faSShreyas Bhatewara VMXNET3_MAX_CSUM_OFFSET)) { 1033efc21d95SArnd Bergmann tq->stats.drop_oversized_hdr++; 1034efc21d95SArnd Bergmann goto drop_pkt; 1035d1a890faSShreyas Bhatewara } 1036d1a890faSShreyas Bhatewara } 1037d1a890faSShreyas Bhatewara } 1038d1a890faSShreyas Bhatewara } else { 1039d1a890faSShreyas Bhatewara tq->stats.drop_hdr_inspect_err++; 1040cec05562SNeil Horman goto drop_pkt; 1041d1a890faSShreyas Bhatewara } 1042d1a890faSShreyas Bhatewara 1043cec05562SNeil Horman spin_lock_irqsave(&tq->tx_lock, flags); 1044cec05562SNeil Horman 1045cec05562SNeil Horman if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) { 1046cec05562SNeil Horman tq->stats.tx_ring_full++; 1047cec05562SNeil Horman netdev_dbg(adapter->netdev, 1048cec05562SNeil Horman "tx queue stopped on %s, next2comp %u" 1049cec05562SNeil Horman " next2fill %u\n", adapter->netdev->name, 1050cec05562SNeil Horman tq->tx_ring.next2comp, tq->tx_ring.next2fill); 1051cec05562SNeil Horman 1052cec05562SNeil Horman vmxnet3_tq_stop(tq, adapter); 1053cec05562SNeil Horman spin_unlock_irqrestore(&tq->tx_lock, flags); 1054cec05562SNeil Horman return NETDEV_TX_BUSY; 1055cec05562SNeil Horman } 1056cec05562SNeil Horman 1057cec05562SNeil Horman 1058cec05562SNeil Horman vmxnet3_copy_hdr(skb, tq, &ctx, adapter); 1059cec05562SNeil Horman 1060d1a890faSShreyas Bhatewara /* fill tx descs related to addr & len */ 10615738a09dSAlexey Khoroshilov if (vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter)) 10625738a09dSAlexey Khoroshilov goto unlock_drop_pkt; 1063d1a890faSShreyas Bhatewara 1064d1a890faSShreyas Bhatewara /* setup the EOP desc */ 1065115924b6SShreyas Bhatewara ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP); 1066d1a890faSShreyas Bhatewara 1067d1a890faSShreyas Bhatewara /* setup the SOP desc */ 1068115924b6SShreyas Bhatewara #ifdef __BIG_ENDIAN_BITFIELD 1069115924b6SShreyas Bhatewara gdesc = &tempTxDesc; 1070115924b6SShreyas Bhatewara gdesc->dword[2] = ctx.sop_txd->dword[2]; 1071115924b6SShreyas Bhatewara gdesc->dword[3] = ctx.sop_txd->dword[3]; 1072115924b6SShreyas Bhatewara #else 1073d1a890faSShreyas Bhatewara gdesc = ctx.sop_txd; 1074115924b6SShreyas Bhatewara #endif 1075d1a890faSShreyas Bhatewara if (ctx.mss) { 1076d1a890faSShreyas Bhatewara gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size; 1077d1a890faSShreyas Bhatewara gdesc->txd.om = VMXNET3_OM_TSO; 1078d1a890faSShreyas Bhatewara gdesc->txd.msscof = ctx.mss; 1079115924b6SShreyas Bhatewara le32_add_cpu(&tq->shared->txNumDeferred, (skb->len - 1080115924b6SShreyas Bhatewara gdesc->txd.hlen + ctx.mss - 1) / ctx.mss); 1081d1a890faSShreyas Bhatewara } else { 1082d1a890faSShreyas Bhatewara if (skb->ip_summed == 
CHECKSUM_PARTIAL) { 1083d1a890faSShreyas Bhatewara gdesc->txd.hlen = ctx.eth_ip_hdr_size; 1084d1a890faSShreyas Bhatewara gdesc->txd.om = VMXNET3_OM_CSUM; 1085d1a890faSShreyas Bhatewara gdesc->txd.msscof = ctx.eth_ip_hdr_size + 1086d1a890faSShreyas Bhatewara skb->csum_offset; 1087d1a890faSShreyas Bhatewara } else { 1088d1a890faSShreyas Bhatewara gdesc->txd.om = 0; 1089d1a890faSShreyas Bhatewara gdesc->txd.msscof = 0; 1090d1a890faSShreyas Bhatewara } 1091115924b6SShreyas Bhatewara le32_add_cpu(&tq->shared->txNumDeferred, 1); 1092d1a890faSShreyas Bhatewara } 1093d1a890faSShreyas Bhatewara 1094df8a39deSJiri Pirko if (skb_vlan_tag_present(skb)) { 1095d1a890faSShreyas Bhatewara gdesc->txd.ti = 1; 1096df8a39deSJiri Pirko gdesc->txd.tci = skb_vlan_tag_get(skb); 1097d1a890faSShreyas Bhatewara } 1098d1a890faSShreyas Bhatewara 1099115924b6SShreyas Bhatewara /* finally flips the GEN bit of the SOP desc. */ 1100115924b6SShreyas Bhatewara gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^ 1101115924b6SShreyas Bhatewara VMXNET3_TXD_GEN); 1102115924b6SShreyas Bhatewara #ifdef __BIG_ENDIAN_BITFIELD 1103115924b6SShreyas Bhatewara /* Finished updating in bitfields of Tx Desc, so write them in original 1104115924b6SShreyas Bhatewara * place. 1105115924b6SShreyas Bhatewara */ 1106115924b6SShreyas Bhatewara vmxnet3_TxDescToLe((struct Vmxnet3_TxDesc *)gdesc, 1107115924b6SShreyas Bhatewara (struct Vmxnet3_TxDesc *)ctx.sop_txd); 1108115924b6SShreyas Bhatewara gdesc = ctx.sop_txd; 1109115924b6SShreyas Bhatewara #endif 1110fdcd79b9SStephen Hemminger netdev_dbg(adapter->netdev, 1111f6965582SRandy Dunlap "txd[%u]: SOP 0x%Lx 0x%x 0x%x\n", 1112c2fd03a0SJoe Perches (u32)(ctx.sop_txd - 1113115924b6SShreyas Bhatewara tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr), 1114115924b6SShreyas Bhatewara le32_to_cpu(gdesc->dword[2]), le32_to_cpu(gdesc->dword[3])); 1115d1a890faSShreyas Bhatewara 1116d1a890faSShreyas Bhatewara spin_unlock_irqrestore(&tq->tx_lock, flags); 1117d1a890faSShreyas Bhatewara 1118115924b6SShreyas Bhatewara if (le32_to_cpu(tq->shared->txNumDeferred) >= 1119115924b6SShreyas Bhatewara le32_to_cpu(tq->shared->txThreshold)) { 1120d1a890faSShreyas Bhatewara tq->shared->txNumDeferred = 0; 112109c5088eSShreyas Bhatewara VMXNET3_WRITE_BAR0_REG(adapter, 112209c5088eSShreyas Bhatewara VMXNET3_REG_TXPROD + tq->qid * 8, 1123d1a890faSShreyas Bhatewara tq->tx_ring.next2fill); 1124d1a890faSShreyas Bhatewara } 1125d1a890faSShreyas Bhatewara 1126d1a890faSShreyas Bhatewara return NETDEV_TX_OK; 1127d1a890faSShreyas Bhatewara 1128f955e141SDan Carpenter unlock_drop_pkt: 1129f955e141SDan Carpenter spin_unlock_irqrestore(&tq->tx_lock, flags); 1130d1a890faSShreyas Bhatewara drop_pkt: 1131d1a890faSShreyas Bhatewara tq->stats.drop_total++; 1132b1b71817SEric W. 
Biederman dev_kfree_skb_any(skb); 1133d1a890faSShreyas Bhatewara return NETDEV_TX_OK; 1134d1a890faSShreyas Bhatewara } 1135d1a890faSShreyas Bhatewara 1136d1a890faSShreyas Bhatewara 1137d1a890faSShreyas Bhatewara static netdev_tx_t 1138d1a890faSShreyas Bhatewara vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev) 1139d1a890faSShreyas Bhatewara { 1140d1a890faSShreyas Bhatewara struct vmxnet3_adapter *adapter = netdev_priv(netdev); 1141d1a890faSShreyas Bhatewara 114209c5088eSShreyas Bhatewara BUG_ON(skb->queue_mapping > adapter->num_tx_queues); 114309c5088eSShreyas Bhatewara return vmxnet3_tq_xmit(skb, 114409c5088eSShreyas Bhatewara &adapter->tx_queue[skb->queue_mapping], 114509c5088eSShreyas Bhatewara adapter, netdev); 1146d1a890faSShreyas Bhatewara } 1147d1a890faSShreyas Bhatewara 1148d1a890faSShreyas Bhatewara 1149d1a890faSShreyas Bhatewara static void 1150d1a890faSShreyas Bhatewara vmxnet3_rx_csum(struct vmxnet3_adapter *adapter, 1151d1a890faSShreyas Bhatewara struct sk_buff *skb, 1152d1a890faSShreyas Bhatewara union Vmxnet3_GenericDesc *gdesc) 1153d1a890faSShreyas Bhatewara { 1154a0d2730cSMichał Mirosław if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) { 1155d1a890faSShreyas Bhatewara /* typical case: TCP/UDP over IP and both csums are correct */ 1156115924b6SShreyas Bhatewara if ((le32_to_cpu(gdesc->dword[3]) & VMXNET3_RCD_CSUM_OK) == 1157d1a890faSShreyas Bhatewara VMXNET3_RCD_CSUM_OK) { 1158d1a890faSShreyas Bhatewara skb->ip_summed = CHECKSUM_UNNECESSARY; 1159d1a890faSShreyas Bhatewara BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp)); 1160d1a890faSShreyas Bhatewara BUG_ON(!(gdesc->rcd.v4 || gdesc->rcd.v6)); 1161d1a890faSShreyas Bhatewara BUG_ON(gdesc->rcd.frg); 1162d1a890faSShreyas Bhatewara } else { 1163d1a890faSShreyas Bhatewara if (gdesc->rcd.csum) { 1164d1a890faSShreyas Bhatewara skb->csum = htons(gdesc->rcd.csum); 1165d1a890faSShreyas Bhatewara skb->ip_summed = CHECKSUM_PARTIAL; 1166d1a890faSShreyas Bhatewara } else { 1167bc8acf2cSEric Dumazet skb_checksum_none_assert(skb); 1168d1a890faSShreyas Bhatewara } 1169d1a890faSShreyas Bhatewara } 1170d1a890faSShreyas Bhatewara } else { 1171bc8acf2cSEric Dumazet skb_checksum_none_assert(skb); 1172d1a890faSShreyas Bhatewara } 1173d1a890faSShreyas Bhatewara } 1174d1a890faSShreyas Bhatewara 1175d1a890faSShreyas Bhatewara 1176d1a890faSShreyas Bhatewara static void 1177d1a890faSShreyas Bhatewara vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd, 1178d1a890faSShreyas Bhatewara struct vmxnet3_rx_ctx *ctx, struct vmxnet3_adapter *adapter) 1179d1a890faSShreyas Bhatewara { 1180d1a890faSShreyas Bhatewara rq->stats.drop_err++; 1181d1a890faSShreyas Bhatewara if (!rcd->fcs) 1182d1a890faSShreyas Bhatewara rq->stats.drop_fcs++; 1183d1a890faSShreyas Bhatewara 1184d1a890faSShreyas Bhatewara rq->stats.drop_total++; 1185d1a890faSShreyas Bhatewara 1186d1a890faSShreyas Bhatewara /* 1187d1a890faSShreyas Bhatewara * We do not unmap and chain the rx buffer to the skb. 
1188d1a890faSShreyas Bhatewara * We basically pretend this buffer is not used and will be recycled 1189d1a890faSShreyas Bhatewara * by vmxnet3_rq_alloc_rx_buf() 1190d1a890faSShreyas Bhatewara */ 1191d1a890faSShreyas Bhatewara 1192d1a890faSShreyas Bhatewara /* 1193d1a890faSShreyas Bhatewara * ctx->skb may be NULL if this is the first and the only one 1194d1a890faSShreyas Bhatewara * desc for the pkt 1195d1a890faSShreyas Bhatewara */ 1196d1a890faSShreyas Bhatewara if (ctx->skb) 1197d1a890faSShreyas Bhatewara dev_kfree_skb_irq(ctx->skb); 1198d1a890faSShreyas Bhatewara 1199d1a890faSShreyas Bhatewara ctx->skb = NULL; 1200d1a890faSShreyas Bhatewara } 1201d1a890faSShreyas Bhatewara 1202d1a890faSShreyas Bhatewara 120345dac1d6SShreyas Bhatewara static u32 120445dac1d6SShreyas Bhatewara vmxnet3_get_hdr_len(struct vmxnet3_adapter *adapter, struct sk_buff *skb, 120545dac1d6SShreyas Bhatewara union Vmxnet3_GenericDesc *gdesc) 120645dac1d6SShreyas Bhatewara { 120745dac1d6SShreyas Bhatewara u32 hlen, maplen; 120845dac1d6SShreyas Bhatewara union { 120945dac1d6SShreyas Bhatewara void *ptr; 121045dac1d6SShreyas Bhatewara struct ethhdr *eth; 121145dac1d6SShreyas Bhatewara struct iphdr *ipv4; 121245dac1d6SShreyas Bhatewara struct ipv6hdr *ipv6; 121345dac1d6SShreyas Bhatewara struct tcphdr *tcp; 121445dac1d6SShreyas Bhatewara } hdr; 121545dac1d6SShreyas Bhatewara BUG_ON(gdesc->rcd.tcp == 0); 121645dac1d6SShreyas Bhatewara 121745dac1d6SShreyas Bhatewara maplen = skb_headlen(skb); 121845dac1d6SShreyas Bhatewara if (unlikely(sizeof(struct iphdr) + sizeof(struct tcphdr) > maplen)) 121945dac1d6SShreyas Bhatewara return 0; 122045dac1d6SShreyas Bhatewara 122145dac1d6SShreyas Bhatewara hdr.eth = eth_hdr(skb); 122245dac1d6SShreyas Bhatewara if (gdesc->rcd.v4) { 122345dac1d6SShreyas Bhatewara BUG_ON(hdr.eth->h_proto != htons(ETH_P_IP)); 122445dac1d6SShreyas Bhatewara hdr.ptr += sizeof(struct ethhdr); 122545dac1d6SShreyas Bhatewara BUG_ON(hdr.ipv4->protocol != IPPROTO_TCP); 122645dac1d6SShreyas Bhatewara hlen = hdr.ipv4->ihl << 2; 122745dac1d6SShreyas Bhatewara hdr.ptr += hdr.ipv4->ihl << 2; 122845dac1d6SShreyas Bhatewara } else if (gdesc->rcd.v6) { 122945dac1d6SShreyas Bhatewara BUG_ON(hdr.eth->h_proto != htons(ETH_P_IPV6)); 123045dac1d6SShreyas Bhatewara hdr.ptr += sizeof(struct ethhdr); 123145dac1d6SShreyas Bhatewara /* Use an estimated value, since we also need to handle 123245dac1d6SShreyas Bhatewara * TSO case. 
123345dac1d6SShreyas Bhatewara */ 123445dac1d6SShreyas Bhatewara if (hdr.ipv6->nexthdr != IPPROTO_TCP) 123545dac1d6SShreyas Bhatewara return sizeof(struct ipv6hdr) + sizeof(struct tcphdr); 123645dac1d6SShreyas Bhatewara hlen = sizeof(struct ipv6hdr); 123745dac1d6SShreyas Bhatewara hdr.ptr += sizeof(struct ipv6hdr); 123845dac1d6SShreyas Bhatewara } else { 123945dac1d6SShreyas Bhatewara /* Non-IP pkt, dont estimate header length */ 124045dac1d6SShreyas Bhatewara return 0; 124145dac1d6SShreyas Bhatewara } 124245dac1d6SShreyas Bhatewara 124345dac1d6SShreyas Bhatewara if (hlen + sizeof(struct tcphdr) > maplen) 124445dac1d6SShreyas Bhatewara return 0; 124545dac1d6SShreyas Bhatewara 124645dac1d6SShreyas Bhatewara return (hlen + (hdr.tcp->doff << 2)); 124745dac1d6SShreyas Bhatewara } 124845dac1d6SShreyas Bhatewara 1249d1a890faSShreyas Bhatewara static int 1250d1a890faSShreyas Bhatewara vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, 1251d1a890faSShreyas Bhatewara struct vmxnet3_adapter *adapter, int quota) 1252d1a890faSShreyas Bhatewara { 1253215faf9cSJoe Perches static const u32 rxprod_reg[2] = { 1254215faf9cSJoe Perches VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2 1255215faf9cSJoe Perches }; 12560769636cSNeil Horman u32 num_pkts = 0; 12575318d809SShreyas Bhatewara bool skip_page_frags = false; 1258d1a890faSShreyas Bhatewara struct Vmxnet3_RxCompDesc *rcd; 1259d1a890faSShreyas Bhatewara struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx; 126045dac1d6SShreyas Bhatewara u16 segCnt = 0, mss = 0; 1261115924b6SShreyas Bhatewara #ifdef __BIG_ENDIAN_BITFIELD 1262115924b6SShreyas Bhatewara struct Vmxnet3_RxDesc rxCmdDesc; 1263115924b6SShreyas Bhatewara struct Vmxnet3_RxCompDesc rxComp; 1264115924b6SShreyas Bhatewara #endif 1265115924b6SShreyas Bhatewara vmxnet3_getRxComp(rcd, &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, 1266115924b6SShreyas Bhatewara &rxComp); 1267d1a890faSShreyas Bhatewara while (rcd->gen == rq->comp_ring.gen) { 1268d1a890faSShreyas Bhatewara struct vmxnet3_rx_buf_info *rbi; 12695318d809SShreyas Bhatewara struct sk_buff *skb, *new_skb = NULL; 12705318d809SShreyas Bhatewara struct page *new_page = NULL; 12715738a09dSAlexey Khoroshilov dma_addr_t new_dma_addr; 1272d1a890faSShreyas Bhatewara int num_to_alloc; 1273d1a890faSShreyas Bhatewara struct Vmxnet3_RxDesc *rxd; 1274d1a890faSShreyas Bhatewara u32 idx, ring_idx; 12755318d809SShreyas Bhatewara struct vmxnet3_cmd_ring *ring = NULL; 12760769636cSNeil Horman if (num_pkts >= quota) { 1277d1a890faSShreyas Bhatewara /* we may stop even before we see the EOP desc of 1278d1a890faSShreyas Bhatewara * the current pkt 1279d1a890faSShreyas Bhatewara */ 1280d1a890faSShreyas Bhatewara break; 1281d1a890faSShreyas Bhatewara } 128209c5088eSShreyas Bhatewara BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2); 1283d1a890faSShreyas Bhatewara idx = rcd->rxdIdx; 128409c5088eSShreyas Bhatewara ring_idx = rcd->rqID < adapter->num_rx_queues ? 
0 : 1; 12855318d809SShreyas Bhatewara ring = rq->rx_ring + ring_idx; 1286115924b6SShreyas Bhatewara vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd, 1287115924b6SShreyas Bhatewara &rxCmdDesc); 1288d1a890faSShreyas Bhatewara rbi = rq->buf_info[ring_idx] + idx; 1289d1a890faSShreyas Bhatewara 1290115924b6SShreyas Bhatewara BUG_ON(rxd->addr != rbi->dma_addr || 1291115924b6SShreyas Bhatewara rxd->len != rbi->len); 1292d1a890faSShreyas Bhatewara 1293d1a890faSShreyas Bhatewara if (unlikely(rcd->eop && rcd->err)) { 1294d1a890faSShreyas Bhatewara vmxnet3_rx_error(rq, rcd, ctx, adapter); 1295d1a890faSShreyas Bhatewara goto rcd_done; 1296d1a890faSShreyas Bhatewara } 1297d1a890faSShreyas Bhatewara 1298d1a890faSShreyas Bhatewara if (rcd->sop) { /* first buf of the pkt */ 1299d1a890faSShreyas Bhatewara BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_HEAD || 1300d1a890faSShreyas Bhatewara rcd->rqID != rq->qid); 1301d1a890faSShreyas Bhatewara 1302d1a890faSShreyas Bhatewara BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB); 1303d1a890faSShreyas Bhatewara BUG_ON(ctx->skb != NULL || rbi->skb == NULL); 1304d1a890faSShreyas Bhatewara 1305d1a890faSShreyas Bhatewara if (unlikely(rcd->len == 0)) { 1306d1a890faSShreyas Bhatewara /* Pretend the rx buffer is skipped. */ 1307d1a890faSShreyas Bhatewara BUG_ON(!(rcd->sop && rcd->eop)); 1308fdcd79b9SStephen Hemminger netdev_dbg(adapter->netdev, 1309f6965582SRandy Dunlap "rxRing[%u][%u] 0 length\n", 1310d1a890faSShreyas Bhatewara ring_idx, idx); 1311d1a890faSShreyas Bhatewara goto rcd_done; 1312d1a890faSShreyas Bhatewara } 1313d1a890faSShreyas Bhatewara 13145318d809SShreyas Bhatewara skip_page_frags = false; 1315d1a890faSShreyas Bhatewara ctx->skb = rbi->skb; 13160d735f13SStephen Hemminger new_skb = netdev_alloc_skb_ip_align(adapter->netdev, 13170d735f13SStephen Hemminger rbi->len); 13185318d809SShreyas Bhatewara if (new_skb == NULL) { 13195318d809SShreyas Bhatewara /* Skb allocation failed, do not handover this 13205318d809SShreyas Bhatewara * skb to stack. Reuse it. Drop the existing pkt 13215318d809SShreyas Bhatewara */ 13225318d809SShreyas Bhatewara rq->stats.rx_buf_alloc_failure++; 13235318d809SShreyas Bhatewara ctx->skb = NULL; 13245318d809SShreyas Bhatewara rq->stats.drop_total++; 13255318d809SShreyas Bhatewara skip_page_frags = true; 13265318d809SShreyas Bhatewara goto rcd_done; 13275318d809SShreyas Bhatewara } 13285738a09dSAlexey Khoroshilov new_dma_addr = dma_map_single(&adapter->pdev->dev, 13295738a09dSAlexey Khoroshilov new_skb->data, rbi->len, 13305738a09dSAlexey Khoroshilov PCI_DMA_FROMDEVICE); 13315738a09dSAlexey Khoroshilov if (dma_mapping_error(&adapter->pdev->dev, 13325738a09dSAlexey Khoroshilov new_dma_addr)) { 13335738a09dSAlexey Khoroshilov dev_kfree_skb(new_skb); 13345738a09dSAlexey Khoroshilov /* Skb allocation failed, do not handover this 13355738a09dSAlexey Khoroshilov * skb to stack. Reuse it. 
Drop the existing pkt 13365738a09dSAlexey Khoroshilov */ 13375738a09dSAlexey Khoroshilov rq->stats.rx_buf_alloc_failure++; 13385738a09dSAlexey Khoroshilov ctx->skb = NULL; 13395738a09dSAlexey Khoroshilov rq->stats.drop_total++; 13405738a09dSAlexey Khoroshilov skip_page_frags = true; 13415738a09dSAlexey Khoroshilov goto rcd_done; 13425738a09dSAlexey Khoroshilov } 1343d1a890faSShreyas Bhatewara 1344b0eb57cbSAndy King dma_unmap_single(&adapter->pdev->dev, rbi->dma_addr, 1345b0eb57cbSAndy King rbi->len, 1346d1a890faSShreyas Bhatewara PCI_DMA_FROMDEVICE); 1347d1a890faSShreyas Bhatewara 13487db11f75SStephen Hemminger #ifdef VMXNET3_RSS 13497db11f75SStephen Hemminger if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE && 13507db11f75SStephen Hemminger (adapter->netdev->features & NETIF_F_RXHASH)) 13512c15a154SMichal Schmidt skb_set_hash(ctx->skb, 13522c15a154SMichal Schmidt le32_to_cpu(rcd->rssHash), 13530b680703STom Herbert PKT_HASH_TYPE_L3); 13547db11f75SStephen Hemminger #endif 1355d1a890faSShreyas Bhatewara skb_put(ctx->skb, rcd->len); 13565318d809SShreyas Bhatewara 13575318d809SShreyas Bhatewara /* Immediate refill */ 13585318d809SShreyas Bhatewara rbi->skb = new_skb; 13595738a09dSAlexey Khoroshilov rbi->dma_addr = new_dma_addr; 13605318d809SShreyas Bhatewara rxd->addr = cpu_to_le64(rbi->dma_addr); 13615318d809SShreyas Bhatewara rxd->len = rbi->len; 136245dac1d6SShreyas Bhatewara if (adapter->version == 2 && 136345dac1d6SShreyas Bhatewara rcd->type == VMXNET3_CDTYPE_RXCOMP_LRO) { 136445dac1d6SShreyas Bhatewara struct Vmxnet3_RxCompDescExt *rcdlro; 136545dac1d6SShreyas Bhatewara rcdlro = (struct Vmxnet3_RxCompDescExt *)rcd; 13665318d809SShreyas Bhatewara 136745dac1d6SShreyas Bhatewara segCnt = rcdlro->segCnt; 136845dac1d6SShreyas Bhatewara BUG_ON(segCnt <= 1); 136945dac1d6SShreyas Bhatewara mss = rcdlro->mss; 137045dac1d6SShreyas Bhatewara if (unlikely(segCnt <= 1)) 137145dac1d6SShreyas Bhatewara segCnt = 0; 137245dac1d6SShreyas Bhatewara } else { 137345dac1d6SShreyas Bhatewara segCnt = 0; 137445dac1d6SShreyas Bhatewara } 1375d1a890faSShreyas Bhatewara } else { 13765318d809SShreyas Bhatewara BUG_ON(ctx->skb == NULL && !skip_page_frags); 13775318d809SShreyas Bhatewara 1378d1a890faSShreyas Bhatewara /* non SOP buffer must be type 1 in most cases */ 13795318d809SShreyas Bhatewara BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE); 1380d1a890faSShreyas Bhatewara BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY); 1381d1a890faSShreyas Bhatewara 13825318d809SShreyas Bhatewara /* If an sop buffer was dropped, skip all 13835318d809SShreyas Bhatewara * following non-sop fragments. They will be reused. 13845318d809SShreyas Bhatewara */ 13855318d809SShreyas Bhatewara if (skip_page_frags) 13865318d809SShreyas Bhatewara goto rcd_done; 13875318d809SShreyas Bhatewara 1388c41fcce9SShreyas Bhatewara if (rcd->len) { 13895318d809SShreyas Bhatewara new_page = alloc_page(GFP_ATOMIC); 13905318d809SShreyas Bhatewara /* Replacement page frag could not be allocated. 13915318d809SShreyas Bhatewara * Reuse this page. Drop the pkt and free the 13925318d809SShreyas Bhatewara * skb which contained this page as a frag. Skip 13935318d809SShreyas Bhatewara * processing all the following non-sop frags. 
13945318d809SShreyas Bhatewara */ 1395c41fcce9SShreyas Bhatewara if (unlikely(!new_page)) { 13965318d809SShreyas Bhatewara rq->stats.rx_buf_alloc_failure++; 13975318d809SShreyas Bhatewara dev_kfree_skb(ctx->skb); 13985318d809SShreyas Bhatewara ctx->skb = NULL; 13995318d809SShreyas Bhatewara skip_page_frags = true; 14005318d809SShreyas Bhatewara goto rcd_done; 14015318d809SShreyas Bhatewara } 140258caf637SShrikrishna Khare new_dma_addr = dma_map_page(&adapter->pdev->dev, 140358caf637SShrikrishna Khare new_page, 14045738a09dSAlexey Khoroshilov 0, PAGE_SIZE, 14055738a09dSAlexey Khoroshilov PCI_DMA_FROMDEVICE); 14065738a09dSAlexey Khoroshilov if (dma_mapping_error(&adapter->pdev->dev, 14075738a09dSAlexey Khoroshilov new_dma_addr)) { 14085738a09dSAlexey Khoroshilov put_page(new_page); 14095738a09dSAlexey Khoroshilov rq->stats.rx_buf_alloc_failure++; 14105738a09dSAlexey Khoroshilov dev_kfree_skb(ctx->skb); 14115738a09dSAlexey Khoroshilov ctx->skb = NULL; 14125738a09dSAlexey Khoroshilov skip_page_frags = true; 14135738a09dSAlexey Khoroshilov goto rcd_done; 14145738a09dSAlexey Khoroshilov } 14155318d809SShreyas Bhatewara 1416b0eb57cbSAndy King dma_unmap_page(&adapter->pdev->dev, 1417d1a890faSShreyas Bhatewara rbi->dma_addr, rbi->len, 1418d1a890faSShreyas Bhatewara PCI_DMA_FROMDEVICE); 1419d1a890faSShreyas Bhatewara 1420d1a890faSShreyas Bhatewara vmxnet3_append_frag(ctx->skb, rcd, rbi); 14215318d809SShreyas Bhatewara 14225318d809SShreyas Bhatewara /* Immediate refill */ 14235318d809SShreyas Bhatewara rbi->page = new_page; 14245738a09dSAlexey Khoroshilov rbi->dma_addr = new_dma_addr; 14255318d809SShreyas Bhatewara rxd->addr = cpu_to_le64(rbi->dma_addr); 14265318d809SShreyas Bhatewara rxd->len = rbi->len; 1427d1a890faSShreyas Bhatewara } 1428c41fcce9SShreyas Bhatewara } 14295318d809SShreyas Bhatewara 1430d1a890faSShreyas Bhatewara 1431d1a890faSShreyas Bhatewara skb = ctx->skb; 1432d1a890faSShreyas Bhatewara if (rcd->eop) { 143345dac1d6SShreyas Bhatewara u32 mtu = adapter->netdev->mtu; 1434d1a890faSShreyas Bhatewara skb->len += skb->data_len; 1435d1a890faSShreyas Bhatewara 1436d1a890faSShreyas Bhatewara vmxnet3_rx_csum(adapter, skb, 1437d1a890faSShreyas Bhatewara (union Vmxnet3_GenericDesc *)rcd); 1438d1a890faSShreyas Bhatewara skb->protocol = eth_type_trans(skb, adapter->netdev); 143945dac1d6SShreyas Bhatewara if (!rcd->tcp || !adapter->lro) 144045dac1d6SShreyas Bhatewara goto not_lro; 1441d1a890faSShreyas Bhatewara 144245dac1d6SShreyas Bhatewara if (segCnt != 0 && mss != 0) { 144345dac1d6SShreyas Bhatewara skb_shinfo(skb)->gso_type = rcd->v4 ? 144445dac1d6SShreyas Bhatewara SKB_GSO_TCPV4 : SKB_GSO_TCPV6; 144545dac1d6SShreyas Bhatewara skb_shinfo(skb)->gso_size = mss; 144645dac1d6SShreyas Bhatewara skb_shinfo(skb)->gso_segs = segCnt; 144745dac1d6SShreyas Bhatewara } else if (segCnt != 0 || skb->len > mtu) { 144845dac1d6SShreyas Bhatewara u32 hlen; 144945dac1d6SShreyas Bhatewara 145045dac1d6SShreyas Bhatewara hlen = vmxnet3_get_hdr_len(adapter, skb, 145145dac1d6SShreyas Bhatewara (union Vmxnet3_GenericDesc *)rcd); 145245dac1d6SShreyas Bhatewara if (hlen == 0) 145345dac1d6SShreyas Bhatewara goto not_lro; 145445dac1d6SShreyas Bhatewara 145545dac1d6SShreyas Bhatewara skb_shinfo(skb)->gso_type = 145645dac1d6SShreyas Bhatewara rcd->v4 ? 
SKB_GSO_TCPV4 : SKB_GSO_TCPV6; 145745dac1d6SShreyas Bhatewara if (segCnt != 0) { 145845dac1d6SShreyas Bhatewara skb_shinfo(skb)->gso_segs = segCnt; 145945dac1d6SShreyas Bhatewara skb_shinfo(skb)->gso_size = 146045dac1d6SShreyas Bhatewara DIV_ROUND_UP(skb->len - 146145dac1d6SShreyas Bhatewara hlen, segCnt); 146245dac1d6SShreyas Bhatewara } else { 146345dac1d6SShreyas Bhatewara skb_shinfo(skb)->gso_size = mtu - hlen; 146445dac1d6SShreyas Bhatewara } 146545dac1d6SShreyas Bhatewara } 146645dac1d6SShreyas Bhatewara not_lro: 146772e85c45SJesse Gross if (unlikely(rcd->ts)) 146886a9bad3SPatrick McHardy __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rcd->tci); 146972e85c45SJesse Gross 1470213ade8cSJesse Gross if (adapter->netdev->features & NETIF_F_LRO) 1471d1a890faSShreyas Bhatewara netif_receive_skb(skb); 1472213ade8cSJesse Gross else 1473213ade8cSJesse Gross napi_gro_receive(&rq->napi, skb); 1474d1a890faSShreyas Bhatewara 1475d1a890faSShreyas Bhatewara ctx->skb = NULL; 14760769636cSNeil Horman num_pkts++; 1477d1a890faSShreyas Bhatewara } 1478d1a890faSShreyas Bhatewara 1479d1a890faSShreyas Bhatewara rcd_done: 14805318d809SShreyas Bhatewara /* device may have skipped some rx descs */ 14815318d809SShreyas Bhatewara ring->next2comp = idx; 14825318d809SShreyas Bhatewara num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring); 14835318d809SShreyas Bhatewara ring = rq->rx_ring + ring_idx; 14845318d809SShreyas Bhatewara while (num_to_alloc) { 14855318d809SShreyas Bhatewara vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd, 14865318d809SShreyas Bhatewara &rxCmdDesc); 14875318d809SShreyas Bhatewara BUG_ON(!rxd->addr); 1488d1a890faSShreyas Bhatewara 14895318d809SShreyas Bhatewara /* Recv desc is ready to be used by the device */ 14905318d809SShreyas Bhatewara rxd->gen = ring->gen; 14915318d809SShreyas Bhatewara vmxnet3_cmd_ring_adv_next2fill(ring); 14925318d809SShreyas Bhatewara num_to_alloc--; 14935318d809SShreyas Bhatewara } 1494d1a890faSShreyas Bhatewara 1495d1a890faSShreyas Bhatewara /* if needed, update the register */ 1496d1a890faSShreyas Bhatewara if (unlikely(rq->shared->updateRxProd)) { 1497d1a890faSShreyas Bhatewara VMXNET3_WRITE_BAR0_REG(adapter, 1498d1a890faSShreyas Bhatewara rxprod_reg[ring_idx] + rq->qid * 8, 14995318d809SShreyas Bhatewara ring->next2fill); 1500d1a890faSShreyas Bhatewara } 1501d1a890faSShreyas Bhatewara 1502d1a890faSShreyas Bhatewara vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring); 1503115924b6SShreyas Bhatewara vmxnet3_getRxComp(rcd, 1504115924b6SShreyas Bhatewara &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp); 1505d1a890faSShreyas Bhatewara } 1506d1a890faSShreyas Bhatewara 15070769636cSNeil Horman return num_pkts; 1508d1a890faSShreyas Bhatewara } 1509d1a890faSShreyas Bhatewara 1510d1a890faSShreyas Bhatewara 1511d1a890faSShreyas Bhatewara static void 1512d1a890faSShreyas Bhatewara vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq, 1513d1a890faSShreyas Bhatewara struct vmxnet3_adapter *adapter) 1514d1a890faSShreyas Bhatewara { 1515d1a890faSShreyas Bhatewara u32 i, ring_idx; 1516d1a890faSShreyas Bhatewara struct Vmxnet3_RxDesc *rxd; 1517d1a890faSShreyas Bhatewara 1518d1a890faSShreyas Bhatewara for (ring_idx = 0; ring_idx < 2; ring_idx++) { 1519d1a890faSShreyas Bhatewara for (i = 0; i < rq->rx_ring[ring_idx].size; i++) { 1520115924b6SShreyas Bhatewara #ifdef __BIG_ENDIAN_BITFIELD 1521115924b6SShreyas Bhatewara struct Vmxnet3_RxDesc rxDesc; 1522115924b6SShreyas Bhatewara #endif 1523115924b6SShreyas Bhatewara vmxnet3_getRxDesc(rxd, 1524115924b6SShreyas Bhatewara 
&rq->rx_ring[ring_idx].base[i].rxd, &rxDesc); 1525d1a890faSShreyas Bhatewara 1526d1a890faSShreyas Bhatewara if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD && 1527d1a890faSShreyas Bhatewara rq->buf_info[ring_idx][i].skb) { 1528b0eb57cbSAndy King dma_unmap_single(&adapter->pdev->dev, rxd->addr, 1529d1a890faSShreyas Bhatewara rxd->len, PCI_DMA_FROMDEVICE); 1530d1a890faSShreyas Bhatewara dev_kfree_skb(rq->buf_info[ring_idx][i].skb); 1531d1a890faSShreyas Bhatewara rq->buf_info[ring_idx][i].skb = NULL; 1532d1a890faSShreyas Bhatewara } else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY && 1533d1a890faSShreyas Bhatewara rq->buf_info[ring_idx][i].page) { 1534b0eb57cbSAndy King dma_unmap_page(&adapter->pdev->dev, rxd->addr, 1535d1a890faSShreyas Bhatewara rxd->len, PCI_DMA_FROMDEVICE); 1536d1a890faSShreyas Bhatewara put_page(rq->buf_info[ring_idx][i].page); 1537d1a890faSShreyas Bhatewara rq->buf_info[ring_idx][i].page = NULL; 1538d1a890faSShreyas Bhatewara } 1539d1a890faSShreyas Bhatewara } 1540d1a890faSShreyas Bhatewara 1541d1a890faSShreyas Bhatewara rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN; 1542d1a890faSShreyas Bhatewara rq->rx_ring[ring_idx].next2fill = 1543d1a890faSShreyas Bhatewara rq->rx_ring[ring_idx].next2comp = 0; 1544d1a890faSShreyas Bhatewara } 1545d1a890faSShreyas Bhatewara 1546d1a890faSShreyas Bhatewara rq->comp_ring.gen = VMXNET3_INIT_GEN; 1547d1a890faSShreyas Bhatewara rq->comp_ring.next2proc = 0; 1548d1a890faSShreyas Bhatewara } 1549d1a890faSShreyas Bhatewara 1550d1a890faSShreyas Bhatewara 155109c5088eSShreyas Bhatewara static void 155209c5088eSShreyas Bhatewara vmxnet3_rq_cleanup_all(struct vmxnet3_adapter *adapter) 155309c5088eSShreyas Bhatewara { 155409c5088eSShreyas Bhatewara int i; 155509c5088eSShreyas Bhatewara 155609c5088eSShreyas Bhatewara for (i = 0; i < adapter->num_rx_queues; i++) 155709c5088eSShreyas Bhatewara vmxnet3_rq_cleanup(&adapter->rx_queue[i], adapter); 155809c5088eSShreyas Bhatewara } 155909c5088eSShreyas Bhatewara 156009c5088eSShreyas Bhatewara 1561280b74f7Sstephen hemminger static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq, 1562d1a890faSShreyas Bhatewara struct vmxnet3_adapter *adapter) 1563d1a890faSShreyas Bhatewara { 1564d1a890faSShreyas Bhatewara int i; 1565d1a890faSShreyas Bhatewara int j; 1566d1a890faSShreyas Bhatewara 1567d1a890faSShreyas Bhatewara /* all rx buffers must have already been freed */ 1568d1a890faSShreyas Bhatewara for (i = 0; i < 2; i++) { 1569d1a890faSShreyas Bhatewara if (rq->buf_info[i]) { 1570d1a890faSShreyas Bhatewara for (j = 0; j < rq->rx_ring[i].size; j++) 1571d1a890faSShreyas Bhatewara BUG_ON(rq->buf_info[i][j].page != NULL); 1572d1a890faSShreyas Bhatewara } 1573d1a890faSShreyas Bhatewara } 1574d1a890faSShreyas Bhatewara 1575d1a890faSShreyas Bhatewara 1576d1a890faSShreyas Bhatewara for (i = 0; i < 2; i++) { 1577d1a890faSShreyas Bhatewara if (rq->rx_ring[i].base) { 1578b0eb57cbSAndy King dma_free_coherent(&adapter->pdev->dev, 1579b0eb57cbSAndy King rq->rx_ring[i].size 1580d1a890faSShreyas Bhatewara * sizeof(struct Vmxnet3_RxDesc), 1581d1a890faSShreyas Bhatewara rq->rx_ring[i].base, 1582d1a890faSShreyas Bhatewara rq->rx_ring[i].basePA); 1583d1a890faSShreyas Bhatewara rq->rx_ring[i].base = NULL; 1584d1a890faSShreyas Bhatewara } 1585d1a890faSShreyas Bhatewara rq->buf_info[i] = NULL; 1586d1a890faSShreyas Bhatewara } 1587d1a890faSShreyas Bhatewara 1588d1a890faSShreyas Bhatewara if (rq->comp_ring.base) { 1589b0eb57cbSAndy King dma_free_coherent(&adapter->pdev->dev, rq->comp_ring.size 1590b0eb57cbSAndy King * sizeof(struct 
Vmxnet3_RxCompDesc), 1591d1a890faSShreyas Bhatewara rq->comp_ring.base, rq->comp_ring.basePA); 1592d1a890faSShreyas Bhatewara rq->comp_ring.base = NULL; 1593d1a890faSShreyas Bhatewara } 1594b0eb57cbSAndy King 1595b0eb57cbSAndy King if (rq->buf_info[0]) { 1596b0eb57cbSAndy King size_t sz = sizeof(struct vmxnet3_rx_buf_info) * 1597b0eb57cbSAndy King (rq->rx_ring[0].size + rq->rx_ring[1].size); 1598b0eb57cbSAndy King dma_free_coherent(&adapter->pdev->dev, sz, rq->buf_info[0], 1599b0eb57cbSAndy King rq->buf_info_pa); 1600b0eb57cbSAndy King } 1601d1a890faSShreyas Bhatewara } 1602d1a890faSShreyas Bhatewara 1603d1a890faSShreyas Bhatewara 1604d1a890faSShreyas Bhatewara static int 1605d1a890faSShreyas Bhatewara vmxnet3_rq_init(struct vmxnet3_rx_queue *rq, 1606d1a890faSShreyas Bhatewara struct vmxnet3_adapter *adapter) 1607d1a890faSShreyas Bhatewara { 1608d1a890faSShreyas Bhatewara int i; 1609d1a890faSShreyas Bhatewara 1610d1a890faSShreyas Bhatewara /* initialize buf_info */ 1611d1a890faSShreyas Bhatewara for (i = 0; i < rq->rx_ring[0].size; i++) { 1612d1a890faSShreyas Bhatewara 1613d1a890faSShreyas Bhatewara /* 1st buf for a pkt is skbuff */ 1614d1a890faSShreyas Bhatewara if (i % adapter->rx_buf_per_pkt == 0) { 1615d1a890faSShreyas Bhatewara rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_SKB; 1616d1a890faSShreyas Bhatewara rq->buf_info[0][i].len = adapter->skb_buf_size; 1617d1a890faSShreyas Bhatewara } else { /* subsequent bufs for a pkt is frag */ 1618d1a890faSShreyas Bhatewara rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_PAGE; 1619d1a890faSShreyas Bhatewara rq->buf_info[0][i].len = PAGE_SIZE; 1620d1a890faSShreyas Bhatewara } 1621d1a890faSShreyas Bhatewara } 1622d1a890faSShreyas Bhatewara for (i = 0; i < rq->rx_ring[1].size; i++) { 1623d1a890faSShreyas Bhatewara rq->buf_info[1][i].buf_type = VMXNET3_RX_BUF_PAGE; 1624d1a890faSShreyas Bhatewara rq->buf_info[1][i].len = PAGE_SIZE; 1625d1a890faSShreyas Bhatewara } 1626d1a890faSShreyas Bhatewara 1627d1a890faSShreyas Bhatewara /* reset internal state and allocate buffers for both rings */ 1628d1a890faSShreyas Bhatewara for (i = 0; i < 2; i++) { 1629d1a890faSShreyas Bhatewara rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0; 1630d1a890faSShreyas Bhatewara 1631d1a890faSShreyas Bhatewara memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size * 1632d1a890faSShreyas Bhatewara sizeof(struct Vmxnet3_RxDesc)); 1633d1a890faSShreyas Bhatewara rq->rx_ring[i].gen = VMXNET3_INIT_GEN; 1634d1a890faSShreyas Bhatewara } 1635d1a890faSShreyas Bhatewara if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1, 1636d1a890faSShreyas Bhatewara adapter) == 0) { 1637d1a890faSShreyas Bhatewara /* at least has 1 rx buffer for the 1st ring */ 1638d1a890faSShreyas Bhatewara return -ENOMEM; 1639d1a890faSShreyas Bhatewara } 1640d1a890faSShreyas Bhatewara vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter); 1641d1a890faSShreyas Bhatewara 1642d1a890faSShreyas Bhatewara /* reset the comp ring */ 1643d1a890faSShreyas Bhatewara rq->comp_ring.next2proc = 0; 1644d1a890faSShreyas Bhatewara memset(rq->comp_ring.base, 0, rq->comp_ring.size * 1645d1a890faSShreyas Bhatewara sizeof(struct Vmxnet3_RxCompDesc)); 1646d1a890faSShreyas Bhatewara rq->comp_ring.gen = VMXNET3_INIT_GEN; 1647d1a890faSShreyas Bhatewara 1648d1a890faSShreyas Bhatewara /* reset rxctx */ 1649d1a890faSShreyas Bhatewara rq->rx_ctx.skb = NULL; 1650d1a890faSShreyas Bhatewara 1651d1a890faSShreyas Bhatewara /* stats are not reset */ 1652d1a890faSShreyas Bhatewara return 0; 1653d1a890faSShreyas Bhatewara } 
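/*
 * Editorial sketch (not driver source): the ring resets in vmxnet3_rq_init()
 * above (gen = VMXNET3_INIT_GEN, next2fill/next2comp = 0) rely on a
 * generation-bit ownership scheme.  Every descriptor carries a gen flag, the
 * ring tracks the gen value it is currently producing with, and that value
 * is toggled each time the index wraps; a consumer owns an entry only while
 * the entry's gen matches the gen it expects, which is how the
 * vmxnet3_rq_rx_complete() loop above knows when to stop.  The model below
 * is simplified and self-contained; names and sizes are illustrative only.
 */
#include <stdio.h>

#define SKETCH_RING_SIZE 4

struct sketch_desc {
	int data;
	unsigned int gen;	/* ownership flag stamped by the producer */
};

struct sketch_ring {
	struct sketch_desc base[SKETCH_RING_SIZE];
	unsigned int next2fill;	/* producer index */
	unsigned int next2proc;	/* consumer index */
	unsigned int prod_gen;	/* gen the producer writes into new entries */
	unsigned int cons_gen;	/* gen the consumer expects to see */
};

static void ring_produce(struct sketch_ring *r, int data)
{
	r->base[r->next2fill].data = data;
	r->base[r->next2fill].gen = r->prod_gen;
	if (++r->next2fill == SKETCH_RING_SIZE) {	/* wrap: flip gen */
		r->next2fill = 0;
		r->prod_gen ^= 1;
	}
}

static int ring_consume(struct sketch_ring *r, int *data)
{
	if (r->base[r->next2proc].gen != r->cons_gen)
		return 0;			/* entry not (yet) ours */
	*data = r->base[r->next2proc].data;
	if (++r->next2proc == SKETCH_RING_SIZE) {	/* wrap: flip gen */
		r->next2proc = 0;
		r->cons_gen ^= 1;
	}
	return 1;
}

int main(void)
{
	/* start both sides at gen 1, analogous to VMXNET3_INIT_GEN */
	struct sketch_ring ring = { .prod_gen = 1, .cons_gen = 1 };
	int v, i;

	for (i = 0; i < 3; i++)
		ring_produce(&ring, i);
	while (ring_consume(&ring, &v))
		printf("consumed %d\n", v);

	for (i = 3; i < 6; i++)		/* crosses the wrap, gen flips */
		ring_produce(&ring, i);
	while (ring_consume(&ring, &v))
		printf("consumed %d\n", v);
	return 0;
}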
1654d1a890faSShreyas Bhatewara 1655d1a890faSShreyas Bhatewara 1656d1a890faSShreyas Bhatewara static int 165709c5088eSShreyas Bhatewara vmxnet3_rq_init_all(struct vmxnet3_adapter *adapter) 165809c5088eSShreyas Bhatewara { 165909c5088eSShreyas Bhatewara int i, err = 0; 166009c5088eSShreyas Bhatewara 166109c5088eSShreyas Bhatewara for (i = 0; i < adapter->num_rx_queues; i++) { 166209c5088eSShreyas Bhatewara err = vmxnet3_rq_init(&adapter->rx_queue[i], adapter); 166309c5088eSShreyas Bhatewara if (unlikely(err)) { 166409c5088eSShreyas Bhatewara dev_err(&adapter->netdev->dev, "%s: failed to " 166509c5088eSShreyas Bhatewara "initialize rx queue%i\n", 166609c5088eSShreyas Bhatewara adapter->netdev->name, i); 166709c5088eSShreyas Bhatewara break; 166809c5088eSShreyas Bhatewara } 166909c5088eSShreyas Bhatewara } 167009c5088eSShreyas Bhatewara return err; 167109c5088eSShreyas Bhatewara 167209c5088eSShreyas Bhatewara } 167309c5088eSShreyas Bhatewara 167409c5088eSShreyas Bhatewara 167509c5088eSShreyas Bhatewara static int 1676d1a890faSShreyas Bhatewara vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter) 1677d1a890faSShreyas Bhatewara { 1678d1a890faSShreyas Bhatewara int i; 1679d1a890faSShreyas Bhatewara size_t sz; 1680d1a890faSShreyas Bhatewara struct vmxnet3_rx_buf_info *bi; 1681d1a890faSShreyas Bhatewara 1682d1a890faSShreyas Bhatewara for (i = 0; i < 2; i++) { 1683d1a890faSShreyas Bhatewara 1684d1a890faSShreyas Bhatewara sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc); 1685b0eb57cbSAndy King rq->rx_ring[i].base = dma_alloc_coherent( 1686b0eb57cbSAndy King &adapter->pdev->dev, sz, 1687b0eb57cbSAndy King &rq->rx_ring[i].basePA, 1688b0eb57cbSAndy King GFP_KERNEL); 1689d1a890faSShreyas Bhatewara if (!rq->rx_ring[i].base) { 1690204a6e65SStephen Hemminger netdev_err(adapter->netdev, 1691204a6e65SStephen Hemminger "failed to allocate rx ring %d\n", i); 1692d1a890faSShreyas Bhatewara goto err; 1693d1a890faSShreyas Bhatewara } 1694d1a890faSShreyas Bhatewara } 1695d1a890faSShreyas Bhatewara 1696d1a890faSShreyas Bhatewara sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc); 1697b0eb57cbSAndy King rq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev, sz, 1698b0eb57cbSAndy King &rq->comp_ring.basePA, 1699b0eb57cbSAndy King GFP_KERNEL); 1700d1a890faSShreyas Bhatewara if (!rq->comp_ring.base) { 1701204a6e65SStephen Hemminger netdev_err(adapter->netdev, "failed to allocate rx comp ring\n"); 1702d1a890faSShreyas Bhatewara goto err; 1703d1a890faSShreyas Bhatewara } 1704d1a890faSShreyas Bhatewara 1705d1a890faSShreyas Bhatewara sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size + 1706d1a890faSShreyas Bhatewara rq->rx_ring[1].size); 1707b0eb57cbSAndy King bi = dma_zalloc_coherent(&adapter->pdev->dev, sz, &rq->buf_info_pa, 1708b0eb57cbSAndy King GFP_KERNEL); 1709e404decbSJoe Perches if (!bi) 1710d1a890faSShreyas Bhatewara goto err; 1711e404decbSJoe Perches 1712d1a890faSShreyas Bhatewara rq->buf_info[0] = bi; 1713d1a890faSShreyas Bhatewara rq->buf_info[1] = bi + rq->rx_ring[0].size; 1714d1a890faSShreyas Bhatewara 1715d1a890faSShreyas Bhatewara return 0; 1716d1a890faSShreyas Bhatewara 1717d1a890faSShreyas Bhatewara err: 1718d1a890faSShreyas Bhatewara vmxnet3_rq_destroy(rq, adapter); 1719d1a890faSShreyas Bhatewara return -ENOMEM; 1720d1a890faSShreyas Bhatewara } 1721d1a890faSShreyas Bhatewara 1722d1a890faSShreyas Bhatewara 1723d1a890faSShreyas Bhatewara static int 172409c5088eSShreyas Bhatewara vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter) 
172509c5088eSShreyas Bhatewara { 172609c5088eSShreyas Bhatewara int i, err = 0; 172709c5088eSShreyas Bhatewara 172809c5088eSShreyas Bhatewara for (i = 0; i < adapter->num_rx_queues; i++) { 172909c5088eSShreyas Bhatewara err = vmxnet3_rq_create(&adapter->rx_queue[i], adapter); 173009c5088eSShreyas Bhatewara if (unlikely(err)) { 173109c5088eSShreyas Bhatewara dev_err(&adapter->netdev->dev, 173209c5088eSShreyas Bhatewara "%s: failed to create rx queue%i\n", 173309c5088eSShreyas Bhatewara adapter->netdev->name, i); 173409c5088eSShreyas Bhatewara goto err_out; 173509c5088eSShreyas Bhatewara } 173609c5088eSShreyas Bhatewara } 173709c5088eSShreyas Bhatewara return err; 173809c5088eSShreyas Bhatewara err_out: 173909c5088eSShreyas Bhatewara vmxnet3_rq_destroy_all(adapter); 174009c5088eSShreyas Bhatewara return err; 174109c5088eSShreyas Bhatewara 174209c5088eSShreyas Bhatewara } 174309c5088eSShreyas Bhatewara 174409c5088eSShreyas Bhatewara /* Multiple queue aware polling function for tx and rx */ 174509c5088eSShreyas Bhatewara 174609c5088eSShreyas Bhatewara static int 1747d1a890faSShreyas Bhatewara vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget) 1748d1a890faSShreyas Bhatewara { 174909c5088eSShreyas Bhatewara int rcd_done = 0, i; 1750d1a890faSShreyas Bhatewara if (unlikely(adapter->shared->ecr)) 1751d1a890faSShreyas Bhatewara vmxnet3_process_events(adapter); 175209c5088eSShreyas Bhatewara for (i = 0; i < adapter->num_tx_queues; i++) 175309c5088eSShreyas Bhatewara vmxnet3_tq_tx_complete(&adapter->tx_queue[i], adapter); 1754d1a890faSShreyas Bhatewara 175509c5088eSShreyas Bhatewara for (i = 0; i < adapter->num_rx_queues; i++) 175609c5088eSShreyas Bhatewara rcd_done += vmxnet3_rq_rx_complete(&adapter->rx_queue[i], 175709c5088eSShreyas Bhatewara adapter, budget); 175809c5088eSShreyas Bhatewara return rcd_done; 1759d1a890faSShreyas Bhatewara } 1760d1a890faSShreyas Bhatewara 1761d1a890faSShreyas Bhatewara 1762d1a890faSShreyas Bhatewara static int 1763d1a890faSShreyas Bhatewara vmxnet3_poll(struct napi_struct *napi, int budget) 1764d1a890faSShreyas Bhatewara { 176509c5088eSShreyas Bhatewara struct vmxnet3_rx_queue *rx_queue = container_of(napi, 176609c5088eSShreyas Bhatewara struct vmxnet3_rx_queue, napi); 1767d1a890faSShreyas Bhatewara int rxd_done; 1768d1a890faSShreyas Bhatewara 176909c5088eSShreyas Bhatewara rxd_done = vmxnet3_do_poll(rx_queue->adapter, budget); 1770d1a890faSShreyas Bhatewara 1771d1a890faSShreyas Bhatewara if (rxd_done < budget) { 1772d1a890faSShreyas Bhatewara napi_complete(napi); 177309c5088eSShreyas Bhatewara vmxnet3_enable_all_intrs(rx_queue->adapter); 1774d1a890faSShreyas Bhatewara } 1775d1a890faSShreyas Bhatewara return rxd_done; 1776d1a890faSShreyas Bhatewara } 1777d1a890faSShreyas Bhatewara 177809c5088eSShreyas Bhatewara /* 177909c5088eSShreyas Bhatewara * NAPI polling function for MSI-X mode with multiple Rx queues 178009c5088eSShreyas Bhatewara * Returns the # of the NAPI credit consumed (# of rx descriptors processed) 178109c5088eSShreyas Bhatewara */ 178209c5088eSShreyas Bhatewara 178309c5088eSShreyas Bhatewara static int 178409c5088eSShreyas Bhatewara vmxnet3_poll_rx_only(struct napi_struct *napi, int budget) 178509c5088eSShreyas Bhatewara { 178609c5088eSShreyas Bhatewara struct vmxnet3_rx_queue *rq = container_of(napi, 178709c5088eSShreyas Bhatewara struct vmxnet3_rx_queue, napi); 178809c5088eSShreyas Bhatewara struct vmxnet3_adapter *adapter = rq->adapter; 178909c5088eSShreyas Bhatewara int rxd_done; 179009c5088eSShreyas Bhatewara 179109c5088eSShreyas Bhatewara 
/* When sharing interrupt with corresponding tx queue, process 179209c5088eSShreyas Bhatewara * tx completions in that queue as well 179309c5088eSShreyas Bhatewara */ 179409c5088eSShreyas Bhatewara if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE) { 179509c5088eSShreyas Bhatewara struct vmxnet3_tx_queue *tq = 179609c5088eSShreyas Bhatewara &adapter->tx_queue[rq - adapter->rx_queue]; 179709c5088eSShreyas Bhatewara vmxnet3_tq_tx_complete(tq, adapter); 179809c5088eSShreyas Bhatewara } 179909c5088eSShreyas Bhatewara 180009c5088eSShreyas Bhatewara rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget); 180109c5088eSShreyas Bhatewara 180209c5088eSShreyas Bhatewara if (rxd_done < budget) { 180309c5088eSShreyas Bhatewara napi_complete(napi); 180409c5088eSShreyas Bhatewara vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx); 180509c5088eSShreyas Bhatewara } 180609c5088eSShreyas Bhatewara return rxd_done; 180709c5088eSShreyas Bhatewara } 180809c5088eSShreyas Bhatewara 180909c5088eSShreyas Bhatewara 181009c5088eSShreyas Bhatewara #ifdef CONFIG_PCI_MSI 181109c5088eSShreyas Bhatewara 181209c5088eSShreyas Bhatewara /* 181309c5088eSShreyas Bhatewara * Handle completion interrupts on tx queues 181409c5088eSShreyas Bhatewara * Returns whether or not the intr is handled 181509c5088eSShreyas Bhatewara */ 181609c5088eSShreyas Bhatewara 181709c5088eSShreyas Bhatewara static irqreturn_t 181809c5088eSShreyas Bhatewara vmxnet3_msix_tx(int irq, void *data) 181909c5088eSShreyas Bhatewara { 182009c5088eSShreyas Bhatewara struct vmxnet3_tx_queue *tq = data; 182109c5088eSShreyas Bhatewara struct vmxnet3_adapter *adapter = tq->adapter; 182209c5088eSShreyas Bhatewara 182309c5088eSShreyas Bhatewara if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE) 182409c5088eSShreyas Bhatewara vmxnet3_disable_intr(adapter, tq->comp_ring.intr_idx); 182509c5088eSShreyas Bhatewara 182609c5088eSShreyas Bhatewara /* Handle the case where only one irq is allocate for all tx queues */ 182709c5088eSShreyas Bhatewara if (adapter->share_intr == VMXNET3_INTR_TXSHARE) { 182809c5088eSShreyas Bhatewara int i; 182909c5088eSShreyas Bhatewara for (i = 0; i < adapter->num_tx_queues; i++) { 183009c5088eSShreyas Bhatewara struct vmxnet3_tx_queue *txq = &adapter->tx_queue[i]; 183109c5088eSShreyas Bhatewara vmxnet3_tq_tx_complete(txq, adapter); 183209c5088eSShreyas Bhatewara } 183309c5088eSShreyas Bhatewara } else { 183409c5088eSShreyas Bhatewara vmxnet3_tq_tx_complete(tq, adapter); 183509c5088eSShreyas Bhatewara } 183609c5088eSShreyas Bhatewara vmxnet3_enable_intr(adapter, tq->comp_ring.intr_idx); 183709c5088eSShreyas Bhatewara 183809c5088eSShreyas Bhatewara return IRQ_HANDLED; 183909c5088eSShreyas Bhatewara } 184009c5088eSShreyas Bhatewara 184109c5088eSShreyas Bhatewara 184209c5088eSShreyas Bhatewara /* 184309c5088eSShreyas Bhatewara * Handle completion interrupts on rx queues. 
Returns whether or not the 184409c5088eSShreyas Bhatewara * intr is handled 184509c5088eSShreyas Bhatewara */ 184609c5088eSShreyas Bhatewara 184709c5088eSShreyas Bhatewara static irqreturn_t 184809c5088eSShreyas Bhatewara vmxnet3_msix_rx(int irq, void *data) 184909c5088eSShreyas Bhatewara { 185009c5088eSShreyas Bhatewara struct vmxnet3_rx_queue *rq = data; 185109c5088eSShreyas Bhatewara struct vmxnet3_adapter *adapter = rq->adapter; 185209c5088eSShreyas Bhatewara 185309c5088eSShreyas Bhatewara /* disable intr if needed */ 185409c5088eSShreyas Bhatewara if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE) 185509c5088eSShreyas Bhatewara vmxnet3_disable_intr(adapter, rq->comp_ring.intr_idx); 185609c5088eSShreyas Bhatewara napi_schedule(&rq->napi); 185709c5088eSShreyas Bhatewara 185809c5088eSShreyas Bhatewara return IRQ_HANDLED; 185909c5088eSShreyas Bhatewara } 186009c5088eSShreyas Bhatewara 186109c5088eSShreyas Bhatewara /* 186209c5088eSShreyas Bhatewara *---------------------------------------------------------------------------- 186309c5088eSShreyas Bhatewara * 186409c5088eSShreyas Bhatewara * vmxnet3_msix_event -- 186509c5088eSShreyas Bhatewara * 186609c5088eSShreyas Bhatewara * vmxnet3 msix event intr handler 186709c5088eSShreyas Bhatewara * 186809c5088eSShreyas Bhatewara * Result: 186909c5088eSShreyas Bhatewara * whether or not the intr is handled 187009c5088eSShreyas Bhatewara * 187109c5088eSShreyas Bhatewara *---------------------------------------------------------------------------- 187209c5088eSShreyas Bhatewara */ 187309c5088eSShreyas Bhatewara 187409c5088eSShreyas Bhatewara static irqreturn_t 187509c5088eSShreyas Bhatewara vmxnet3_msix_event(int irq, void *data) 187609c5088eSShreyas Bhatewara { 187709c5088eSShreyas Bhatewara struct net_device *dev = data; 187809c5088eSShreyas Bhatewara struct vmxnet3_adapter *adapter = netdev_priv(dev); 187909c5088eSShreyas Bhatewara 188009c5088eSShreyas Bhatewara /* disable intr if needed */ 188109c5088eSShreyas Bhatewara if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE) 188209c5088eSShreyas Bhatewara vmxnet3_disable_intr(adapter, adapter->intr.event_intr_idx); 188309c5088eSShreyas Bhatewara 188409c5088eSShreyas Bhatewara if (adapter->shared->ecr) 188509c5088eSShreyas Bhatewara vmxnet3_process_events(adapter); 188609c5088eSShreyas Bhatewara 188709c5088eSShreyas Bhatewara vmxnet3_enable_intr(adapter, adapter->intr.event_intr_idx); 188809c5088eSShreyas Bhatewara 188909c5088eSShreyas Bhatewara return IRQ_HANDLED; 189009c5088eSShreyas Bhatewara } 189109c5088eSShreyas Bhatewara 189209c5088eSShreyas Bhatewara #endif /* CONFIG_PCI_MSI */ 189309c5088eSShreyas Bhatewara 1894d1a890faSShreyas Bhatewara 1895d1a890faSShreyas Bhatewara /* Interrupt handler for vmxnet3 */ 1896d1a890faSShreyas Bhatewara static irqreturn_t 1897d1a890faSShreyas Bhatewara vmxnet3_intr(int irq, void *dev_id) 1898d1a890faSShreyas Bhatewara { 1899d1a890faSShreyas Bhatewara struct net_device *dev = dev_id; 1900d1a890faSShreyas Bhatewara struct vmxnet3_adapter *adapter = netdev_priv(dev); 1901d1a890faSShreyas Bhatewara 190209c5088eSShreyas Bhatewara if (adapter->intr.type == VMXNET3_IT_INTX) { 1903d1a890faSShreyas Bhatewara u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR); 1904d1a890faSShreyas Bhatewara if (unlikely(icr == 0)) 1905d1a890faSShreyas Bhatewara /* not ours */ 1906d1a890faSShreyas Bhatewara return IRQ_NONE; 1907d1a890faSShreyas Bhatewara } 1908d1a890faSShreyas Bhatewara 1909d1a890faSShreyas Bhatewara 1910d1a890faSShreyas Bhatewara /* disable intr if needed */ 
1911d1a890faSShreyas Bhatewara if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE) 191209c5088eSShreyas Bhatewara vmxnet3_disable_all_intrs(adapter); 1913d1a890faSShreyas Bhatewara 191409c5088eSShreyas Bhatewara napi_schedule(&adapter->rx_queue[0].napi); 1915d1a890faSShreyas Bhatewara 1916d1a890faSShreyas Bhatewara return IRQ_HANDLED; 1917d1a890faSShreyas Bhatewara } 1918d1a890faSShreyas Bhatewara 1919d1a890faSShreyas Bhatewara #ifdef CONFIG_NET_POLL_CONTROLLER 1920d1a890faSShreyas Bhatewara 1921d1a890faSShreyas Bhatewara /* netpoll callback. */ 1922d1a890faSShreyas Bhatewara static void 1923d1a890faSShreyas Bhatewara vmxnet3_netpoll(struct net_device *netdev) 1924d1a890faSShreyas Bhatewara { 1925d1a890faSShreyas Bhatewara struct vmxnet3_adapter *adapter = netdev_priv(netdev); 1926d1a890faSShreyas Bhatewara 1927d25f06eaSNeil Horman switch (adapter->intr.type) { 19280a8d8c44SArnd Bergmann #ifdef CONFIG_PCI_MSI 19290a8d8c44SArnd Bergmann case VMXNET3_IT_MSIX: { 19300a8d8c44SArnd Bergmann int i; 1931d25f06eaSNeil Horman for (i = 0; i < adapter->num_rx_queues; i++) 1932d25f06eaSNeil Horman vmxnet3_msix_rx(0, &adapter->rx_queue[i]); 1933d25f06eaSNeil Horman break; 19340a8d8c44SArnd Bergmann } 19350a8d8c44SArnd Bergmann #endif 1936d25f06eaSNeil Horman case VMXNET3_IT_MSI: 1937d25f06eaSNeil Horman default: 1938d25f06eaSNeil Horman vmxnet3_intr(0, adapter->netdev); 1939d25f06eaSNeil Horman break; 1940d25f06eaSNeil Horman } 194109c5088eSShreyas Bhatewara 1942d1a890faSShreyas Bhatewara } 194309c5088eSShreyas Bhatewara #endif /* CONFIG_NET_POLL_CONTROLLER */ 1944d1a890faSShreyas Bhatewara 1945d1a890faSShreyas Bhatewara static int 1946d1a890faSShreyas Bhatewara vmxnet3_request_irqs(struct vmxnet3_adapter *adapter) 1947d1a890faSShreyas Bhatewara { 194809c5088eSShreyas Bhatewara struct vmxnet3_intr *intr = &adapter->intr; 194909c5088eSShreyas Bhatewara int err = 0, i; 195009c5088eSShreyas Bhatewara int vector = 0; 1951d1a890faSShreyas Bhatewara 19528f7e524cSRandy Dunlap #ifdef CONFIG_PCI_MSI 1953d1a890faSShreyas Bhatewara if (adapter->intr.type == VMXNET3_IT_MSIX) { 195409c5088eSShreyas Bhatewara for (i = 0; i < adapter->num_tx_queues; i++) { 195509c5088eSShreyas Bhatewara if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) { 195609c5088eSShreyas Bhatewara sprintf(adapter->tx_queue[i].name, "%s-tx-%d", 195709c5088eSShreyas Bhatewara adapter->netdev->name, vector); 195809c5088eSShreyas Bhatewara err = request_irq( 195909c5088eSShreyas Bhatewara intr->msix_entries[vector].vector, 196009c5088eSShreyas Bhatewara vmxnet3_msix_tx, 0, 196109c5088eSShreyas Bhatewara adapter->tx_queue[i].name, 196209c5088eSShreyas Bhatewara &adapter->tx_queue[i]); 196309c5088eSShreyas Bhatewara } else { 196409c5088eSShreyas Bhatewara sprintf(adapter->tx_queue[i].name, "%s-rxtx-%d", 196509c5088eSShreyas Bhatewara adapter->netdev->name, vector); 196609c5088eSShreyas Bhatewara } 196709c5088eSShreyas Bhatewara if (err) { 196809c5088eSShreyas Bhatewara dev_err(&adapter->netdev->dev, 196909c5088eSShreyas Bhatewara "Failed to request irq for MSIX, %s, " 197009c5088eSShreyas Bhatewara "error %d\n", 197109c5088eSShreyas Bhatewara adapter->tx_queue[i].name, err); 197209c5088eSShreyas Bhatewara return err; 197309c5088eSShreyas Bhatewara } 197409c5088eSShreyas Bhatewara 197509c5088eSShreyas Bhatewara /* Handle the case where only 1 MSIx was allocated for 197609c5088eSShreyas Bhatewara * all tx queues */ 197709c5088eSShreyas Bhatewara if (adapter->share_intr == VMXNET3_INTR_TXSHARE) { 197809c5088eSShreyas Bhatewara for (; i < 
adapter->num_tx_queues; i++) 197909c5088eSShreyas Bhatewara adapter->tx_queue[i].comp_ring.intr_idx 198009c5088eSShreyas Bhatewara = vector; 198109c5088eSShreyas Bhatewara vector++; 198209c5088eSShreyas Bhatewara break; 198309c5088eSShreyas Bhatewara } else { 198409c5088eSShreyas Bhatewara adapter->tx_queue[i].comp_ring.intr_idx 198509c5088eSShreyas Bhatewara = vector++; 198609c5088eSShreyas Bhatewara } 198709c5088eSShreyas Bhatewara } 198809c5088eSShreyas Bhatewara if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE) 198909c5088eSShreyas Bhatewara vector = 0; 199009c5088eSShreyas Bhatewara 199109c5088eSShreyas Bhatewara for (i = 0; i < adapter->num_rx_queues; i++) { 199209c5088eSShreyas Bhatewara if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) 199309c5088eSShreyas Bhatewara sprintf(adapter->rx_queue[i].name, "%s-rx-%d", 199409c5088eSShreyas Bhatewara adapter->netdev->name, vector); 199509c5088eSShreyas Bhatewara else 199609c5088eSShreyas Bhatewara sprintf(adapter->rx_queue[i].name, "%s-rxtx-%d", 199709c5088eSShreyas Bhatewara adapter->netdev->name, vector); 199809c5088eSShreyas Bhatewara err = request_irq(intr->msix_entries[vector].vector, 199909c5088eSShreyas Bhatewara vmxnet3_msix_rx, 0, 200009c5088eSShreyas Bhatewara adapter->rx_queue[i].name, 200109c5088eSShreyas Bhatewara &(adapter->rx_queue[i])); 200209c5088eSShreyas Bhatewara if (err) { 2003204a6e65SStephen Hemminger netdev_err(adapter->netdev, 2004204a6e65SStephen Hemminger "Failed to request irq for MSIX, " 2005204a6e65SStephen Hemminger "%s, error %d\n", 200609c5088eSShreyas Bhatewara adapter->rx_queue[i].name, err); 200709c5088eSShreyas Bhatewara return err; 200809c5088eSShreyas Bhatewara } 200909c5088eSShreyas Bhatewara 201009c5088eSShreyas Bhatewara adapter->rx_queue[i].comp_ring.intr_idx = vector++; 201109c5088eSShreyas Bhatewara } 201209c5088eSShreyas Bhatewara 201309c5088eSShreyas Bhatewara sprintf(intr->event_msi_vector_name, "%s-event-%d", 201409c5088eSShreyas Bhatewara adapter->netdev->name, vector); 201509c5088eSShreyas Bhatewara err = request_irq(intr->msix_entries[vector].vector, 201609c5088eSShreyas Bhatewara vmxnet3_msix_event, 0, 201709c5088eSShreyas Bhatewara intr->event_msi_vector_name, adapter->netdev); 201809c5088eSShreyas Bhatewara intr->event_intr_idx = vector; 201909c5088eSShreyas Bhatewara 202009c5088eSShreyas Bhatewara } else if (intr->type == VMXNET3_IT_MSI) { 202109c5088eSShreyas Bhatewara adapter->num_rx_queues = 1; 2022d1a890faSShreyas Bhatewara err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0, 2023d1a890faSShreyas Bhatewara adapter->netdev->name, adapter->netdev); 202409c5088eSShreyas Bhatewara } else { 2025115924b6SShreyas Bhatewara #endif 202609c5088eSShreyas Bhatewara adapter->num_rx_queues = 1; 2027d1a890faSShreyas Bhatewara err = request_irq(adapter->pdev->irq, vmxnet3_intr, 2028d1a890faSShreyas Bhatewara IRQF_SHARED, adapter->netdev->name, 2029d1a890faSShreyas Bhatewara adapter->netdev); 203009c5088eSShreyas Bhatewara #ifdef CONFIG_PCI_MSI 203109c5088eSShreyas Bhatewara } 203209c5088eSShreyas Bhatewara #endif 203309c5088eSShreyas Bhatewara intr->num_intrs = vector + 1; 203409c5088eSShreyas Bhatewara if (err) { 2035204a6e65SStephen Hemminger netdev_err(adapter->netdev, 2036204a6e65SStephen Hemminger "Failed to request irq (intr type:%d), error %d\n", 2037204a6e65SStephen Hemminger intr->type, err); 203809c5088eSShreyas Bhatewara } else { 203909c5088eSShreyas Bhatewara /* Number of rx queues will not change after this */ 204009c5088eSShreyas Bhatewara for (i = 0; i < 
adapter->num_rx_queues; i++) { 204109c5088eSShreyas Bhatewara struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i]; 204209c5088eSShreyas Bhatewara rq->qid = i; 204309c5088eSShreyas Bhatewara rq->qid2 = i + adapter->num_rx_queues; 2044d1a890faSShreyas Bhatewara } 2045d1a890faSShreyas Bhatewara 2046d1a890faSShreyas Bhatewara 2047d1a890faSShreyas Bhatewara 2048d1a890faSShreyas Bhatewara /* init our intr settings */ 204909c5088eSShreyas Bhatewara for (i = 0; i < intr->num_intrs; i++) 205009c5088eSShreyas Bhatewara intr->mod_levels[i] = UPT1_IML_ADAPTIVE; 205109c5088eSShreyas Bhatewara if (adapter->intr.type != VMXNET3_IT_MSIX) { 2052d1a890faSShreyas Bhatewara adapter->intr.event_intr_idx = 0; 205309c5088eSShreyas Bhatewara for (i = 0; i < adapter->num_tx_queues; i++) 205409c5088eSShreyas Bhatewara adapter->tx_queue[i].comp_ring.intr_idx = 0; 205509c5088eSShreyas Bhatewara adapter->rx_queue[0].comp_ring.intr_idx = 0; 205609c5088eSShreyas Bhatewara } 2057d1a890faSShreyas Bhatewara 2058204a6e65SStephen Hemminger netdev_info(adapter->netdev, 2059204a6e65SStephen Hemminger "intr type %u, mode %u, %u vectors allocated\n", 2060204a6e65SStephen Hemminger intr->type, intr->mask_mode, intr->num_intrs); 2061d1a890faSShreyas Bhatewara } 2062d1a890faSShreyas Bhatewara 2063d1a890faSShreyas Bhatewara return err; 2064d1a890faSShreyas Bhatewara } 2065d1a890faSShreyas Bhatewara 2066d1a890faSShreyas Bhatewara 2067d1a890faSShreyas Bhatewara static void 2068d1a890faSShreyas Bhatewara vmxnet3_free_irqs(struct vmxnet3_adapter *adapter) 2069d1a890faSShreyas Bhatewara { 207009c5088eSShreyas Bhatewara struct vmxnet3_intr *intr = &adapter->intr; 207109c5088eSShreyas Bhatewara BUG_ON(intr->type == VMXNET3_IT_AUTO || intr->num_intrs <= 0); 2072d1a890faSShreyas Bhatewara 207309c5088eSShreyas Bhatewara switch (intr->type) { 20748f7e524cSRandy Dunlap #ifdef CONFIG_PCI_MSI 2075d1a890faSShreyas Bhatewara case VMXNET3_IT_MSIX: 2076d1a890faSShreyas Bhatewara { 207709c5088eSShreyas Bhatewara int i, vector = 0; 2078d1a890faSShreyas Bhatewara 207909c5088eSShreyas Bhatewara if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) { 208009c5088eSShreyas Bhatewara for (i = 0; i < adapter->num_tx_queues; i++) { 208109c5088eSShreyas Bhatewara free_irq(intr->msix_entries[vector++].vector, 208209c5088eSShreyas Bhatewara &(adapter->tx_queue[i])); 208309c5088eSShreyas Bhatewara if (adapter->share_intr == VMXNET3_INTR_TXSHARE) 208409c5088eSShreyas Bhatewara break; 208509c5088eSShreyas Bhatewara } 208609c5088eSShreyas Bhatewara } 208709c5088eSShreyas Bhatewara 208809c5088eSShreyas Bhatewara for (i = 0; i < adapter->num_rx_queues; i++) { 208909c5088eSShreyas Bhatewara free_irq(intr->msix_entries[vector++].vector, 209009c5088eSShreyas Bhatewara &(adapter->rx_queue[i])); 209109c5088eSShreyas Bhatewara } 209209c5088eSShreyas Bhatewara 209309c5088eSShreyas Bhatewara free_irq(intr->msix_entries[vector].vector, 2094d1a890faSShreyas Bhatewara adapter->netdev); 209509c5088eSShreyas Bhatewara BUG_ON(vector >= intr->num_intrs); 2096d1a890faSShreyas Bhatewara break; 2097d1a890faSShreyas Bhatewara } 20988f7e524cSRandy Dunlap #endif 2099d1a890faSShreyas Bhatewara case VMXNET3_IT_MSI: 2100d1a890faSShreyas Bhatewara free_irq(adapter->pdev->irq, adapter->netdev); 2101d1a890faSShreyas Bhatewara break; 2102d1a890faSShreyas Bhatewara case VMXNET3_IT_INTX: 2103d1a890faSShreyas Bhatewara free_irq(adapter->pdev->irq, adapter->netdev); 2104d1a890faSShreyas Bhatewara break; 2105d1a890faSShreyas Bhatewara default: 2106c068e777SSasha Levin BUG(); 2107d1a890faSShreyas 
Bhatewara } 2108d1a890faSShreyas Bhatewara } 2109d1a890faSShreyas Bhatewara 2110d1a890faSShreyas Bhatewara 2111d1a890faSShreyas Bhatewara static void 2112d1a890faSShreyas Bhatewara vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter) 2113d1a890faSShreyas Bhatewara { 2114d1a890faSShreyas Bhatewara u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; 211572e85c45SJesse Gross u16 vid; 2116d1a890faSShreyas Bhatewara 211772e85c45SJesse Gross /* allow untagged pkts */ 2118d1a890faSShreyas Bhatewara VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0); 211972e85c45SJesse Gross 212072e85c45SJesse Gross for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) 212172e85c45SJesse Gross VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid); 2122d1a890faSShreyas Bhatewara } 2123d1a890faSShreyas Bhatewara 2124d1a890faSShreyas Bhatewara 21258e586137SJiri Pirko static int 212680d5c368SPatrick McHardy vmxnet3_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid) 2127d1a890faSShreyas Bhatewara { 2128d1a890faSShreyas Bhatewara struct vmxnet3_adapter *adapter = netdev_priv(netdev); 2129f6957f88SJesse Gross 2130f6957f88SJesse Gross if (!(netdev->flags & IFF_PROMISC)) { 2131d1a890faSShreyas Bhatewara u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; 213283d0feffSShreyas Bhatewara unsigned long flags; 2133d1a890faSShreyas Bhatewara 2134d1a890faSShreyas Bhatewara VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid); 213583d0feffSShreyas Bhatewara spin_lock_irqsave(&adapter->cmd_lock, flags); 2136d1a890faSShreyas Bhatewara VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 2137d1a890faSShreyas Bhatewara VMXNET3_CMD_UPDATE_VLAN_FILTERS); 213883d0feffSShreyas Bhatewara spin_unlock_irqrestore(&adapter->cmd_lock, flags); 2139f6957f88SJesse Gross } 214072e85c45SJesse Gross 214172e85c45SJesse Gross set_bit(vid, adapter->active_vlans); 21428e586137SJiri Pirko 21438e586137SJiri Pirko return 0; 2144d1a890faSShreyas Bhatewara } 2145d1a890faSShreyas Bhatewara 2146d1a890faSShreyas Bhatewara 21478e586137SJiri Pirko static int 214880d5c368SPatrick McHardy vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) 2149d1a890faSShreyas Bhatewara { 2150d1a890faSShreyas Bhatewara struct vmxnet3_adapter *adapter = netdev_priv(netdev); 2151f6957f88SJesse Gross 2152f6957f88SJesse Gross if (!(netdev->flags & IFF_PROMISC)) { 2153d1a890faSShreyas Bhatewara u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; 215483d0feffSShreyas Bhatewara unsigned long flags; 2155d1a890faSShreyas Bhatewara 2156d1a890faSShreyas Bhatewara VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid); 215783d0feffSShreyas Bhatewara spin_lock_irqsave(&adapter->cmd_lock, flags); 2158d1a890faSShreyas Bhatewara VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 2159d1a890faSShreyas Bhatewara VMXNET3_CMD_UPDATE_VLAN_FILTERS); 216083d0feffSShreyas Bhatewara spin_unlock_irqrestore(&adapter->cmd_lock, flags); 2161f6957f88SJesse Gross } 216272e85c45SJesse Gross 216372e85c45SJesse Gross clear_bit(vid, adapter->active_vlans); 21648e586137SJiri Pirko 21658e586137SJiri Pirko return 0; 2166d1a890faSShreyas Bhatewara } 2167d1a890faSShreyas Bhatewara 2168d1a890faSShreyas Bhatewara 2169d1a890faSShreyas Bhatewara static u8 * 2170d1a890faSShreyas Bhatewara vmxnet3_copy_mc(struct net_device *netdev) 2171d1a890faSShreyas Bhatewara { 2172d1a890faSShreyas Bhatewara u8 *buf = NULL; 21734cd24eafSJiri Pirko u32 sz = netdev_mc_count(netdev) * ETH_ALEN; 2174d1a890faSShreyas Bhatewara 2175d1a890faSShreyas Bhatewara /* struct Vmxnet3_RxFilterConf.mfTableLen is u16. 
*/ 2176d1a890faSShreyas Bhatewara if (sz <= 0xffff) { 2177d1a890faSShreyas Bhatewara /* We may be called with BH disabled */ 2178d1a890faSShreyas Bhatewara buf = kmalloc(sz, GFP_ATOMIC); 2179d1a890faSShreyas Bhatewara if (buf) { 218022bedad3SJiri Pirko struct netdev_hw_addr *ha; 2181567ec874SJiri Pirko int i = 0; 2182d1a890faSShreyas Bhatewara 218322bedad3SJiri Pirko netdev_for_each_mc_addr(ha, netdev) 218422bedad3SJiri Pirko memcpy(buf + i++ * ETH_ALEN, ha->addr, 2185d1a890faSShreyas Bhatewara ETH_ALEN); 2186d1a890faSShreyas Bhatewara } 2187d1a890faSShreyas Bhatewara } 2188d1a890faSShreyas Bhatewara return buf; 2189d1a890faSShreyas Bhatewara } 2190d1a890faSShreyas Bhatewara 2191d1a890faSShreyas Bhatewara 2192d1a890faSShreyas Bhatewara static void 2193d1a890faSShreyas Bhatewara vmxnet3_set_mc(struct net_device *netdev) 2194d1a890faSShreyas Bhatewara { 2195d1a890faSShreyas Bhatewara struct vmxnet3_adapter *adapter = netdev_priv(netdev); 219683d0feffSShreyas Bhatewara unsigned long flags; 2197d1a890faSShreyas Bhatewara struct Vmxnet3_RxFilterConf *rxConf = 2198d1a890faSShreyas Bhatewara &adapter->shared->devRead.rxFilterConf; 2199d1a890faSShreyas Bhatewara u8 *new_table = NULL; 2200b0eb57cbSAndy King dma_addr_t new_table_pa = 0; 2201d1a890faSShreyas Bhatewara u32 new_mode = VMXNET3_RXM_UCAST; 2202d1a890faSShreyas Bhatewara 220372e85c45SJesse Gross if (netdev->flags & IFF_PROMISC) { 220472e85c45SJesse Gross u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; 220572e85c45SJesse Gross memset(vfTable, 0, VMXNET3_VFT_SIZE * sizeof(*vfTable)); 220672e85c45SJesse Gross 2207d1a890faSShreyas Bhatewara new_mode |= VMXNET3_RXM_PROMISC; 220872e85c45SJesse Gross } else { 220972e85c45SJesse Gross vmxnet3_restore_vlan(adapter); 221072e85c45SJesse Gross } 2211d1a890faSShreyas Bhatewara 2212d1a890faSShreyas Bhatewara if (netdev->flags & IFF_BROADCAST) 2213d1a890faSShreyas Bhatewara new_mode |= VMXNET3_RXM_BCAST; 2214d1a890faSShreyas Bhatewara 2215d1a890faSShreyas Bhatewara if (netdev->flags & IFF_ALLMULTI) 2216d1a890faSShreyas Bhatewara new_mode |= VMXNET3_RXM_ALL_MULTI; 2217d1a890faSShreyas Bhatewara else 22184cd24eafSJiri Pirko if (!netdev_mc_empty(netdev)) { 2219d1a890faSShreyas Bhatewara new_table = vmxnet3_copy_mc(netdev); 2220d1a890faSShreyas Bhatewara if (new_table) { 2221d37d5ec8SShrikrishna Khare size_t sz = netdev_mc_count(netdev) * ETH_ALEN; 2222d37d5ec8SShrikrishna Khare 2223d37d5ec8SShrikrishna Khare rxConf->mfTableLen = cpu_to_le16(sz); 2224b0eb57cbSAndy King new_table_pa = dma_map_single( 2225b0eb57cbSAndy King &adapter->pdev->dev, 2226b0eb57cbSAndy King new_table, 2227d37d5ec8SShrikrishna Khare sz, 2228b0eb57cbSAndy King PCI_DMA_TODEVICE); 22294ad9a64fSAndy King } 22304ad9a64fSAndy King 22315738a09dSAlexey Khoroshilov if (!dma_mapping_error(&adapter->pdev->dev, 22325738a09dSAlexey Khoroshilov new_table_pa)) { 22334ad9a64fSAndy King new_mode |= VMXNET3_RXM_MCAST; 2234b0eb57cbSAndy King rxConf->mfTablePA = cpu_to_le64(new_table_pa); 2235d1a890faSShreyas Bhatewara } else { 22364ad9a64fSAndy King netdev_info(netdev, 22374ad9a64fSAndy King "failed to copy mcast list, setting ALL_MULTI\n"); 2238d1a890faSShreyas Bhatewara new_mode |= VMXNET3_RXM_ALL_MULTI; 2239d1a890faSShreyas Bhatewara } 2240d1a890faSShreyas Bhatewara } 2241d1a890faSShreyas Bhatewara 2242d1a890faSShreyas Bhatewara if (!(new_mode & VMXNET3_RXM_MCAST)) { 2243d1a890faSShreyas Bhatewara rxConf->mfTableLen = 0; 2244d1a890faSShreyas Bhatewara rxConf->mfTablePA = 0; 2245d1a890faSShreyas Bhatewara } 2246d1a890faSShreyas Bhatewara 
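/*
 * At this point new_mode and, when a multicast list is in use, the
 * DMA-mapped copy of that list describe the requested filter state.
 * The locked section below pushes them to the device with the
 * UPDATE_RX_MODE, UPDATE_VLAN_FILTERS and UPDATE_MAC_FILTERS commands
 * before the temporary table is unmapped and freed.
 */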
224783d0feffSShreyas Bhatewara spin_lock_irqsave(&adapter->cmd_lock, flags); 2248d1a890faSShreyas Bhatewara if (new_mode != rxConf->rxMode) { 2249115924b6SShreyas Bhatewara rxConf->rxMode = cpu_to_le32(new_mode); 2250d1a890faSShreyas Bhatewara VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 2251d1a890faSShreyas Bhatewara VMXNET3_CMD_UPDATE_RX_MODE); 225272e85c45SJesse Gross VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 225372e85c45SJesse Gross VMXNET3_CMD_UPDATE_VLAN_FILTERS); 2254d1a890faSShreyas Bhatewara } 2255d1a890faSShreyas Bhatewara 2256d1a890faSShreyas Bhatewara VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 2257d1a890faSShreyas Bhatewara VMXNET3_CMD_UPDATE_MAC_FILTERS); 225883d0feffSShreyas Bhatewara spin_unlock_irqrestore(&adapter->cmd_lock, flags); 2259d1a890faSShreyas Bhatewara 22604ad9a64fSAndy King if (new_table_pa) 2261b0eb57cbSAndy King dma_unmap_single(&adapter->pdev->dev, new_table_pa, 2262b0eb57cbSAndy King rxConf->mfTableLen, PCI_DMA_TODEVICE); 2263d1a890faSShreyas Bhatewara kfree(new_table); 2264d1a890faSShreyas Bhatewara } 2265d1a890faSShreyas Bhatewara 226609c5088eSShreyas Bhatewara void 226709c5088eSShreyas Bhatewara vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter) 226809c5088eSShreyas Bhatewara { 226909c5088eSShreyas Bhatewara int i; 227009c5088eSShreyas Bhatewara 227109c5088eSShreyas Bhatewara for (i = 0; i < adapter->num_rx_queues; i++) 227209c5088eSShreyas Bhatewara vmxnet3_rq_destroy(&adapter->rx_queue[i], adapter); 227309c5088eSShreyas Bhatewara } 227409c5088eSShreyas Bhatewara 2275d1a890faSShreyas Bhatewara 2276d1a890faSShreyas Bhatewara /* 2277d1a890faSShreyas Bhatewara * Set up driver_shared based on settings in adapter. 2278d1a890faSShreyas Bhatewara */ 2279d1a890faSShreyas Bhatewara 2280d1a890faSShreyas Bhatewara static void 2281d1a890faSShreyas Bhatewara vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter) 2282d1a890faSShreyas Bhatewara { 2283d1a890faSShreyas Bhatewara struct Vmxnet3_DriverShared *shared = adapter->shared; 2284d1a890faSShreyas Bhatewara struct Vmxnet3_DSDevRead *devRead = &shared->devRead; 2285d1a890faSShreyas Bhatewara struct Vmxnet3_TxQueueConf *tqc; 2286d1a890faSShreyas Bhatewara struct Vmxnet3_RxQueueConf *rqc; 2287d1a890faSShreyas Bhatewara int i; 2288d1a890faSShreyas Bhatewara 2289d1a890faSShreyas Bhatewara memset(shared, 0, sizeof(*shared)); 2290d1a890faSShreyas Bhatewara 2291d1a890faSShreyas Bhatewara /* driver settings */ 2292115924b6SShreyas Bhatewara shared->magic = cpu_to_le32(VMXNET3_REV1_MAGIC); 2293115924b6SShreyas Bhatewara devRead->misc.driverInfo.version = cpu_to_le32( 2294115924b6SShreyas Bhatewara VMXNET3_DRIVER_VERSION_NUM); 2295d1a890faSShreyas Bhatewara devRead->misc.driverInfo.gos.gosBits = (sizeof(void *) == 4 ? 
2296d1a890faSShreyas Bhatewara VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64); 2297d1a890faSShreyas Bhatewara devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX; 2298115924b6SShreyas Bhatewara *((u32 *)&devRead->misc.driverInfo.gos) = cpu_to_le32( 2299115924b6SShreyas Bhatewara *((u32 *)&devRead->misc.driverInfo.gos)); 2300115924b6SShreyas Bhatewara devRead->misc.driverInfo.vmxnet3RevSpt = cpu_to_le32(1); 2301115924b6SShreyas Bhatewara devRead->misc.driverInfo.uptVerSpt = cpu_to_le32(1); 2302d1a890faSShreyas Bhatewara 2303b0eb57cbSAndy King devRead->misc.ddPA = cpu_to_le64(adapter->adapter_pa); 2304115924b6SShreyas Bhatewara devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter)); 2305d1a890faSShreyas Bhatewara 2306d1a890faSShreyas Bhatewara /* set up feature flags */ 2307a0d2730cSMichał Mirosław if (adapter->netdev->features & NETIF_F_RXCSUM) 23083843e515SHarvey Harrison devRead->misc.uptFeatures |= UPT1_F_RXCSUM; 2309d1a890faSShreyas Bhatewara 2310a0d2730cSMichał Mirosław if (adapter->netdev->features & NETIF_F_LRO) { 23113843e515SHarvey Harrison devRead->misc.uptFeatures |= UPT1_F_LRO; 2312115924b6SShreyas Bhatewara devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS); 2313d1a890faSShreyas Bhatewara } 2314f646968fSPatrick McHardy if (adapter->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) 23153843e515SHarvey Harrison devRead->misc.uptFeatures |= UPT1_F_RXVLAN; 2316d1a890faSShreyas Bhatewara 2317115924b6SShreyas Bhatewara devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu); 2318115924b6SShreyas Bhatewara devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa); 2319115924b6SShreyas Bhatewara devRead->misc.queueDescLen = cpu_to_le32( 232009c5088eSShreyas Bhatewara adapter->num_tx_queues * sizeof(struct Vmxnet3_TxQueueDesc) + 232109c5088eSShreyas Bhatewara adapter->num_rx_queues * sizeof(struct Vmxnet3_RxQueueDesc)); 2322d1a890faSShreyas Bhatewara 2323d1a890faSShreyas Bhatewara /* tx queue settings */ 232409c5088eSShreyas Bhatewara devRead->misc.numTxQueues = adapter->num_tx_queues; 232509c5088eSShreyas Bhatewara for (i = 0; i < adapter->num_tx_queues; i++) { 232609c5088eSShreyas Bhatewara struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i]; 232709c5088eSShreyas Bhatewara BUG_ON(adapter->tx_queue[i].tx_ring.base == NULL); 232809c5088eSShreyas Bhatewara tqc = &adapter->tqd_start[i].conf; 232909c5088eSShreyas Bhatewara tqc->txRingBasePA = cpu_to_le64(tq->tx_ring.basePA); 233009c5088eSShreyas Bhatewara tqc->dataRingBasePA = cpu_to_le64(tq->data_ring.basePA); 233109c5088eSShreyas Bhatewara tqc->compRingBasePA = cpu_to_le64(tq->comp_ring.basePA); 2332b0eb57cbSAndy King tqc->ddPA = cpu_to_le64(tq->buf_info_pa); 233309c5088eSShreyas Bhatewara tqc->txRingSize = cpu_to_le32(tq->tx_ring.size); 233409c5088eSShreyas Bhatewara tqc->dataRingSize = cpu_to_le32(tq->data_ring.size); 233509c5088eSShreyas Bhatewara tqc->compRingSize = cpu_to_le32(tq->comp_ring.size); 233609c5088eSShreyas Bhatewara tqc->ddLen = cpu_to_le32( 233709c5088eSShreyas Bhatewara sizeof(struct vmxnet3_tx_buf_info) * 2338115924b6SShreyas Bhatewara tqc->txRingSize); 233909c5088eSShreyas Bhatewara tqc->intrIdx = tq->comp_ring.intr_idx; 234009c5088eSShreyas Bhatewara } 2341d1a890faSShreyas Bhatewara 2342d1a890faSShreyas Bhatewara /* rx queue settings */ 234309c5088eSShreyas Bhatewara devRead->misc.numRxQueues = adapter->num_rx_queues; 234409c5088eSShreyas Bhatewara for (i = 0; i < adapter->num_rx_queues; i++) { 234509c5088eSShreyas Bhatewara struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i]; 
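/*
 * Each rx queue is described to the device by a Vmxnet3_RxQueueConf:
 * the two fill rings, the completion ring and the driver-private
 * buf_info area are exported below as guest-physical addresses and
 * ring sizes, along with the interrupt vector used by the queue's
 * completion ring.
 */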
234609c5088eSShreyas Bhatewara rqc = &adapter->rqd_start[i].conf; 234709c5088eSShreyas Bhatewara rqc->rxRingBasePA[0] = cpu_to_le64(rq->rx_ring[0].basePA); 234809c5088eSShreyas Bhatewara rqc->rxRingBasePA[1] = cpu_to_le64(rq->rx_ring[1].basePA); 234909c5088eSShreyas Bhatewara rqc->compRingBasePA = cpu_to_le64(rq->comp_ring.basePA); 2350b0eb57cbSAndy King rqc->ddPA = cpu_to_le64(rq->buf_info_pa); 235109c5088eSShreyas Bhatewara rqc->rxRingSize[0] = cpu_to_le32(rq->rx_ring[0].size); 235209c5088eSShreyas Bhatewara rqc->rxRingSize[1] = cpu_to_le32(rq->rx_ring[1].size); 235309c5088eSShreyas Bhatewara rqc->compRingSize = cpu_to_le32(rq->comp_ring.size); 235409c5088eSShreyas Bhatewara rqc->ddLen = cpu_to_le32( 235509c5088eSShreyas Bhatewara sizeof(struct vmxnet3_rx_buf_info) * 235609c5088eSShreyas Bhatewara (rqc->rxRingSize[0] + 235709c5088eSShreyas Bhatewara rqc->rxRingSize[1])); 235809c5088eSShreyas Bhatewara rqc->intrIdx = rq->comp_ring.intr_idx; 235909c5088eSShreyas Bhatewara } 236009c5088eSShreyas Bhatewara 236109c5088eSShreyas Bhatewara #ifdef VMXNET3_RSS 236209c5088eSShreyas Bhatewara memset(adapter->rss_conf, 0, sizeof(*adapter->rss_conf)); 236309c5088eSShreyas Bhatewara 236409c5088eSShreyas Bhatewara if (adapter->rss) { 236509c5088eSShreyas Bhatewara struct UPT1_RSSConf *rssConf = adapter->rss_conf; 236666d35910SStephen Hemminger 236709c5088eSShreyas Bhatewara devRead->misc.uptFeatures |= UPT1_F_RSS; 236809c5088eSShreyas Bhatewara devRead->misc.numRxQueues = adapter->num_rx_queues; 236909c5088eSShreyas Bhatewara rssConf->hashType = UPT1_RSS_HASH_TYPE_TCP_IPV4 | 237009c5088eSShreyas Bhatewara UPT1_RSS_HASH_TYPE_IPV4 | 237109c5088eSShreyas Bhatewara UPT1_RSS_HASH_TYPE_TCP_IPV6 | 237209c5088eSShreyas Bhatewara UPT1_RSS_HASH_TYPE_IPV6; 237309c5088eSShreyas Bhatewara rssConf->hashFunc = UPT1_RSS_HASH_FUNC_TOEPLITZ; 237409c5088eSShreyas Bhatewara rssConf->hashKeySize = UPT1_RSS_MAX_KEY_SIZE; 237509c5088eSShreyas Bhatewara rssConf->indTableSize = VMXNET3_RSS_IND_TABLE_SIZE; 23766bf79cddSEric Dumazet netdev_rss_key_fill(rssConf->hashKey, sizeof(rssConf->hashKey)); 237766d35910SStephen Hemminger 237809c5088eSShreyas Bhatewara for (i = 0; i < rssConf->indTableSize; i++) 2379278bc429SBen Hutchings rssConf->indTable[i] = ethtool_rxfh_indir_default( 2380278bc429SBen Hutchings i, adapter->num_rx_queues); 238109c5088eSShreyas Bhatewara 238209c5088eSShreyas Bhatewara devRead->rssConfDesc.confVer = 1; 2383b0eb57cbSAndy King devRead->rssConfDesc.confLen = cpu_to_le32(sizeof(*rssConf)); 2384b0eb57cbSAndy King devRead->rssConfDesc.confPA = 2385b0eb57cbSAndy King cpu_to_le64(adapter->rss_conf_pa); 238609c5088eSShreyas Bhatewara } 238709c5088eSShreyas Bhatewara 238809c5088eSShreyas Bhatewara #endif /* VMXNET3_RSS */ 2389d1a890faSShreyas Bhatewara 2390d1a890faSShreyas Bhatewara /* intr settings */ 2391d1a890faSShreyas Bhatewara devRead->intrConf.autoMask = adapter->intr.mask_mode == 2392d1a890faSShreyas Bhatewara VMXNET3_IMM_AUTO; 2393d1a890faSShreyas Bhatewara devRead->intrConf.numIntrs = adapter->intr.num_intrs; 2394d1a890faSShreyas Bhatewara for (i = 0; i < adapter->intr.num_intrs; i++) 2395d1a890faSShreyas Bhatewara devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i]; 2396d1a890faSShreyas Bhatewara 2397d1a890faSShreyas Bhatewara devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx; 23986929fe8aSRonghua Zang devRead->intrConf.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL); 2399d1a890faSShreyas Bhatewara 2400d1a890faSShreyas Bhatewara /* rx filter settings */ 2401d1a890faSShreyas Bhatewara 
devRead->rxFilterConf.rxMode = 0; 2402d1a890faSShreyas Bhatewara vmxnet3_restore_vlan(adapter); 2403f9f25026SShreyas Bhatewara vmxnet3_write_mac_addr(adapter, adapter->netdev->dev_addr); 2404f9f25026SShreyas Bhatewara 2405d1a890faSShreyas Bhatewara /* the rest are already zeroed */ 2406d1a890faSShreyas Bhatewara } 2407d1a890faSShreyas Bhatewara 2408d1a890faSShreyas Bhatewara 2409d1a890faSShreyas Bhatewara int 2410d1a890faSShreyas Bhatewara vmxnet3_activate_dev(struct vmxnet3_adapter *adapter) 2411d1a890faSShreyas Bhatewara { 241209c5088eSShreyas Bhatewara int err, i; 2413d1a890faSShreyas Bhatewara u32 ret; 241483d0feffSShreyas Bhatewara unsigned long flags; 2415d1a890faSShreyas Bhatewara 2416fdcd79b9SStephen Hemminger netdev_dbg(adapter->netdev, "%s: skb_buf_size %d, rx_buf_per_pkt %d," 241709c5088eSShreyas Bhatewara " ring sizes %u %u %u\n", adapter->netdev->name, 241809c5088eSShreyas Bhatewara adapter->skb_buf_size, adapter->rx_buf_per_pkt, 241909c5088eSShreyas Bhatewara adapter->tx_queue[0].tx_ring.size, 242009c5088eSShreyas Bhatewara adapter->rx_queue[0].rx_ring[0].size, 242109c5088eSShreyas Bhatewara adapter->rx_queue[0].rx_ring[1].size); 2422d1a890faSShreyas Bhatewara 242309c5088eSShreyas Bhatewara vmxnet3_tq_init_all(adapter); 242409c5088eSShreyas Bhatewara err = vmxnet3_rq_init_all(adapter); 2425d1a890faSShreyas Bhatewara if (err) { 2426204a6e65SStephen Hemminger netdev_err(adapter->netdev, 2427204a6e65SStephen Hemminger "Failed to init rx queue error %d\n", err); 2428d1a890faSShreyas Bhatewara goto rq_err; 2429d1a890faSShreyas Bhatewara } 2430d1a890faSShreyas Bhatewara 2431d1a890faSShreyas Bhatewara err = vmxnet3_request_irqs(adapter); 2432d1a890faSShreyas Bhatewara if (err) { 2433204a6e65SStephen Hemminger netdev_err(adapter->netdev, 2434204a6e65SStephen Hemminger "Failed to setup irq for error %d\n", err); 2435d1a890faSShreyas Bhatewara goto irq_err; 2436d1a890faSShreyas Bhatewara } 2437d1a890faSShreyas Bhatewara 2438d1a890faSShreyas Bhatewara vmxnet3_setup_driver_shared(adapter); 2439d1a890faSShreyas Bhatewara 2440115924b6SShreyas Bhatewara VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, VMXNET3_GET_ADDR_LO( 2441115924b6SShreyas Bhatewara adapter->shared_pa)); 2442115924b6SShreyas Bhatewara VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI( 2443115924b6SShreyas Bhatewara adapter->shared_pa)); 244483d0feffSShreyas Bhatewara spin_lock_irqsave(&adapter->cmd_lock, flags); 2445d1a890faSShreyas Bhatewara VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 2446d1a890faSShreyas Bhatewara VMXNET3_CMD_ACTIVATE_DEV); 2447d1a890faSShreyas Bhatewara ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); 244883d0feffSShreyas Bhatewara spin_unlock_irqrestore(&adapter->cmd_lock, flags); 2449d1a890faSShreyas Bhatewara 2450d1a890faSShreyas Bhatewara if (ret != 0) { 2451204a6e65SStephen Hemminger netdev_err(adapter->netdev, 2452204a6e65SStephen Hemminger "Failed to activate dev: error %u\n", ret); 2453d1a890faSShreyas Bhatewara err = -EINVAL; 2454d1a890faSShreyas Bhatewara goto activate_err; 2455d1a890faSShreyas Bhatewara } 245609c5088eSShreyas Bhatewara 245709c5088eSShreyas Bhatewara for (i = 0; i < adapter->num_rx_queues; i++) { 245809c5088eSShreyas Bhatewara VMXNET3_WRITE_BAR0_REG(adapter, 245909c5088eSShreyas Bhatewara VMXNET3_REG_RXPROD + i * VMXNET3_REG_ALIGN, 246009c5088eSShreyas Bhatewara adapter->rx_queue[i].rx_ring[0].next2fill); 246109c5088eSShreyas Bhatewara VMXNET3_WRITE_BAR0_REG(adapter, (VMXNET3_REG_RXPROD2 + 246209c5088eSShreyas Bhatewara (i * 
VMXNET3_REG_ALIGN)), 246309c5088eSShreyas Bhatewara adapter->rx_queue[i].rx_ring[1].next2fill); 246409c5088eSShreyas Bhatewara } 2465d1a890faSShreyas Bhatewara 2466d1a890faSShreyas Bhatewara /* Apply the rx filter settings last. */ 2467d1a890faSShreyas Bhatewara vmxnet3_set_mc(adapter->netdev); 2468d1a890faSShreyas Bhatewara 2469d1a890faSShreyas Bhatewara /* 2470d1a890faSShreyas Bhatewara * Check link state when first activating the device. It will start the 2471d1a890faSShreyas Bhatewara * tx queue if the link is up. 2472d1a890faSShreyas Bhatewara */ 24734a1745fcSShreyas Bhatewara vmxnet3_check_link(adapter, true); 247409c5088eSShreyas Bhatewara for (i = 0; i < adapter->num_rx_queues; i++) 247509c5088eSShreyas Bhatewara napi_enable(&adapter->rx_queue[i].napi); 2476d1a890faSShreyas Bhatewara vmxnet3_enable_all_intrs(adapter); 2477d1a890faSShreyas Bhatewara clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state); 2478d1a890faSShreyas Bhatewara return 0; 2479d1a890faSShreyas Bhatewara 2480d1a890faSShreyas Bhatewara activate_err: 2481d1a890faSShreyas Bhatewara VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, 0); 2482d1a890faSShreyas Bhatewara VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, 0); 2483d1a890faSShreyas Bhatewara vmxnet3_free_irqs(adapter); 2484d1a890faSShreyas Bhatewara irq_err: 2485d1a890faSShreyas Bhatewara rq_err: 2486d1a890faSShreyas Bhatewara /* free up buffers we allocated */ 248709c5088eSShreyas Bhatewara vmxnet3_rq_cleanup_all(adapter); 2488d1a890faSShreyas Bhatewara return err; 2489d1a890faSShreyas Bhatewara } 2490d1a890faSShreyas Bhatewara 2491d1a890faSShreyas Bhatewara 2492d1a890faSShreyas Bhatewara void 2493d1a890faSShreyas Bhatewara vmxnet3_reset_dev(struct vmxnet3_adapter *adapter) 2494d1a890faSShreyas Bhatewara { 249583d0feffSShreyas Bhatewara unsigned long flags; 249683d0feffSShreyas Bhatewara spin_lock_irqsave(&adapter->cmd_lock, flags); 2497d1a890faSShreyas Bhatewara VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV); 249883d0feffSShreyas Bhatewara spin_unlock_irqrestore(&adapter->cmd_lock, flags); 2499d1a890faSShreyas Bhatewara } 2500d1a890faSShreyas Bhatewara 2501d1a890faSShreyas Bhatewara 2502d1a890faSShreyas Bhatewara int 2503d1a890faSShreyas Bhatewara vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter) 2504d1a890faSShreyas Bhatewara { 250509c5088eSShreyas Bhatewara int i; 250683d0feffSShreyas Bhatewara unsigned long flags; 2507d1a890faSShreyas Bhatewara if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state)) 2508d1a890faSShreyas Bhatewara return 0; 2509d1a890faSShreyas Bhatewara 2510d1a890faSShreyas Bhatewara 251183d0feffSShreyas Bhatewara spin_lock_irqsave(&adapter->cmd_lock, flags); 2512d1a890faSShreyas Bhatewara VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 2513d1a890faSShreyas Bhatewara VMXNET3_CMD_QUIESCE_DEV); 251483d0feffSShreyas Bhatewara spin_unlock_irqrestore(&adapter->cmd_lock, flags); 2515d1a890faSShreyas Bhatewara vmxnet3_disable_all_intrs(adapter); 2516d1a890faSShreyas Bhatewara 251709c5088eSShreyas Bhatewara for (i = 0; i < adapter->num_rx_queues; i++) 251809c5088eSShreyas Bhatewara napi_disable(&adapter->rx_queue[i].napi); 2519d1a890faSShreyas Bhatewara netif_tx_disable(adapter->netdev); 2520d1a890faSShreyas Bhatewara adapter->link_speed = 0; 2521d1a890faSShreyas Bhatewara netif_carrier_off(adapter->netdev); 2522d1a890faSShreyas Bhatewara 252309c5088eSShreyas Bhatewara vmxnet3_tq_cleanup_all(adapter); 252409c5088eSShreyas Bhatewara vmxnet3_rq_cleanup_all(adapter); 2525d1a890faSShreyas Bhatewara 
vmxnet3_free_irqs(adapter); 2526d1a890faSShreyas Bhatewara return 0; 2527d1a890faSShreyas Bhatewara } 2528d1a890faSShreyas Bhatewara 2529d1a890faSShreyas Bhatewara 2530d1a890faSShreyas Bhatewara static void 2531d1a890faSShreyas Bhatewara vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac) 2532d1a890faSShreyas Bhatewara { 2533d1a890faSShreyas Bhatewara u32 tmp; 2534d1a890faSShreyas Bhatewara 2535d1a890faSShreyas Bhatewara tmp = *(u32 *)mac; 2536d1a890faSShreyas Bhatewara VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACL, tmp); 2537d1a890faSShreyas Bhatewara 2538d1a890faSShreyas Bhatewara tmp = (mac[5] << 8) | mac[4]; 2539d1a890faSShreyas Bhatewara VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACH, tmp); 2540d1a890faSShreyas Bhatewara } 2541d1a890faSShreyas Bhatewara 2542d1a890faSShreyas Bhatewara 2543d1a890faSShreyas Bhatewara static int 2544d1a890faSShreyas Bhatewara vmxnet3_set_mac_addr(struct net_device *netdev, void *p) 2545d1a890faSShreyas Bhatewara { 2546d1a890faSShreyas Bhatewara struct sockaddr *addr = p; 2547d1a890faSShreyas Bhatewara struct vmxnet3_adapter *adapter = netdev_priv(netdev); 2548d1a890faSShreyas Bhatewara 2549d1a890faSShreyas Bhatewara memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 2550d1a890faSShreyas Bhatewara vmxnet3_write_mac_addr(adapter, addr->sa_data); 2551d1a890faSShreyas Bhatewara 2552d1a890faSShreyas Bhatewara return 0; 2553d1a890faSShreyas Bhatewara } 2554d1a890faSShreyas Bhatewara 2555d1a890faSShreyas Bhatewara 2556d1a890faSShreyas Bhatewara /* ==================== initialization and cleanup routines ============ */ 2557d1a890faSShreyas Bhatewara 2558d1a890faSShreyas Bhatewara static int 2559d1a890faSShreyas Bhatewara vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64) 2560d1a890faSShreyas Bhatewara { 2561d1a890faSShreyas Bhatewara int err; 2562d1a890faSShreyas Bhatewara unsigned long mmio_start, mmio_len; 2563d1a890faSShreyas Bhatewara struct pci_dev *pdev = adapter->pdev; 2564d1a890faSShreyas Bhatewara 2565d1a890faSShreyas Bhatewara err = pci_enable_device(pdev); 2566d1a890faSShreyas Bhatewara if (err) { 2567204a6e65SStephen Hemminger dev_err(&pdev->dev, "Failed to enable adapter: error %d\n", err); 2568d1a890faSShreyas Bhatewara return err; 2569d1a890faSShreyas Bhatewara } 2570d1a890faSShreyas Bhatewara 2571d1a890faSShreyas Bhatewara if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) { 2572d1a890faSShreyas Bhatewara if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) { 2573204a6e65SStephen Hemminger dev_err(&pdev->dev, 2574204a6e65SStephen Hemminger "pci_set_consistent_dma_mask failed\n"); 2575d1a890faSShreyas Bhatewara err = -EIO; 2576d1a890faSShreyas Bhatewara goto err_set_mask; 2577d1a890faSShreyas Bhatewara } 2578d1a890faSShreyas Bhatewara *dma64 = true; 2579d1a890faSShreyas Bhatewara } else { 2580d1a890faSShreyas Bhatewara if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) { 2581204a6e65SStephen Hemminger dev_err(&pdev->dev, 2582204a6e65SStephen Hemminger "pci_set_dma_mask failed\n"); 2583d1a890faSShreyas Bhatewara err = -EIO; 2584d1a890faSShreyas Bhatewara goto err_set_mask; 2585d1a890faSShreyas Bhatewara } 2586d1a890faSShreyas Bhatewara *dma64 = false; 2587d1a890faSShreyas Bhatewara } 2588d1a890faSShreyas Bhatewara 2589d1a890faSShreyas Bhatewara err = pci_request_selected_regions(pdev, (1 << 2) - 1, 2590d1a890faSShreyas Bhatewara vmxnet3_driver_name); 2591d1a890faSShreyas Bhatewara if (err) { 2592204a6e65SStephen Hemminger dev_err(&pdev->dev, 2593204a6e65SStephen Hemminger "Failed to request 
region for adapter: error %d\n", err); 2594d1a890faSShreyas Bhatewara goto err_set_mask; 2595d1a890faSShreyas Bhatewara } 2596d1a890faSShreyas Bhatewara 2597d1a890faSShreyas Bhatewara pci_set_master(pdev); 2598d1a890faSShreyas Bhatewara 2599d1a890faSShreyas Bhatewara mmio_start = pci_resource_start(pdev, 0); 2600d1a890faSShreyas Bhatewara mmio_len = pci_resource_len(pdev, 0); 2601d1a890faSShreyas Bhatewara adapter->hw_addr0 = ioremap(mmio_start, mmio_len); 2602d1a890faSShreyas Bhatewara if (!adapter->hw_addr0) { 2603204a6e65SStephen Hemminger dev_err(&pdev->dev, "Failed to map bar0\n"); 2604d1a890faSShreyas Bhatewara err = -EIO; 2605d1a890faSShreyas Bhatewara goto err_ioremap; 2606d1a890faSShreyas Bhatewara } 2607d1a890faSShreyas Bhatewara 2608d1a890faSShreyas Bhatewara mmio_start = pci_resource_start(pdev, 1); 2609d1a890faSShreyas Bhatewara mmio_len = pci_resource_len(pdev, 1); 2610d1a890faSShreyas Bhatewara adapter->hw_addr1 = ioremap(mmio_start, mmio_len); 2611d1a890faSShreyas Bhatewara if (!adapter->hw_addr1) { 2612204a6e65SStephen Hemminger dev_err(&pdev->dev, "Failed to map bar1\n"); 2613d1a890faSShreyas Bhatewara err = -EIO; 2614d1a890faSShreyas Bhatewara goto err_bar1; 2615d1a890faSShreyas Bhatewara } 2616d1a890faSShreyas Bhatewara return 0; 2617d1a890faSShreyas Bhatewara 2618d1a890faSShreyas Bhatewara err_bar1: 2619d1a890faSShreyas Bhatewara iounmap(adapter->hw_addr0); 2620d1a890faSShreyas Bhatewara err_ioremap: 2621d1a890faSShreyas Bhatewara pci_release_selected_regions(pdev, (1 << 2) - 1); 2622d1a890faSShreyas Bhatewara err_set_mask: 2623d1a890faSShreyas Bhatewara pci_disable_device(pdev); 2624d1a890faSShreyas Bhatewara return err; 2625d1a890faSShreyas Bhatewara } 2626d1a890faSShreyas Bhatewara 2627d1a890faSShreyas Bhatewara 2628d1a890faSShreyas Bhatewara static void 2629d1a890faSShreyas Bhatewara vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter) 2630d1a890faSShreyas Bhatewara { 2631d1a890faSShreyas Bhatewara BUG_ON(!adapter->pdev); 2632d1a890faSShreyas Bhatewara 2633d1a890faSShreyas Bhatewara iounmap(adapter->hw_addr0); 2634d1a890faSShreyas Bhatewara iounmap(adapter->hw_addr1); 2635d1a890faSShreyas Bhatewara pci_release_selected_regions(adapter->pdev, (1 << 2) - 1); 2636d1a890faSShreyas Bhatewara pci_disable_device(adapter->pdev); 2637d1a890faSShreyas Bhatewara } 2638d1a890faSShreyas Bhatewara 2639d1a890faSShreyas Bhatewara 2640d1a890faSShreyas Bhatewara static void 2641d1a890faSShreyas Bhatewara vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter) 2642d1a890faSShreyas Bhatewara { 264309c5088eSShreyas Bhatewara size_t sz, i, ring0_size, ring1_size, comp_size; 264409c5088eSShreyas Bhatewara struct vmxnet3_rx_queue *rq = &adapter->rx_queue[0]; 264509c5088eSShreyas Bhatewara 2646d1a890faSShreyas Bhatewara 2647d1a890faSShreyas Bhatewara if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE - 2648d1a890faSShreyas Bhatewara VMXNET3_MAX_ETH_HDR_SIZE) { 2649d1a890faSShreyas Bhatewara adapter->skb_buf_size = adapter->netdev->mtu + 2650d1a890faSShreyas Bhatewara VMXNET3_MAX_ETH_HDR_SIZE; 2651d1a890faSShreyas Bhatewara if (adapter->skb_buf_size < VMXNET3_MIN_T0_BUF_SIZE) 2652d1a890faSShreyas Bhatewara adapter->skb_buf_size = VMXNET3_MIN_T0_BUF_SIZE; 2653d1a890faSShreyas Bhatewara 2654d1a890faSShreyas Bhatewara adapter->rx_buf_per_pkt = 1; 2655d1a890faSShreyas Bhatewara } else { 2656d1a890faSShreyas Bhatewara adapter->skb_buf_size = VMXNET3_MAX_SKB_BUF_SIZE; 2657d1a890faSShreyas Bhatewara sz = adapter->netdev->mtu - VMXNET3_MAX_SKB_BUF_SIZE + 2658d1a890faSShreyas Bhatewara 
VMXNET3_MAX_ETH_HDR_SIZE; 2659d1a890faSShreyas Bhatewara adapter->rx_buf_per_pkt = 1 + (sz + PAGE_SIZE - 1) / PAGE_SIZE; 2660d1a890faSShreyas Bhatewara } 2661d1a890faSShreyas Bhatewara 2662d1a890faSShreyas Bhatewara /* 2663d1a890faSShreyas Bhatewara * for simplicity, force the ring0 size to be a multiple of 2664d1a890faSShreyas Bhatewara * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN 2665d1a890faSShreyas Bhatewara */ 2666d1a890faSShreyas Bhatewara sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN; 266709c5088eSShreyas Bhatewara ring0_size = adapter->rx_queue[0].rx_ring[0].size; 266809c5088eSShreyas Bhatewara ring0_size = (ring0_size + sz - 1) / sz * sz; 2669a53255d3SShreyas Bhatewara ring0_size = min_t(u32, ring0_size, VMXNET3_RX_RING_MAX_SIZE / 267009c5088eSShreyas Bhatewara sz * sz); 267109c5088eSShreyas Bhatewara ring1_size = adapter->rx_queue[0].rx_ring[1].size; 267253831aa1SShrikrishna Khare ring1_size = (ring1_size + sz - 1) / sz * sz; 267353831aa1SShrikrishna Khare ring1_size = min_t(u32, ring1_size, VMXNET3_RX_RING2_MAX_SIZE / 267453831aa1SShrikrishna Khare sz * sz); 267509c5088eSShreyas Bhatewara comp_size = ring0_size + ring1_size; 267609c5088eSShreyas Bhatewara 267709c5088eSShreyas Bhatewara for (i = 0; i < adapter->num_rx_queues; i++) { 267809c5088eSShreyas Bhatewara rq = &adapter->rx_queue[i]; 267909c5088eSShreyas Bhatewara rq->rx_ring[0].size = ring0_size; 268009c5088eSShreyas Bhatewara rq->rx_ring[1].size = ring1_size; 268109c5088eSShreyas Bhatewara rq->comp_ring.size = comp_size; 268209c5088eSShreyas Bhatewara } 2683d1a890faSShreyas Bhatewara } 2684d1a890faSShreyas Bhatewara 2685d1a890faSShreyas Bhatewara 2686d1a890faSShreyas Bhatewara int 2687d1a890faSShreyas Bhatewara vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size, 2688d1a890faSShreyas Bhatewara u32 rx_ring_size, u32 rx_ring2_size) 2689d1a890faSShreyas Bhatewara { 269009c5088eSShreyas Bhatewara int err = 0, i; 2691d1a890faSShreyas Bhatewara 269209c5088eSShreyas Bhatewara for (i = 0; i < adapter->num_tx_queues; i++) { 269309c5088eSShreyas Bhatewara struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i]; 269409c5088eSShreyas Bhatewara tq->tx_ring.size = tx_ring_size; 269509c5088eSShreyas Bhatewara tq->data_ring.size = tx_ring_size; 269609c5088eSShreyas Bhatewara tq->comp_ring.size = tx_ring_size; 269709c5088eSShreyas Bhatewara tq->shared = &adapter->tqd_start[i].ctrl; 269809c5088eSShreyas Bhatewara tq->stopped = true; 269909c5088eSShreyas Bhatewara tq->adapter = adapter; 270009c5088eSShreyas Bhatewara tq->qid = i; 270109c5088eSShreyas Bhatewara err = vmxnet3_tq_create(tq, adapter); 270209c5088eSShreyas Bhatewara /* 270309c5088eSShreyas Bhatewara * Too late to change num_tx_queues. 
We cannot do away with 270409c5088eSShreyas Bhatewara * lesser number of queues than what we asked for 270509c5088eSShreyas Bhatewara */ 2706d1a890faSShreyas Bhatewara if (err) 270709c5088eSShreyas Bhatewara goto queue_err; 270809c5088eSShreyas Bhatewara } 2709d1a890faSShreyas Bhatewara 271009c5088eSShreyas Bhatewara adapter->rx_queue[0].rx_ring[0].size = rx_ring_size; 271109c5088eSShreyas Bhatewara adapter->rx_queue[0].rx_ring[1].size = rx_ring2_size; 2712d1a890faSShreyas Bhatewara vmxnet3_adjust_rx_ring_size(adapter); 271309c5088eSShreyas Bhatewara for (i = 0; i < adapter->num_rx_queues; i++) { 271409c5088eSShreyas Bhatewara struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i]; 271509c5088eSShreyas Bhatewara /* qid and qid2 for rx queues will be assigned later when num 271609c5088eSShreyas Bhatewara * of rx queues is finalized after allocating intrs */ 271709c5088eSShreyas Bhatewara rq->shared = &adapter->rqd_start[i].ctrl; 271809c5088eSShreyas Bhatewara rq->adapter = adapter; 271909c5088eSShreyas Bhatewara err = vmxnet3_rq_create(rq, adapter); 272009c5088eSShreyas Bhatewara if (err) { 272109c5088eSShreyas Bhatewara if (i == 0) { 2722204a6e65SStephen Hemminger netdev_err(adapter->netdev, 2723204a6e65SStephen Hemminger "Could not allocate any rx queues. " 2724204a6e65SStephen Hemminger "Aborting.\n"); 272509c5088eSShreyas Bhatewara goto queue_err; 272609c5088eSShreyas Bhatewara } else { 2727204a6e65SStephen Hemminger netdev_info(adapter->netdev, 2728204a6e65SStephen Hemminger "Number of rx queues changed " 272909c5088eSShreyas Bhatewara "to : %d.\n", i); 273009c5088eSShreyas Bhatewara adapter->num_rx_queues = i; 273109c5088eSShreyas Bhatewara err = 0; 273209c5088eSShreyas Bhatewara break; 273309c5088eSShreyas Bhatewara } 273409c5088eSShreyas Bhatewara } 273509c5088eSShreyas Bhatewara } 273609c5088eSShreyas Bhatewara return err; 273709c5088eSShreyas Bhatewara queue_err: 273809c5088eSShreyas Bhatewara vmxnet3_tq_destroy_all(adapter); 2739d1a890faSShreyas Bhatewara return err; 2740d1a890faSShreyas Bhatewara } 2741d1a890faSShreyas Bhatewara 2742d1a890faSShreyas Bhatewara static int 2743d1a890faSShreyas Bhatewara vmxnet3_open(struct net_device *netdev) 2744d1a890faSShreyas Bhatewara { 2745d1a890faSShreyas Bhatewara struct vmxnet3_adapter *adapter; 274609c5088eSShreyas Bhatewara int err, i; 2747d1a890faSShreyas Bhatewara 2748d1a890faSShreyas Bhatewara adapter = netdev_priv(netdev); 2749d1a890faSShreyas Bhatewara 275009c5088eSShreyas Bhatewara for (i = 0; i < adapter->num_tx_queues; i++) 275109c5088eSShreyas Bhatewara spin_lock_init(&adapter->tx_queue[i].tx_lock); 2752d1a890faSShreyas Bhatewara 2753f00e2b0aSNeil Horman err = vmxnet3_create_queues(adapter, adapter->tx_ring_size, 2754f00e2b0aSNeil Horman adapter->rx_ring_size, 275553831aa1SShrikrishna Khare adapter->rx_ring2_size); 2756d1a890faSShreyas Bhatewara if (err) 2757d1a890faSShreyas Bhatewara goto queue_err; 2758d1a890faSShreyas Bhatewara 2759d1a890faSShreyas Bhatewara err = vmxnet3_activate_dev(adapter); 2760d1a890faSShreyas Bhatewara if (err) 2761d1a890faSShreyas Bhatewara goto activate_err; 2762d1a890faSShreyas Bhatewara 2763d1a890faSShreyas Bhatewara return 0; 2764d1a890faSShreyas Bhatewara 2765d1a890faSShreyas Bhatewara activate_err: 276609c5088eSShreyas Bhatewara vmxnet3_rq_destroy_all(adapter); 276709c5088eSShreyas Bhatewara vmxnet3_tq_destroy_all(adapter); 2768d1a890faSShreyas Bhatewara queue_err: 2769d1a890faSShreyas Bhatewara return err; 2770d1a890faSShreyas Bhatewara } 2771d1a890faSShreyas Bhatewara 2772d1a890faSShreyas 
Bhatewara 2773d1a890faSShreyas Bhatewara static int 2774d1a890faSShreyas Bhatewara vmxnet3_close(struct net_device *netdev) 2775d1a890faSShreyas Bhatewara { 2776d1a890faSShreyas Bhatewara struct vmxnet3_adapter *adapter = netdev_priv(netdev); 2777d1a890faSShreyas Bhatewara 2778d1a890faSShreyas Bhatewara /* 2779d1a890faSShreyas Bhatewara * Reset_work may be in the middle of resetting the device, wait for its 2780d1a890faSShreyas Bhatewara * completion. 2781d1a890faSShreyas Bhatewara */ 2782d1a890faSShreyas Bhatewara while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)) 2783d1a890faSShreyas Bhatewara msleep(1); 2784d1a890faSShreyas Bhatewara 2785d1a890faSShreyas Bhatewara vmxnet3_quiesce_dev(adapter); 2786d1a890faSShreyas Bhatewara 278709c5088eSShreyas Bhatewara vmxnet3_rq_destroy_all(adapter); 278809c5088eSShreyas Bhatewara vmxnet3_tq_destroy_all(adapter); 2789d1a890faSShreyas Bhatewara 2790d1a890faSShreyas Bhatewara clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state); 2791d1a890faSShreyas Bhatewara 2792d1a890faSShreyas Bhatewara 2793d1a890faSShreyas Bhatewara return 0; 2794d1a890faSShreyas Bhatewara } 2795d1a890faSShreyas Bhatewara 2796d1a890faSShreyas Bhatewara 2797d1a890faSShreyas Bhatewara void 2798d1a890faSShreyas Bhatewara vmxnet3_force_close(struct vmxnet3_adapter *adapter) 2799d1a890faSShreyas Bhatewara { 280009c5088eSShreyas Bhatewara int i; 280109c5088eSShreyas Bhatewara 2802d1a890faSShreyas Bhatewara /* 2803d1a890faSShreyas Bhatewara * we must clear VMXNET3_STATE_BIT_RESETTING, otherwise 2804d1a890faSShreyas Bhatewara * vmxnet3_close() will deadlock. 2805d1a890faSShreyas Bhatewara */ 2806d1a890faSShreyas Bhatewara BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)); 2807d1a890faSShreyas Bhatewara 2808d1a890faSShreyas Bhatewara /* we need to enable NAPI, otherwise dev_close will deadlock */ 280909c5088eSShreyas Bhatewara for (i = 0; i < adapter->num_rx_queues; i++) 281009c5088eSShreyas Bhatewara napi_enable(&adapter->rx_queue[i].napi); 2811d1a890faSShreyas Bhatewara dev_close(adapter->netdev); 2812d1a890faSShreyas Bhatewara } 2813d1a890faSShreyas Bhatewara 2814d1a890faSShreyas Bhatewara 2815d1a890faSShreyas Bhatewara static int 2816d1a890faSShreyas Bhatewara vmxnet3_change_mtu(struct net_device *netdev, int new_mtu) 2817d1a890faSShreyas Bhatewara { 2818d1a890faSShreyas Bhatewara struct vmxnet3_adapter *adapter = netdev_priv(netdev); 2819d1a890faSShreyas Bhatewara int err = 0; 2820d1a890faSShreyas Bhatewara 2821d1a890faSShreyas Bhatewara if (new_mtu < VMXNET3_MIN_MTU || new_mtu > VMXNET3_MAX_MTU) 2822d1a890faSShreyas Bhatewara return -EINVAL; 2823d1a890faSShreyas Bhatewara 2824d1a890faSShreyas Bhatewara netdev->mtu = new_mtu; 2825d1a890faSShreyas Bhatewara 2826d1a890faSShreyas Bhatewara /* 2827d1a890faSShreyas Bhatewara * Reset_work may be in the middle of resetting the device, wait for its 2828d1a890faSShreyas Bhatewara * completion. 
2829d1a890faSShreyas Bhatewara */ 2830d1a890faSShreyas Bhatewara while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)) 2831d1a890faSShreyas Bhatewara msleep(1); 2832d1a890faSShreyas Bhatewara 2833d1a890faSShreyas Bhatewara if (netif_running(netdev)) { 2834d1a890faSShreyas Bhatewara vmxnet3_quiesce_dev(adapter); 2835d1a890faSShreyas Bhatewara vmxnet3_reset_dev(adapter); 2836d1a890faSShreyas Bhatewara 2837d1a890faSShreyas Bhatewara /* we need to re-create the rx queue based on the new mtu */ 283809c5088eSShreyas Bhatewara vmxnet3_rq_destroy_all(adapter); 2839d1a890faSShreyas Bhatewara vmxnet3_adjust_rx_ring_size(adapter); 284009c5088eSShreyas Bhatewara err = vmxnet3_rq_create_all(adapter); 2841d1a890faSShreyas Bhatewara if (err) { 2842204a6e65SStephen Hemminger netdev_err(netdev, 2843204a6e65SStephen Hemminger "failed to re-create rx queues, " 2844204a6e65SStephen Hemminger " error %d. Closing it.\n", err); 2845d1a890faSShreyas Bhatewara goto out; 2846d1a890faSShreyas Bhatewara } 2847d1a890faSShreyas Bhatewara 2848d1a890faSShreyas Bhatewara err = vmxnet3_activate_dev(adapter); 2849d1a890faSShreyas Bhatewara if (err) { 2850204a6e65SStephen Hemminger netdev_err(netdev, 2851204a6e65SStephen Hemminger "failed to re-activate, error %d. " 2852204a6e65SStephen Hemminger "Closing it\n", err); 2853d1a890faSShreyas Bhatewara goto out; 2854d1a890faSShreyas Bhatewara } 2855d1a890faSShreyas Bhatewara } 2856d1a890faSShreyas Bhatewara 2857d1a890faSShreyas Bhatewara out: 2858d1a890faSShreyas Bhatewara clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state); 2859d1a890faSShreyas Bhatewara if (err) 2860d1a890faSShreyas Bhatewara vmxnet3_force_close(adapter); 2861d1a890faSShreyas Bhatewara 2862d1a890faSShreyas Bhatewara return err; 2863d1a890faSShreyas Bhatewara } 2864d1a890faSShreyas Bhatewara 2865d1a890faSShreyas Bhatewara 2866d1a890faSShreyas Bhatewara static void 2867d1a890faSShreyas Bhatewara vmxnet3_declare_features(struct vmxnet3_adapter *adapter, bool dma64) 2868d1a890faSShreyas Bhatewara { 2869d1a890faSShreyas Bhatewara struct net_device *netdev = adapter->netdev; 2870d1a890faSShreyas Bhatewara 2871a0d2730cSMichał Mirosław netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | 2872f646968fSPatrick McHardy NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX | 2873f646968fSPatrick McHardy NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 | 287472e85c45SJesse Gross NETIF_F_LRO; 2875a0d2730cSMichał Mirosław if (dma64) 2876ebbf9295SShreyas Bhatewara netdev->hw_features |= NETIF_F_HIGHDMA; 287772e85c45SJesse Gross netdev->vlan_features = netdev->hw_features & 2878f646968fSPatrick McHardy ~(NETIF_F_HW_VLAN_CTAG_TX | 2879f646968fSPatrick McHardy NETIF_F_HW_VLAN_CTAG_RX); 2880f646968fSPatrick McHardy netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER; 2881d1a890faSShreyas Bhatewara } 2882d1a890faSShreyas Bhatewara 2883d1a890faSShreyas Bhatewara 2884d1a890faSShreyas Bhatewara static void 2885d1a890faSShreyas Bhatewara vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac) 2886d1a890faSShreyas Bhatewara { 2887d1a890faSShreyas Bhatewara u32 tmp; 2888d1a890faSShreyas Bhatewara 2889d1a890faSShreyas Bhatewara tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACL); 2890d1a890faSShreyas Bhatewara *(u32 *)mac = tmp; 2891d1a890faSShreyas Bhatewara 2892d1a890faSShreyas Bhatewara tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACH); 2893d1a890faSShreyas Bhatewara mac[4] = tmp & 0xff; 2894d1a890faSShreyas Bhatewara mac[5] = (tmp >> 8) & 0xff; 2895d1a890faSShreyas Bhatewara } 
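/*
 * The device exposes the MAC address through two BAR1 registers: MACL
 * carries bytes 0-3 and the low 16 bits of MACH carry bytes 4-5, which
 * is why the helpers above pack and unpack the address this way.  As an
 * illustration (hypothetical address, little-endian guest): for
 * 00:0c:29:aa:bb:cc, MACL reads 0xaa290c00 and MACH reads 0x0000ccbb.
 */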
2896d1a890faSShreyas Bhatewara 289709c5088eSShreyas Bhatewara #ifdef CONFIG_PCI_MSI 289809c5088eSShreyas Bhatewara 289909c5088eSShreyas Bhatewara /* 290009c5088eSShreyas Bhatewara * Enable MSIx vectors. 290109c5088eSShreyas Bhatewara * Returns : 290225985edcSLucas De Marchi * VMXNET3_LINUX_MIN_MSIX_VECT when only minimum number of vectors required 2903b60b869dSAlexander Gordeev * were enabled. 2904b60b869dSAlexander Gordeev * number of vectors which were enabled otherwise (this number is greater 290509c5088eSShreyas Bhatewara * than VMXNET3_LINUX_MIN_MSIX_VECT) 290609c5088eSShreyas Bhatewara */ 290709c5088eSShreyas Bhatewara 290809c5088eSShreyas Bhatewara static int 2909b60b869dSAlexander Gordeev vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter, int nvec) 291009c5088eSShreyas Bhatewara { 2911c0a1be38SAlexander Gordeev int ret = pci_enable_msix_range(adapter->pdev, 2912c0a1be38SAlexander Gordeev adapter->intr.msix_entries, nvec, nvec); 2913c0a1be38SAlexander Gordeev 2914c0a1be38SAlexander Gordeev if (ret == -ENOSPC && nvec > VMXNET3_LINUX_MIN_MSIX_VECT) { 29154bad25faSStephen Hemminger dev_err(&adapter->netdev->dev, 2916b60b869dSAlexander Gordeev "Failed to enable %d MSI-X, trying %d\n", 2917b60b869dSAlexander Gordeev nvec, VMXNET3_LINUX_MIN_MSIX_VECT); 291809c5088eSShreyas Bhatewara 2919c0a1be38SAlexander Gordeev ret = pci_enable_msix_range(adapter->pdev, 2920c0a1be38SAlexander Gordeev adapter->intr.msix_entries, 2921c0a1be38SAlexander Gordeev VMXNET3_LINUX_MIN_MSIX_VECT, 2922c0a1be38SAlexander Gordeev VMXNET3_LINUX_MIN_MSIX_VECT); 2923c0a1be38SAlexander Gordeev } 2924c0a1be38SAlexander Gordeev 2925c0a1be38SAlexander Gordeev if (ret < 0) { 2926c0a1be38SAlexander Gordeev dev_err(&adapter->netdev->dev, 2927c0a1be38SAlexander Gordeev "Failed to enable MSI-X, error: %d\n", ret); 2928c0a1be38SAlexander Gordeev } 2929c0a1be38SAlexander Gordeev 2930c0a1be38SAlexander Gordeev return ret; 293109c5088eSShreyas Bhatewara } 293209c5088eSShreyas Bhatewara 293309c5088eSShreyas Bhatewara 293409c5088eSShreyas Bhatewara #endif /* CONFIG_PCI_MSI */ 2935d1a890faSShreyas Bhatewara 2936d1a890faSShreyas Bhatewara static void 2937d1a890faSShreyas Bhatewara vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter) 2938d1a890faSShreyas Bhatewara { 2939d1a890faSShreyas Bhatewara u32 cfg; 2940e328d410SRoland Dreier unsigned long flags; 2941d1a890faSShreyas Bhatewara 2942d1a890faSShreyas Bhatewara /* intr settings */ 2943e328d410SRoland Dreier spin_lock_irqsave(&adapter->cmd_lock, flags); 2944d1a890faSShreyas Bhatewara VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 2945d1a890faSShreyas Bhatewara VMXNET3_CMD_GET_CONF_INTR); 2946d1a890faSShreyas Bhatewara cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); 2947e328d410SRoland Dreier spin_unlock_irqrestore(&adapter->cmd_lock, flags); 2948d1a890faSShreyas Bhatewara adapter->intr.type = cfg & 0x3; 2949d1a890faSShreyas Bhatewara adapter->intr.mask_mode = (cfg >> 2) & 0x3; 2950d1a890faSShreyas Bhatewara 2951d1a890faSShreyas Bhatewara if (adapter->intr.type == VMXNET3_IT_AUTO) { 29520bdc0d70SShreyas Bhatewara adapter->intr.type = VMXNET3_IT_MSIX; 29530bdc0d70SShreyas Bhatewara } 2954d1a890faSShreyas Bhatewara 29558f7e524cSRandy Dunlap #ifdef CONFIG_PCI_MSI 29560bdc0d70SShreyas Bhatewara if (adapter->intr.type == VMXNET3_IT_MSIX) { 2957b60b869dSAlexander Gordeev int i, nvec; 29580bdc0d70SShreyas Bhatewara 2959b60b869dSAlexander Gordeev nvec = adapter->share_intr == VMXNET3_INTR_TXSHARE ? 
2960b60b869dSAlexander Gordeev 1 : adapter->num_tx_queues; 2961b60b869dSAlexander Gordeev nvec += adapter->share_intr == VMXNET3_INTR_BUDDYSHARE ? 2962b60b869dSAlexander Gordeev 0 : adapter->num_rx_queues; 2963b60b869dSAlexander Gordeev nvec += 1; /* for link event */ 2964b60b869dSAlexander Gordeev nvec = nvec > VMXNET3_LINUX_MIN_MSIX_VECT ? 2965b60b869dSAlexander Gordeev nvec : VMXNET3_LINUX_MIN_MSIX_VECT; 296609c5088eSShreyas Bhatewara 2967b60b869dSAlexander Gordeev for (i = 0; i < nvec; i++) 2968b60b869dSAlexander Gordeev adapter->intr.msix_entries[i].entry = i; 296909c5088eSShreyas Bhatewara 2970b60b869dSAlexander Gordeev nvec = vmxnet3_acquire_msix_vectors(adapter, nvec); 2971b60b869dSAlexander Gordeev if (nvec < 0) 2972b60b869dSAlexander Gordeev goto msix_err; 297309c5088eSShreyas Bhatewara 297409c5088eSShreyas Bhatewara /* If we cannot allocate one MSIx vector per queue 297509c5088eSShreyas Bhatewara * then limit the number of rx queues to 1 297609c5088eSShreyas Bhatewara */ 2977b60b869dSAlexander Gordeev if (nvec == VMXNET3_LINUX_MIN_MSIX_VECT) { 297809c5088eSShreyas Bhatewara if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE 29797e96fbf2SShreyas Bhatewara || adapter->num_rx_queues != 1) { 298009c5088eSShreyas Bhatewara adapter->share_intr = VMXNET3_INTR_TXSHARE; 2981204a6e65SStephen Hemminger netdev_err(adapter->netdev, 2982204a6e65SStephen Hemminger "Number of rx queues : 1\n"); 298309c5088eSShreyas Bhatewara adapter->num_rx_queues = 1; 298409c5088eSShreyas Bhatewara } 2985d1a890faSShreyas Bhatewara } 2986b60b869dSAlexander Gordeev 2987b60b869dSAlexander Gordeev adapter->intr.num_intrs = nvec; 298809c5088eSShreyas Bhatewara return; 298909c5088eSShreyas Bhatewara 2990b60b869dSAlexander Gordeev msix_err: 299109c5088eSShreyas Bhatewara /* If we cannot allocate MSIx vectors use only one rx queue */ 29924bad25faSStephen Hemminger dev_info(&adapter->pdev->dev, 29934bad25faSStephen Hemminger "Failed to enable MSI-X, error %d. 
" 2994b60b869dSAlexander Gordeev "Limiting #rx queues to 1, try MSI.\n", nvec); 299509c5088eSShreyas Bhatewara 29960bdc0d70SShreyas Bhatewara adapter->intr.type = VMXNET3_IT_MSI; 29970bdc0d70SShreyas Bhatewara } 2998d1a890faSShreyas Bhatewara 29990bdc0d70SShreyas Bhatewara if (adapter->intr.type == VMXNET3_IT_MSI) { 3000b60b869dSAlexander Gordeev if (!pci_enable_msi(adapter->pdev)) { 300109c5088eSShreyas Bhatewara adapter->num_rx_queues = 1; 3002d1a890faSShreyas Bhatewara adapter->intr.num_intrs = 1; 3003d1a890faSShreyas Bhatewara return; 3004d1a890faSShreyas Bhatewara } 3005d1a890faSShreyas Bhatewara } 30060bdc0d70SShreyas Bhatewara #endif /* CONFIG_PCI_MSI */ 3007d1a890faSShreyas Bhatewara 300809c5088eSShreyas Bhatewara adapter->num_rx_queues = 1; 3009204a6e65SStephen Hemminger dev_info(&adapter->netdev->dev, 3010204a6e65SStephen Hemminger "Using INTx interrupt, #Rx queues: 1.\n"); 3011d1a890faSShreyas Bhatewara adapter->intr.type = VMXNET3_IT_INTX; 3012d1a890faSShreyas Bhatewara 3013d1a890faSShreyas Bhatewara /* INT-X related setting */ 3014d1a890faSShreyas Bhatewara adapter->intr.num_intrs = 1; 3015d1a890faSShreyas Bhatewara } 3016d1a890faSShreyas Bhatewara 3017d1a890faSShreyas Bhatewara 3018d1a890faSShreyas Bhatewara static void 3019d1a890faSShreyas Bhatewara vmxnet3_free_intr_resources(struct vmxnet3_adapter *adapter) 3020d1a890faSShreyas Bhatewara { 3021d1a890faSShreyas Bhatewara if (adapter->intr.type == VMXNET3_IT_MSIX) 3022d1a890faSShreyas Bhatewara pci_disable_msix(adapter->pdev); 3023d1a890faSShreyas Bhatewara else if (adapter->intr.type == VMXNET3_IT_MSI) 3024d1a890faSShreyas Bhatewara pci_disable_msi(adapter->pdev); 3025d1a890faSShreyas Bhatewara else 3026d1a890faSShreyas Bhatewara BUG_ON(adapter->intr.type != VMXNET3_IT_INTX); 3027d1a890faSShreyas Bhatewara } 3028d1a890faSShreyas Bhatewara 3029d1a890faSShreyas Bhatewara 3030d1a890faSShreyas Bhatewara static void 3031d1a890faSShreyas Bhatewara vmxnet3_tx_timeout(struct net_device *netdev) 3032d1a890faSShreyas Bhatewara { 3033d1a890faSShreyas Bhatewara struct vmxnet3_adapter *adapter = netdev_priv(netdev); 3034d1a890faSShreyas Bhatewara adapter->tx_timeout_count++; 3035d1a890faSShreyas Bhatewara 3036204a6e65SStephen Hemminger netdev_err(adapter->netdev, "tx hang\n"); 3037d1a890faSShreyas Bhatewara schedule_work(&adapter->work); 303809c5088eSShreyas Bhatewara netif_wake_queue(adapter->netdev); 3039d1a890faSShreyas Bhatewara } 3040d1a890faSShreyas Bhatewara 3041d1a890faSShreyas Bhatewara 3042d1a890faSShreyas Bhatewara static void 3043d1a890faSShreyas Bhatewara vmxnet3_reset_work(struct work_struct *data) 3044d1a890faSShreyas Bhatewara { 3045d1a890faSShreyas Bhatewara struct vmxnet3_adapter *adapter; 3046d1a890faSShreyas Bhatewara 3047d1a890faSShreyas Bhatewara adapter = container_of(data, struct vmxnet3_adapter, work); 3048d1a890faSShreyas Bhatewara 3049d1a890faSShreyas Bhatewara /* if another thread is resetting the device, no need to proceed */ 3050d1a890faSShreyas Bhatewara if (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)) 3051d1a890faSShreyas Bhatewara return; 3052d1a890faSShreyas Bhatewara 3053d1a890faSShreyas Bhatewara /* if the device is closed, we must leave it alone */ 3054d9a5f210SShreyas Bhatewara rtnl_lock(); 3055d1a890faSShreyas Bhatewara if (netif_running(adapter->netdev)) { 3056204a6e65SStephen Hemminger netdev_notice(adapter->netdev, "resetting\n"); 3057d1a890faSShreyas Bhatewara vmxnet3_quiesce_dev(adapter); 3058d1a890faSShreyas Bhatewara vmxnet3_reset_dev(adapter); 3059d1a890faSShreyas 
Bhatewara vmxnet3_activate_dev(adapter); 3060d1a890faSShreyas Bhatewara } else { 3061204a6e65SStephen Hemminger netdev_info(adapter->netdev, "already closed\n"); 3062d1a890faSShreyas Bhatewara } 3063d9a5f210SShreyas Bhatewara rtnl_unlock(); 3064d1a890faSShreyas Bhatewara 3065d1a890faSShreyas Bhatewara clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state); 3066d1a890faSShreyas Bhatewara } 3067d1a890faSShreyas Bhatewara 3068d1a890faSShreyas Bhatewara 30693a4751a3SBill Pemberton static int 3070d1a890faSShreyas Bhatewara vmxnet3_probe_device(struct pci_dev *pdev, 3071d1a890faSShreyas Bhatewara const struct pci_device_id *id) 3072d1a890faSShreyas Bhatewara { 3073d1a890faSShreyas Bhatewara static const struct net_device_ops vmxnet3_netdev_ops = { 3074d1a890faSShreyas Bhatewara .ndo_open = vmxnet3_open, 3075d1a890faSShreyas Bhatewara .ndo_stop = vmxnet3_close, 3076d1a890faSShreyas Bhatewara .ndo_start_xmit = vmxnet3_xmit_frame, 3077d1a890faSShreyas Bhatewara .ndo_set_mac_address = vmxnet3_set_mac_addr, 3078d1a890faSShreyas Bhatewara .ndo_change_mtu = vmxnet3_change_mtu, 3079a0d2730cSMichał Mirosław .ndo_set_features = vmxnet3_set_features, 308095305f6cSstephen hemminger .ndo_get_stats64 = vmxnet3_get_stats64, 3081d1a890faSShreyas Bhatewara .ndo_tx_timeout = vmxnet3_tx_timeout, 3082afc4b13dSJiri Pirko .ndo_set_rx_mode = vmxnet3_set_mc, 3083d1a890faSShreyas Bhatewara .ndo_vlan_rx_add_vid = vmxnet3_vlan_rx_add_vid, 3084d1a890faSShreyas Bhatewara .ndo_vlan_rx_kill_vid = vmxnet3_vlan_rx_kill_vid, 3085d1a890faSShreyas Bhatewara #ifdef CONFIG_NET_POLL_CONTROLLER 3086d1a890faSShreyas Bhatewara .ndo_poll_controller = vmxnet3_netpoll, 3087d1a890faSShreyas Bhatewara #endif 3088d1a890faSShreyas Bhatewara }; 3089d1a890faSShreyas Bhatewara int err; 3090d1a890faSShreyas Bhatewara bool dma64 = false; /* stupid gcc */ 3091d1a890faSShreyas Bhatewara u32 ver; 3092d1a890faSShreyas Bhatewara struct net_device *netdev; 3093d1a890faSShreyas Bhatewara struct vmxnet3_adapter *adapter; 3094d1a890faSShreyas Bhatewara u8 mac[ETH_ALEN]; 309509c5088eSShreyas Bhatewara int size; 309609c5088eSShreyas Bhatewara int num_tx_queues; 309709c5088eSShreyas Bhatewara int num_rx_queues; 3098d1a890faSShreyas Bhatewara 3099e154b639SShreyas Bhatewara if (!pci_msi_enabled()) 3100e154b639SShreyas Bhatewara enable_mq = 0; 3101e154b639SShreyas Bhatewara 310209c5088eSShreyas Bhatewara #ifdef VMXNET3_RSS 310309c5088eSShreyas Bhatewara if (enable_mq) 310409c5088eSShreyas Bhatewara num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES, 310509c5088eSShreyas Bhatewara (int)num_online_cpus()); 310609c5088eSShreyas Bhatewara else 310709c5088eSShreyas Bhatewara #endif 310809c5088eSShreyas Bhatewara num_rx_queues = 1; 3109eebb02b1SShreyas Bhatewara num_rx_queues = rounddown_pow_of_two(num_rx_queues); 311009c5088eSShreyas Bhatewara 311109c5088eSShreyas Bhatewara if (enable_mq) 311209c5088eSShreyas Bhatewara num_tx_queues = min(VMXNET3_DEVICE_MAX_TX_QUEUES, 311309c5088eSShreyas Bhatewara (int)num_online_cpus()); 311409c5088eSShreyas Bhatewara else 311509c5088eSShreyas Bhatewara num_tx_queues = 1; 311609c5088eSShreyas Bhatewara 3117eebb02b1SShreyas Bhatewara num_tx_queues = rounddown_pow_of_two(num_tx_queues); 311809c5088eSShreyas Bhatewara netdev = alloc_etherdev_mq(sizeof(struct vmxnet3_adapter), 311909c5088eSShreyas Bhatewara max(num_tx_queues, num_rx_queues)); 3120204a6e65SStephen Hemminger dev_info(&pdev->dev, 3121204a6e65SStephen Hemminger "# of Tx queues : %d, # of Rx queues : %d\n", 312209c5088eSShreyas Bhatewara num_tx_queues, num_rx_queues); 
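/*
 * The queue counts reported above are each capped at the device maximum
 * and at num_online_cpus(), then rounded down to a power of two, and
 * multiqueue is only attempted when PCI MSI is available; e.g. a guest
 * with six online CPUs would typically end up with four tx and four rx
 * queues here (assuming the device maximum allows it).
 */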
312309c5088eSShreyas Bhatewara 312441de8d4cSJoe Perches if (!netdev) 3125d1a890faSShreyas Bhatewara return -ENOMEM; 3126d1a890faSShreyas Bhatewara 3127d1a890faSShreyas Bhatewara pci_set_drvdata(pdev, netdev); 3128d1a890faSShreyas Bhatewara adapter = netdev_priv(netdev); 3129d1a890faSShreyas Bhatewara adapter->netdev = netdev; 3130d1a890faSShreyas Bhatewara adapter->pdev = pdev; 3131d1a890faSShreyas Bhatewara 3132f00e2b0aSNeil Horman adapter->tx_ring_size = VMXNET3_DEF_TX_RING_SIZE; 3133f00e2b0aSNeil Horman adapter->rx_ring_size = VMXNET3_DEF_RX_RING_SIZE; 313453831aa1SShrikrishna Khare adapter->rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE; 3135f00e2b0aSNeil Horman 313683d0feffSShreyas Bhatewara spin_lock_init(&adapter->cmd_lock); 3137b0eb57cbSAndy King adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter, 3138b0eb57cbSAndy King sizeof(struct vmxnet3_adapter), 3139b0eb57cbSAndy King PCI_DMA_TODEVICE); 31405738a09dSAlexey Khoroshilov if (dma_mapping_error(&adapter->pdev->dev, adapter->adapter_pa)) { 31415738a09dSAlexey Khoroshilov dev_err(&pdev->dev, "Failed to map dma\n"); 31425738a09dSAlexey Khoroshilov err = -EFAULT; 31435738a09dSAlexey Khoroshilov goto err_dma_map; 31445738a09dSAlexey Khoroshilov } 3145b0eb57cbSAndy King adapter->shared = dma_alloc_coherent( 3146b0eb57cbSAndy King &adapter->pdev->dev, 3147d1a890faSShreyas Bhatewara sizeof(struct Vmxnet3_DriverShared), 3148b0eb57cbSAndy King &adapter->shared_pa, GFP_KERNEL); 3149d1a890faSShreyas Bhatewara if (!adapter->shared) { 3150204a6e65SStephen Hemminger dev_err(&pdev->dev, "Failed to allocate memory\n"); 3151d1a890faSShreyas Bhatewara err = -ENOMEM; 3152d1a890faSShreyas Bhatewara goto err_alloc_shared; 3153d1a890faSShreyas Bhatewara } 3154d1a890faSShreyas Bhatewara 315509c5088eSShreyas Bhatewara adapter->num_rx_queues = num_rx_queues; 315609c5088eSShreyas Bhatewara adapter->num_tx_queues = num_tx_queues; 3157e4fabf2bSBhavesh Davda adapter->rx_buf_per_pkt = 1; 315809c5088eSShreyas Bhatewara 315909c5088eSShreyas Bhatewara size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues; 316009c5088eSShreyas Bhatewara size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues; 3161b0eb57cbSAndy King adapter->tqd_start = dma_alloc_coherent(&adapter->pdev->dev, size, 3162b0eb57cbSAndy King &adapter->queue_desc_pa, 3163b0eb57cbSAndy King GFP_KERNEL); 3164d1a890faSShreyas Bhatewara 3165d1a890faSShreyas Bhatewara if (!adapter->tqd_start) { 3166204a6e65SStephen Hemminger dev_err(&pdev->dev, "Failed to allocate memory\n"); 3167d1a890faSShreyas Bhatewara err = -ENOMEM; 3168d1a890faSShreyas Bhatewara goto err_alloc_queue_desc; 3169d1a890faSShreyas Bhatewara } 317009c5088eSShreyas Bhatewara adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start + 317109c5088eSShreyas Bhatewara adapter->num_tx_queues); 3172d1a890faSShreyas Bhatewara 3173b0eb57cbSAndy King adapter->pm_conf = dma_alloc_coherent(&adapter->pdev->dev, 3174b0eb57cbSAndy King sizeof(struct Vmxnet3_PMConf), 3175b0eb57cbSAndy King &adapter->pm_conf_pa, 3176b0eb57cbSAndy King GFP_KERNEL); 3177d1a890faSShreyas Bhatewara if (adapter->pm_conf == NULL) { 3178d1a890faSShreyas Bhatewara err = -ENOMEM; 3179d1a890faSShreyas Bhatewara goto err_alloc_pm; 3180d1a890faSShreyas Bhatewara } 3181d1a890faSShreyas Bhatewara 318209c5088eSShreyas Bhatewara #ifdef VMXNET3_RSS 318309c5088eSShreyas Bhatewara 3184b0eb57cbSAndy King adapter->rss_conf = dma_alloc_coherent(&adapter->pdev->dev, 3185b0eb57cbSAndy King sizeof(struct UPT1_RSSConf), 3186b0eb57cbSAndy King 
&adapter->rss_conf_pa, 3187b0eb57cbSAndy King GFP_KERNEL); 318809c5088eSShreyas Bhatewara if (adapter->rss_conf == NULL) { 318909c5088eSShreyas Bhatewara err = -ENOMEM; 319009c5088eSShreyas Bhatewara goto err_alloc_rss; 319109c5088eSShreyas Bhatewara } 319209c5088eSShreyas Bhatewara #endif /* VMXNET3_RSS */ 319309c5088eSShreyas Bhatewara 3194d1a890faSShreyas Bhatewara err = vmxnet3_alloc_pci_resources(adapter, &dma64); 3195d1a890faSShreyas Bhatewara if (err < 0) 3196d1a890faSShreyas Bhatewara goto err_alloc_pci; 3197d1a890faSShreyas Bhatewara 3198d1a890faSShreyas Bhatewara ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS); 319945dac1d6SShreyas Bhatewara if (ver & 2) { 320045dac1d6SShreyas Bhatewara VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 2); 320145dac1d6SShreyas Bhatewara adapter->version = 2; 320245dac1d6SShreyas Bhatewara } else if (ver & 1) { 3203d1a890faSShreyas Bhatewara VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 1); 320445dac1d6SShreyas Bhatewara adapter->version = 1; 3205d1a890faSShreyas Bhatewara } else { 3206204a6e65SStephen Hemminger dev_err(&pdev->dev, 3207204a6e65SStephen Hemminger "Incompatible h/w version (0x%x) for adapter\n", ver); 3208d1a890faSShreyas Bhatewara err = -EBUSY; 3209d1a890faSShreyas Bhatewara goto err_ver; 3210d1a890faSShreyas Bhatewara } 321145dac1d6SShreyas Bhatewara dev_dbg(&pdev->dev, "Using device version %d\n", adapter->version); 3212d1a890faSShreyas Bhatewara 3213d1a890faSShreyas Bhatewara ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS); 3214d1a890faSShreyas Bhatewara if (ver & 1) { 3215d1a890faSShreyas Bhatewara VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_UVRS, 1); 3216d1a890faSShreyas Bhatewara } else { 3217204a6e65SStephen Hemminger dev_err(&pdev->dev, 3218204a6e65SStephen Hemminger "Incompatible upt version (0x%x) for adapter\n", ver); 3219d1a890faSShreyas Bhatewara err = -EBUSY; 3220d1a890faSShreyas Bhatewara goto err_ver; 3221d1a890faSShreyas Bhatewara } 3222d1a890faSShreyas Bhatewara 3223e101e7ddSShreyas Bhatewara SET_NETDEV_DEV(netdev, &pdev->dev); 3224d1a890faSShreyas Bhatewara vmxnet3_declare_features(adapter, dma64); 3225d1a890faSShreyas Bhatewara 32264db37a78SStephen Hemminger if (adapter->num_tx_queues == adapter->num_rx_queues) 32274db37a78SStephen Hemminger adapter->share_intr = VMXNET3_INTR_BUDDYSHARE; 32284db37a78SStephen Hemminger else 322909c5088eSShreyas Bhatewara adapter->share_intr = VMXNET3_INTR_DONTSHARE; 323009c5088eSShreyas Bhatewara 3231d1a890faSShreyas Bhatewara vmxnet3_alloc_intr_resources(adapter); 3232d1a890faSShreyas Bhatewara 323309c5088eSShreyas Bhatewara #ifdef VMXNET3_RSS 323409c5088eSShreyas Bhatewara if (adapter->num_rx_queues > 1 && 323509c5088eSShreyas Bhatewara adapter->intr.type == VMXNET3_IT_MSIX) { 323609c5088eSShreyas Bhatewara adapter->rss = true; 32377db11f75SStephen Hemminger netdev->hw_features |= NETIF_F_RXHASH; 32387db11f75SStephen Hemminger netdev->features |= NETIF_F_RXHASH; 3239204a6e65SStephen Hemminger dev_dbg(&pdev->dev, "RSS is enabled.\n"); 324009c5088eSShreyas Bhatewara } else { 324109c5088eSShreyas Bhatewara adapter->rss = false; 324209c5088eSShreyas Bhatewara } 324309c5088eSShreyas Bhatewara #endif 324409c5088eSShreyas Bhatewara 3245d1a890faSShreyas Bhatewara vmxnet3_read_mac_addr(adapter, mac); 3246d1a890faSShreyas Bhatewara memcpy(netdev->dev_addr, mac, netdev->addr_len); 3247d1a890faSShreyas Bhatewara 3248d1a890faSShreyas Bhatewara netdev->netdev_ops = &vmxnet3_netdev_ops; 3249d1a890faSShreyas Bhatewara vmxnet3_set_ethtool_ops(netdev); 325009c5088eSShreyas 
Bhatewara netdev->watchdog_timeo = 5 * HZ; 3251d1a890faSShreyas Bhatewara 3252d1a890faSShreyas Bhatewara INIT_WORK(&adapter->work, vmxnet3_reset_work); 3253e3bc4ffbSSteve Hodgson set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state); 3254d1a890faSShreyas Bhatewara 325509c5088eSShreyas Bhatewara if (adapter->intr.type == VMXNET3_IT_MSIX) { 325609c5088eSShreyas Bhatewara int i; 325709c5088eSShreyas Bhatewara for (i = 0; i < adapter->num_rx_queues; i++) { 325809c5088eSShreyas Bhatewara netif_napi_add(adapter->netdev, 325909c5088eSShreyas Bhatewara &adapter->rx_queue[i].napi, 326009c5088eSShreyas Bhatewara vmxnet3_poll_rx_only, 64); 326109c5088eSShreyas Bhatewara } 326209c5088eSShreyas Bhatewara } else { 326309c5088eSShreyas Bhatewara netif_napi_add(adapter->netdev, &adapter->rx_queue[0].napi, 326409c5088eSShreyas Bhatewara vmxnet3_poll, 64); 326509c5088eSShreyas Bhatewara } 326609c5088eSShreyas Bhatewara 326709c5088eSShreyas Bhatewara netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues); 326809c5088eSShreyas Bhatewara netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues); 326909c5088eSShreyas Bhatewara 32706cdd20c3SNeil Horman netif_carrier_off(netdev); 3271d1a890faSShreyas Bhatewara err = register_netdev(netdev); 3272d1a890faSShreyas Bhatewara 3273d1a890faSShreyas Bhatewara if (err) { 3274204a6e65SStephen Hemminger dev_err(&pdev->dev, "Failed to register adapter\n"); 3275d1a890faSShreyas Bhatewara goto err_register; 3276d1a890faSShreyas Bhatewara } 3277d1a890faSShreyas Bhatewara 32784a1745fcSShreyas Bhatewara vmxnet3_check_link(adapter, false); 3279d1a890faSShreyas Bhatewara return 0; 3280d1a890faSShreyas Bhatewara 3281d1a890faSShreyas Bhatewara err_register: 3282d1a890faSShreyas Bhatewara vmxnet3_free_intr_resources(adapter); 3283d1a890faSShreyas Bhatewara err_ver: 3284d1a890faSShreyas Bhatewara vmxnet3_free_pci_resources(adapter); 3285d1a890faSShreyas Bhatewara err_alloc_pci: 328609c5088eSShreyas Bhatewara #ifdef VMXNET3_RSS 3287b0eb57cbSAndy King dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf), 3288b0eb57cbSAndy King adapter->rss_conf, adapter->rss_conf_pa); 328909c5088eSShreyas Bhatewara err_alloc_rss: 329009c5088eSShreyas Bhatewara #endif 3291b0eb57cbSAndy King dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf), 3292b0eb57cbSAndy King adapter->pm_conf, adapter->pm_conf_pa); 3293d1a890faSShreyas Bhatewara err_alloc_pm: 3294b0eb57cbSAndy King dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start, 329509c5088eSShreyas Bhatewara adapter->queue_desc_pa); 3296d1a890faSShreyas Bhatewara err_alloc_queue_desc: 3297b0eb57cbSAndy King dma_free_coherent(&adapter->pdev->dev, 3298b0eb57cbSAndy King sizeof(struct Vmxnet3_DriverShared), 3299d1a890faSShreyas Bhatewara adapter->shared, adapter->shared_pa); 3300d1a890faSShreyas Bhatewara err_alloc_shared: 3301b0eb57cbSAndy King dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa, 3302b0eb57cbSAndy King sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE); 33035738a09dSAlexey Khoroshilov err_dma_map: 3304d1a890faSShreyas Bhatewara free_netdev(netdev); 3305d1a890faSShreyas Bhatewara return err; 3306d1a890faSShreyas Bhatewara } 3307d1a890faSShreyas Bhatewara 3308d1a890faSShreyas Bhatewara 33093a4751a3SBill Pemberton static void 3310d1a890faSShreyas Bhatewara vmxnet3_remove_device(struct pci_dev *pdev) 3311d1a890faSShreyas Bhatewara { 3312d1a890faSShreyas Bhatewara struct net_device *netdev = pci_get_drvdata(pdev); 3313d1a890faSShreyas Bhatewara struct vmxnet3_adapter *adapter = 
netdev_priv(netdev); 331409c5088eSShreyas Bhatewara int size = 0; 331509c5088eSShreyas Bhatewara int num_rx_queues; 331609c5088eSShreyas Bhatewara 331709c5088eSShreyas Bhatewara #ifdef VMXNET3_RSS 331809c5088eSShreyas Bhatewara if (enable_mq) 331909c5088eSShreyas Bhatewara num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES, 332009c5088eSShreyas Bhatewara (int)num_online_cpus()); 332109c5088eSShreyas Bhatewara else 332209c5088eSShreyas Bhatewara #endif 332309c5088eSShreyas Bhatewara num_rx_queues = 1; 3324eebb02b1SShreyas Bhatewara num_rx_queues = rounddown_pow_of_two(num_rx_queues); 3325d1a890faSShreyas Bhatewara 332623f333a2STejun Heo cancel_work_sync(&adapter->work); 3327d1a890faSShreyas Bhatewara 3328d1a890faSShreyas Bhatewara unregister_netdev(netdev); 3329d1a890faSShreyas Bhatewara 3330d1a890faSShreyas Bhatewara vmxnet3_free_intr_resources(adapter); 3331d1a890faSShreyas Bhatewara vmxnet3_free_pci_resources(adapter); 333209c5088eSShreyas Bhatewara #ifdef VMXNET3_RSS 3333b0eb57cbSAndy King dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf), 3334b0eb57cbSAndy King adapter->rss_conf, adapter->rss_conf_pa); 333509c5088eSShreyas Bhatewara #endif 3336b0eb57cbSAndy King dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf), 3337b0eb57cbSAndy King adapter->pm_conf, adapter->pm_conf_pa); 333809c5088eSShreyas Bhatewara 333909c5088eSShreyas Bhatewara size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues; 334009c5088eSShreyas Bhatewara size += sizeof(struct Vmxnet3_RxQueueDesc) * num_rx_queues; 3341b0eb57cbSAndy King dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start, 334209c5088eSShreyas Bhatewara adapter->queue_desc_pa); 3343b0eb57cbSAndy King dma_free_coherent(&adapter->pdev->dev, 3344b0eb57cbSAndy King sizeof(struct Vmxnet3_DriverShared), 3345d1a890faSShreyas Bhatewara adapter->shared, adapter->shared_pa); 3346b0eb57cbSAndy King dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa, 3347b0eb57cbSAndy King sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE); 3348d1a890faSShreyas Bhatewara free_netdev(netdev); 3349d1a890faSShreyas Bhatewara } 3350d1a890faSShreyas Bhatewara 3351e9ba47bfSShreyas Bhatewara static void vmxnet3_shutdown_device(struct pci_dev *pdev) 3352e9ba47bfSShreyas Bhatewara { 3353e9ba47bfSShreyas Bhatewara struct net_device *netdev = pci_get_drvdata(pdev); 3354e9ba47bfSShreyas Bhatewara struct vmxnet3_adapter *adapter = netdev_priv(netdev); 3355e9ba47bfSShreyas Bhatewara unsigned long flags; 3356e9ba47bfSShreyas Bhatewara 3357e9ba47bfSShreyas Bhatewara /* Reset_work may be in the middle of resetting the device, wait for its 3358e9ba47bfSShreyas Bhatewara * completion. 
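 * VMXNET3_STATE_BIT_RESETTING doubles as a lock between this shutdown
 * path and the reset worker: whoever holds the bit owns the device.
 * VMXNET3_STATE_BIT_QUIESCED already being set means the device was
 * stopped earlier, so only the lock bit needs to be dropped.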
3359e9ba47bfSShreyas Bhatewara */ 3360e9ba47bfSShreyas Bhatewara while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)) 3361e9ba47bfSShreyas Bhatewara msleep(1); 3362e9ba47bfSShreyas Bhatewara 3363e9ba47bfSShreyas Bhatewara if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, 3364e9ba47bfSShreyas Bhatewara &adapter->state)) { 3365e9ba47bfSShreyas Bhatewara clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state); 3366e9ba47bfSShreyas Bhatewara return; 3367e9ba47bfSShreyas Bhatewara } 3368e9ba47bfSShreyas Bhatewara spin_lock_irqsave(&adapter->cmd_lock, flags); 3369e9ba47bfSShreyas Bhatewara VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 3370e9ba47bfSShreyas Bhatewara VMXNET3_CMD_QUIESCE_DEV); 3371e9ba47bfSShreyas Bhatewara spin_unlock_irqrestore(&adapter->cmd_lock, flags); 3372e9ba47bfSShreyas Bhatewara vmxnet3_disable_all_intrs(adapter); 3373e9ba47bfSShreyas Bhatewara 3374e9ba47bfSShreyas Bhatewara clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state); 3375e9ba47bfSShreyas Bhatewara } 3376e9ba47bfSShreyas Bhatewara 3377d1a890faSShreyas Bhatewara 3378d1a890faSShreyas Bhatewara #ifdef CONFIG_PM 3379d1a890faSShreyas Bhatewara 3380d1a890faSShreyas Bhatewara static int 3381d1a890faSShreyas Bhatewara vmxnet3_suspend(struct device *device) 3382d1a890faSShreyas Bhatewara { 3383d1a890faSShreyas Bhatewara struct pci_dev *pdev = to_pci_dev(device); 3384d1a890faSShreyas Bhatewara struct net_device *netdev = pci_get_drvdata(pdev); 3385d1a890faSShreyas Bhatewara struct vmxnet3_adapter *adapter = netdev_priv(netdev); 3386d1a890faSShreyas Bhatewara struct Vmxnet3_PMConf *pmConf; 3387d1a890faSShreyas Bhatewara struct ethhdr *ehdr; 3388d1a890faSShreyas Bhatewara struct arphdr *ahdr; 3389d1a890faSShreyas Bhatewara u8 *arpreq; 3390d1a890faSShreyas Bhatewara struct in_device *in_dev; 3391d1a890faSShreyas Bhatewara struct in_ifaddr *ifa; 339283d0feffSShreyas Bhatewara unsigned long flags; 3393d1a890faSShreyas Bhatewara int i = 0; 3394d1a890faSShreyas Bhatewara 3395d1a890faSShreyas Bhatewara if (!netif_running(netdev)) 3396d1a890faSShreyas Bhatewara return 0; 3397d1a890faSShreyas Bhatewara 339851956cd6SShreyas Bhatewara for (i = 0; i < adapter->num_rx_queues; i++) 339951956cd6SShreyas Bhatewara napi_disable(&adapter->rx_queue[i].napi); 340051956cd6SShreyas Bhatewara 3401d1a890faSShreyas Bhatewara vmxnet3_disable_all_intrs(adapter); 3402d1a890faSShreyas Bhatewara vmxnet3_free_irqs(adapter); 3403d1a890faSShreyas Bhatewara vmxnet3_free_intr_resources(adapter); 3404d1a890faSShreyas Bhatewara 3405d1a890faSShreyas Bhatewara netif_device_detach(netdev); 340609c5088eSShreyas Bhatewara netif_tx_stop_all_queues(netdev); 3407d1a890faSShreyas Bhatewara 3408d1a890faSShreyas Bhatewara /* Create wake-up filters. 
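 * Each filter is a byte pattern plus a bitmask: bit N of mask[] selects
 * byte N of pattern[] for comparison. The WAKE_UCAST filter matches the
 * first ETH_ALEN bytes of the frame (the station MAC address), hence
 * maskSize = 1 and mask[0] = 0x3F.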
*/ 3409d1a890faSShreyas Bhatewara pmConf = adapter->pm_conf; 3410d1a890faSShreyas Bhatewara memset(pmConf, 0, sizeof(*pmConf)); 3411d1a890faSShreyas Bhatewara 3412d1a890faSShreyas Bhatewara if (adapter->wol & WAKE_UCAST) { 3413d1a890faSShreyas Bhatewara pmConf->filters[i].patternSize = ETH_ALEN; 3414d1a890faSShreyas Bhatewara pmConf->filters[i].maskSize = 1; 3415d1a890faSShreyas Bhatewara memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN); 3416d1a890faSShreyas Bhatewara pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */ 3417d1a890faSShreyas Bhatewara 34183843e515SHarvey Harrison pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER; 3419d1a890faSShreyas Bhatewara i++; 3420d1a890faSShreyas Bhatewara } 3421d1a890faSShreyas Bhatewara 3422d1a890faSShreyas Bhatewara if (adapter->wol & WAKE_ARP) { 3423d1a890faSShreyas Bhatewara in_dev = in_dev_get(netdev); 3424d1a890faSShreyas Bhatewara if (!in_dev) 3425d1a890faSShreyas Bhatewara goto skip_arp; 3426d1a890faSShreyas Bhatewara 3427d1a890faSShreyas Bhatewara ifa = (struct in_ifaddr *)in_dev->ifa_list; 3428d1a890faSShreyas Bhatewara if (!ifa) 3429d1a890faSShreyas Bhatewara goto skip_arp; 3430d1a890faSShreyas Bhatewara 3431d1a890faSShreyas Bhatewara pmConf->filters[i].patternSize = ETH_HLEN + /* Ethernet header*/ 3432d1a890faSShreyas Bhatewara sizeof(struct arphdr) + /* ARP header */ 3433d1a890faSShreyas Bhatewara 2 * ETH_ALEN + /* 2 Ethernet addresses*/ 3434d1a890faSShreyas Bhatewara 2 * sizeof(u32); /*2 IPv4 addresses */ 3435d1a890faSShreyas Bhatewara pmConf->filters[i].maskSize = 3436d1a890faSShreyas Bhatewara (pmConf->filters[i].patternSize - 1) / 8 + 1; 3437d1a890faSShreyas Bhatewara 3438d1a890faSShreyas Bhatewara /* ETH_P_ARP in Ethernet header. */ 3439d1a890faSShreyas Bhatewara ehdr = (struct ethhdr *)pmConf->filters[i].pattern; 3440d1a890faSShreyas Bhatewara ehdr->h_proto = htons(ETH_P_ARP); 3441d1a890faSShreyas Bhatewara 3442d1a890faSShreyas Bhatewara /* ARPOP_REQUEST in ARP header. */ 3443d1a890faSShreyas Bhatewara ahdr = (struct arphdr *)&pmConf->filters[i].pattern[ETH_HLEN]; 3444d1a890faSShreyas Bhatewara ahdr->ar_op = htons(ARPOP_REQUEST); 3445d1a890faSShreyas Bhatewara arpreq = (u8 *)(ahdr + 1); 3446d1a890faSShreyas Bhatewara 3447d1a890faSShreyas Bhatewara /* The Unicast IPv4 address in 'tip' field. */ 3448d1a890faSShreyas Bhatewara arpreq += 2 * ETH_ALEN + sizeof(u32); 3449d1a890faSShreyas Bhatewara *(u32 *)arpreq = ifa->ifa_address; 3450d1a890faSShreyas Bhatewara 3451d1a890faSShreyas Bhatewara /* The mask for the relevant bits. 
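 * Each mask byte covers eight pattern bytes: mask[1] = 0x30 selects the
 * EtherType at offsets 12-13, mask[2] = 0x30 the ARP opcode at offsets
 * 20-21, and mask[4]/mask[5] the target IPv4 address at offsets 38-41
 * of the pattern built above.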
*/ 3452d1a890faSShreyas Bhatewara pmConf->filters[i].mask[0] = 0x00; 3453d1a890faSShreyas Bhatewara pmConf->filters[i].mask[1] = 0x30; /* ETH_P_ARP */ 3454d1a890faSShreyas Bhatewara pmConf->filters[i].mask[2] = 0x30; /* ARPOP_REQUEST */ 3455d1a890faSShreyas Bhatewara pmConf->filters[i].mask[3] = 0x00; 3456d1a890faSShreyas Bhatewara pmConf->filters[i].mask[4] = 0xC0; /* IPv4 TIP */ 3457d1a890faSShreyas Bhatewara pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */ 3458d1a890faSShreyas Bhatewara in_dev_put(in_dev); 3459d1a890faSShreyas Bhatewara 34603843e515SHarvey Harrison pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER; 3461d1a890faSShreyas Bhatewara i++; 3462d1a890faSShreyas Bhatewara } 3463d1a890faSShreyas Bhatewara 3464d1a890faSShreyas Bhatewara skip_arp: 3465d1a890faSShreyas Bhatewara if (adapter->wol & WAKE_MAGIC) 34663843e515SHarvey Harrison pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC; 3467d1a890faSShreyas Bhatewara 3468d1a890faSShreyas Bhatewara pmConf->numFilters = i; 3469d1a890faSShreyas Bhatewara 3470115924b6SShreyas Bhatewara adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1); 3471115924b6SShreyas Bhatewara adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof( 3472115924b6SShreyas Bhatewara *pmConf)); 3473b0eb57cbSAndy King adapter->shared->devRead.pmConfDesc.confPA = 3474b0eb57cbSAndy King cpu_to_le64(adapter->pm_conf_pa); 3475d1a890faSShreyas Bhatewara 347683d0feffSShreyas Bhatewara spin_lock_irqsave(&adapter->cmd_lock, flags); 3477d1a890faSShreyas Bhatewara VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 3478d1a890faSShreyas Bhatewara VMXNET3_CMD_UPDATE_PMCFG); 347983d0feffSShreyas Bhatewara spin_unlock_irqrestore(&adapter->cmd_lock, flags); 3480d1a890faSShreyas Bhatewara 3481d1a890faSShreyas Bhatewara pci_save_state(pdev); 3482d1a890faSShreyas Bhatewara pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND), 3483d1a890faSShreyas Bhatewara adapter->wol); 3484d1a890faSShreyas Bhatewara pci_disable_device(pdev); 3485d1a890faSShreyas Bhatewara pci_set_power_state(pdev, pci_choose_state(pdev, PMSG_SUSPEND)); 3486d1a890faSShreyas Bhatewara 3487d1a890faSShreyas Bhatewara return 0; 3488d1a890faSShreyas Bhatewara } 3489d1a890faSShreyas Bhatewara 3490d1a890faSShreyas Bhatewara 3491d1a890faSShreyas Bhatewara static int 3492d1a890faSShreyas Bhatewara vmxnet3_resume(struct device *device) 3493d1a890faSShreyas Bhatewara { 34945ec82c1eSShrikrishna Khare int err; 349583d0feffSShreyas Bhatewara unsigned long flags; 3496d1a890faSShreyas Bhatewara struct pci_dev *pdev = to_pci_dev(device); 3497d1a890faSShreyas Bhatewara struct net_device *netdev = pci_get_drvdata(pdev); 3498d1a890faSShreyas Bhatewara struct vmxnet3_adapter *adapter = netdev_priv(netdev); 3499d1a890faSShreyas Bhatewara 3500d1a890faSShreyas Bhatewara if (!netif_running(netdev)) 3501d1a890faSShreyas Bhatewara return 0; 3502d1a890faSShreyas Bhatewara 3503d1a890faSShreyas Bhatewara pci_set_power_state(pdev, PCI_D0); 3504d1a890faSShreyas Bhatewara pci_restore_state(pdev); 3505d1a890faSShreyas Bhatewara err = pci_enable_device_mem(pdev); 3506d1a890faSShreyas Bhatewara if (err != 0) 3507d1a890faSShreyas Bhatewara return err; 3508d1a890faSShreyas Bhatewara 3509d1a890faSShreyas Bhatewara pci_enable_wake(pdev, PCI_D0, 0); 3510d1a890faSShreyas Bhatewara 35115ec82c1eSShrikrishna Khare vmxnet3_alloc_intr_resources(adapter); 35125ec82c1eSShrikrishna Khare 35135ec82c1eSShrikrishna Khare /* During hibernate and suspend, device has to be reinitialized as the 35145ec82c1eSShrikrishna Khare * device state need not be 
preserved. 35155ec82c1eSShrikrishna Khare */ 35165ec82c1eSShrikrishna Khare 35175ec82c1eSShrikrishna Khare /* Need not check adapter state as other reset tasks cannot run during 35185ec82c1eSShrikrishna Khare * device resume. 35195ec82c1eSShrikrishna Khare */ 352083d0feffSShreyas Bhatewara spin_lock_irqsave(&adapter->cmd_lock, flags); 3521d1a890faSShreyas Bhatewara VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 35225ec82c1eSShrikrishna Khare VMXNET3_CMD_QUIESCE_DEV); 352383d0feffSShreyas Bhatewara spin_unlock_irqrestore(&adapter->cmd_lock, flags); 35245ec82c1eSShrikrishna Khare vmxnet3_tq_cleanup_all(adapter); 35255ec82c1eSShrikrishna Khare vmxnet3_rq_cleanup_all(adapter); 35265ec82c1eSShrikrishna Khare 35275ec82c1eSShrikrishna Khare vmxnet3_reset_dev(adapter); 35285ec82c1eSShrikrishna Khare err = vmxnet3_activate_dev(adapter); 35295ec82c1eSShrikrishna Khare if (err != 0) { 35305ec82c1eSShrikrishna Khare netdev_err(netdev, 35315ec82c1eSShrikrishna Khare "failed to re-activate on resume, error: %d", err); 35325ec82c1eSShrikrishna Khare vmxnet3_force_close(adapter); 35335ec82c1eSShrikrishna Khare return err; 35345ec82c1eSShrikrishna Khare } 35355ec82c1eSShrikrishna Khare netif_device_attach(netdev); 3536d1a890faSShreyas Bhatewara 3537d1a890faSShreyas Bhatewara return 0; 3538d1a890faSShreyas Bhatewara } 3539d1a890faSShreyas Bhatewara 354047145210SAlexey Dobriyan static const struct dev_pm_ops vmxnet3_pm_ops = { 3541d1a890faSShreyas Bhatewara .suspend = vmxnet3_suspend, 3542d1a890faSShreyas Bhatewara .resume = vmxnet3_resume, 35435ec82c1eSShrikrishna Khare .freeze = vmxnet3_suspend, 35445ec82c1eSShrikrishna Khare .restore = vmxnet3_resume, 3545d1a890faSShreyas Bhatewara }; 3546d1a890faSShreyas Bhatewara #endif 3547d1a890faSShreyas Bhatewara 3548d1a890faSShreyas Bhatewara static struct pci_driver vmxnet3_driver = { 3549d1a890faSShreyas Bhatewara .name = vmxnet3_driver_name, 3550d1a890faSShreyas Bhatewara .id_table = vmxnet3_pciid_table, 3551d1a890faSShreyas Bhatewara .probe = vmxnet3_probe_device, 35523a4751a3SBill Pemberton .remove = vmxnet3_remove_device, 3553e9ba47bfSShreyas Bhatewara .shutdown = vmxnet3_shutdown_device, 3554d1a890faSShreyas Bhatewara #ifdef CONFIG_PM 3555d1a890faSShreyas Bhatewara .driver.pm = &vmxnet3_pm_ops, 3556d1a890faSShreyas Bhatewara #endif 3557d1a890faSShreyas Bhatewara }; 3558d1a890faSShreyas Bhatewara 3559d1a890faSShreyas Bhatewara 3560d1a890faSShreyas Bhatewara static int __init 3561d1a890faSShreyas Bhatewara vmxnet3_init_module(void) 3562d1a890faSShreyas Bhatewara { 3563204a6e65SStephen Hemminger pr_info("%s - version %s\n", VMXNET3_DRIVER_DESC, 3564d1a890faSShreyas Bhatewara VMXNET3_DRIVER_VERSION_REPORT); 3565d1a890faSShreyas Bhatewara return pci_register_driver(&vmxnet3_driver); 3566d1a890faSShreyas Bhatewara } 3567d1a890faSShreyas Bhatewara 3568d1a890faSShreyas Bhatewara module_init(vmxnet3_init_module); 3569d1a890faSShreyas Bhatewara 3570d1a890faSShreyas Bhatewara 3571d1a890faSShreyas Bhatewara static void 3572d1a890faSShreyas Bhatewara vmxnet3_exit_module(void) 3573d1a890faSShreyas Bhatewara { 3574d1a890faSShreyas Bhatewara pci_unregister_driver(&vmxnet3_driver); 3575d1a890faSShreyas Bhatewara } 3576d1a890faSShreyas Bhatewara 3577d1a890faSShreyas Bhatewara module_exit(vmxnet3_exit_module); 3578d1a890faSShreyas Bhatewara 3579d1a890faSShreyas Bhatewara MODULE_AUTHOR("VMware, Inc."); 3580d1a890faSShreyas Bhatewara MODULE_DESCRIPTION(VMXNET3_DRIVER_DESC); 3581d1a890faSShreyas Bhatewara MODULE_LICENSE("GPL v2"); 3582d1a890faSShreyas Bhatewara 
MODULE_VERSION(VMXNET3_DRIVER_VERSION_STRING);