1f7917c00SJeff Kirsher /* 2f7917c00SJeff Kirsher * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved. 3f7917c00SJeff Kirsher * 4f7917c00SJeff Kirsher * This software is available to you under a choice of one of two 5f7917c00SJeff Kirsher * licenses. You may choose to be licensed under the terms of the GNU 6f7917c00SJeff Kirsher * General Public License (GPL) Version 2, available from the file 7f7917c00SJeff Kirsher * COPYING in the main directory of this source tree, or the 8f7917c00SJeff Kirsher * OpenIB.org BSD license below: 9f7917c00SJeff Kirsher * 10f7917c00SJeff Kirsher * Redistribution and use in source and binary forms, with or 11f7917c00SJeff Kirsher * without modification, are permitted provided that the following 12f7917c00SJeff Kirsher * conditions are met: 13f7917c00SJeff Kirsher * 14f7917c00SJeff Kirsher * - Redistributions of source code must retain the above 15f7917c00SJeff Kirsher * copyright notice, this list of conditions and the following 16f7917c00SJeff Kirsher * disclaimer. 17f7917c00SJeff Kirsher * 18f7917c00SJeff Kirsher * - Redistributions in binary form must reproduce the above 19f7917c00SJeff Kirsher * copyright notice, this list of conditions and the following 20f7917c00SJeff Kirsher * disclaimer in the documentation and/or other materials 21f7917c00SJeff Kirsher * provided with the distribution. 22f7917c00SJeff Kirsher * 23f7917c00SJeff Kirsher * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24f7917c00SJeff Kirsher * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25f7917c00SJeff Kirsher * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26f7917c00SJeff Kirsher * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27f7917c00SJeff Kirsher * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28f7917c00SJeff Kirsher * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29f7917c00SJeff Kirsher * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30f7917c00SJeff Kirsher * SOFTWARE. 31f7917c00SJeff Kirsher */ 32428ac43fSJoe Perches 33428ac43fSJoe Perches #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 34428ac43fSJoe Perches 35f7917c00SJeff Kirsher #include <linux/module.h> 36f7917c00SJeff Kirsher #include <linux/init.h> 37f7917c00SJeff Kirsher #include <linux/pci.h> 38f7917c00SJeff Kirsher #include <linux/dma-mapping.h> 39f7917c00SJeff Kirsher #include <linux/netdevice.h> 40f7917c00SJeff Kirsher #include <linux/etherdevice.h> 41f7917c00SJeff Kirsher #include <linux/if_vlan.h> 42f7917c00SJeff Kirsher #include <linux/mdio.h> 43f7917c00SJeff Kirsher #include <linux/sockios.h> 44f7917c00SJeff Kirsher #include <linux/workqueue.h> 45f7917c00SJeff Kirsher #include <linux/proc_fs.h> 46f7917c00SJeff Kirsher #include <linux/rtnetlink.h> 47f7917c00SJeff Kirsher #include <linux/firmware.h> 48f7917c00SJeff Kirsher #include <linux/log2.h> 49f7917c00SJeff Kirsher #include <linux/stringify.h> 50f7917c00SJeff Kirsher #include <linux/sched.h> 51f7917c00SJeff Kirsher #include <linux/slab.h> 527c0f6ba6SLinus Torvalds #include <linux/uaccess.h> 53676bcfecSGustavo A. R. 
Silva #include <linux/nospec.h> 54f7917c00SJeff Kirsher 55f7917c00SJeff Kirsher #include "common.h" 56f7917c00SJeff Kirsher #include "cxgb3_ioctl.h" 57f7917c00SJeff Kirsher #include "regs.h" 58f7917c00SJeff Kirsher #include "cxgb3_offload.h" 59f7917c00SJeff Kirsher #include "version.h" 60f7917c00SJeff Kirsher 61f7917c00SJeff Kirsher #include "cxgb3_ctl_defs.h" 62f7917c00SJeff Kirsher #include "t3_cpl.h" 63f7917c00SJeff Kirsher #include "firmware_exports.h" 64f7917c00SJeff Kirsher 65f7917c00SJeff Kirsher enum { 66f7917c00SJeff Kirsher MAX_TXQ_ENTRIES = 16384, 67f7917c00SJeff Kirsher MAX_CTRL_TXQ_ENTRIES = 1024, 68f7917c00SJeff Kirsher MAX_RSPQ_ENTRIES = 16384, 69f7917c00SJeff Kirsher MAX_RX_BUFFERS = 16384, 70f7917c00SJeff Kirsher MAX_RX_JUMBO_BUFFERS = 16384, 71f7917c00SJeff Kirsher MIN_TXQ_ENTRIES = 4, 72f7917c00SJeff Kirsher MIN_CTRL_TXQ_ENTRIES = 4, 73f7917c00SJeff Kirsher MIN_RSPQ_ENTRIES = 32, 74f7917c00SJeff Kirsher MIN_FL_ENTRIES = 32 75f7917c00SJeff Kirsher }; 76f7917c00SJeff Kirsher 77f7917c00SJeff Kirsher #define PORT_MASK ((1 << MAX_NPORTS) - 1) 78f7917c00SJeff Kirsher 79f7917c00SJeff Kirsher #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \ 80f7917c00SJeff Kirsher NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\ 81f7917c00SJeff Kirsher NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR) 82f7917c00SJeff Kirsher 83f7917c00SJeff Kirsher #define EEPROM_MAGIC 0x38E2F10C 84f7917c00SJeff Kirsher 85f7917c00SJeff Kirsher #define CH_DEVICE(devid, idx) \ 86f7917c00SJeff Kirsher { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx } 87f7917c00SJeff Kirsher 889baa3c34SBenoit Taine static const struct pci_device_id cxgb3_pci_tbl[] = { 89f7917c00SJeff Kirsher CH_DEVICE(0x20, 0), /* PE9000 */ 90f7917c00SJeff Kirsher CH_DEVICE(0x21, 1), /* T302E */ 91f7917c00SJeff Kirsher CH_DEVICE(0x22, 2), /* T310E */ 92f7917c00SJeff Kirsher CH_DEVICE(0x23, 3), /* T320X */ 93f7917c00SJeff Kirsher CH_DEVICE(0x24, 1), /* T302X */ 94f7917c00SJeff Kirsher 
	CH_DEVICE(0x25, 3),	/* T320E */
	CH_DEVICE(0x26, 2),	/* T310X */
	CH_DEVICE(0x30, 2),	/* T3B10 */
	CH_DEVICE(0x31, 3),	/* T3B20 */
	CH_DEVICE(0x32, 1),	/* T3B02 */
	CH_DEVICE(0x35, 6),	/* T3C20-derived T3C10 */
	CH_DEVICE(0x36, 3),	/* S320E-CR */
	CH_DEVICE(0x37, 7),	/* N320E-G2 */
	{0,}			/* table terminator */
};

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

/* Default netif message-enable bitmap, overridable at module load time. */
static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");

/*
 * The driver enables offload as a default.
 * To disable it, use ofld_disable = 1.
 */
static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");

/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete.  Get our own work queue to solve this.
 */
struct workqueue_struct *cxgb3_wq;

/**
 * link_report - show link status and link speed/duplex
 * @dev: the port whose settings are to be reported
 *
 * Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		netdev_info(dev, "link down\n");
	else {
		const char *s = "10Mbps";	/* default if speed unrecognized */
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_config.speed) {
		case SPEED_10000:
			s = "10Gbps";
			break;
		case SPEED_1000:
			s = "1000Mbps";
			break;
		case SPEED_100:
			s = "100Mbps";
			break;
		}

		netdev_info(dev, "link up, %s, %s-duplex\n",
			    s, p->link_config.duplex == DUPLEX_FULL
			    ? "full" : "half");
	}
}

/*
 * Let the MAC drain its TX FIFO while the link is unusable: set the
 * drop-packet bit in the TX FIFO config, then re-enable TX and RX so
 * queued frames are discarded rather than left stuck in the FIFO.
 */
static void enable_tx_fifo_drain(struct adapter *adapter,
				 struct port_info *pi)
{
	t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset, 0,
			 F_ENDROPPKT);
	t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, 0);
	t3_write_reg(adapter, A_XGM_TX_CTRL + pi->mac.offset, F_TXEN);
	t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, F_RXEN);
}

/* Undo enable_tx_fifo_drain(): clear the drop-packet bit again. */
static void disable_tx_fifo_drain(struct adapter *adapter,
				  struct port_info *pi)
{
	t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset,
			 F_ENDROPPKT, 0);
}

/*
 * Handle a link-fault state change on a port: update the carrier state,
 * and on fault recovery clear and re-arm the XGMAC interrupt before
 * re-enabling the TX path.  The register sequence below is order-sensitive.
 */
void t3_os_link_fault(struct adapter *adap, int port_id, int state)
{
	struct net_device *dev = adap->port[port_id];
	struct port_info *pi = netdev_priv(dev);

	/* No-op if the carrier state already matches the reported state. */
	if (state == netif_carrier_ok(dev))
		return;

	if (state) {
		struct cmac *mac = &pi->mac;

		netif_carrier_on(dev);

		disable_tx_fifo_drain(adap, pi);

		/* Clear local faults */
		t3_xgm_intr_disable(adap, pi->port_id);
		t3_read_reg(adap, A_XGM_INT_STATUS +
			    pi->mac.offset);
		t3_write_reg(adap,
			     A_XGM_INT_CAUSE + pi->mac.offset,
			     F_XGM_INT);

		t3_set_reg_field(adap,
				 A_XGM_INT_ENABLE +
				 pi->mac.offset,
				 F_XGM_INT, F_XGM_INT);
		t3_xgm_intr_enable(adap, pi->port_id);

		t3_mac_enable(mac, MAC_DIRECTION_TX);
	} else {
		netif_carrier_off(dev);

		/* Flush TX FIFO */
		enable_tx_fifo_drain(adap, pi);
	}
	link_report(dev);
}

/**
 * t3_os_link_changed - handle link status changes
 * @adapter: the adapter associated with the link change
 * @port_id: the port index whose link status has changed
 * @link_stat: the new status of the link
 * @speed: the new speed setting
 * @duplex: the new duplex setting
 * @pause: the new flow-control setting
 *
 * This is the OS-dependent handler for link status changes.  The OS
 * neutral handler takes care of most of the processing for these events,
 * then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
			int speed, int duplex, int pause)
{
	struct net_device *dev = adapter->port[port_id];
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	/* Skip changes from disabled ports. */
	if (!netif_running(dev))
		return;

	if (link_stat != netif_carrier_ok(dev)) {
		if (link_stat) {
			/* Link came up: stop draining, enable RX, then clear
			 * and re-arm the XGMAC interrupt (order-sensitive). */
			disable_tx_fifo_drain(adapter, pi);

			t3_mac_enable(mac, MAC_DIRECTION_RX);

			/* Clear local faults */
			t3_xgm_intr_disable(adapter, pi->port_id);
			t3_read_reg(adapter, A_XGM_INT_STATUS +
				    pi->mac.offset);
			t3_write_reg(adapter,
				     A_XGM_INT_CAUSE + pi->mac.offset,
				     F_XGM_INT);

			t3_set_reg_field(adapter,
					 A_XGM_INT_ENABLE + pi->mac.offset,
					 F_XGM_INT, F_XGM_INT);
			t3_xgm_intr_enable(adapter, pi->port_id);

			netif_carrier_on(dev);
		} else {
			/* Link went down: mask the XGMAC interrupt, power
			 * down 10G PHYs, disable RX and restart autoneg. */
			netif_carrier_off(dev);

			t3_xgm_intr_disable(adapter, pi->port_id);
			t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
			t3_set_reg_field(adapter,
					 A_XGM_INT_ENABLE + pi->mac.offset,
					 F_XGM_INT, 0);

			if (is_10G(adapter))
				pi->phy.ops->power_down(&pi->phy, 1);

			t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
			t3_mac_disable(mac, MAC_DIRECTION_RX);
			t3_link_start(&pi->phy, mac, &pi->link_config);

			/* Flush TX FIFO */
			enable_tx_fifo_drain(adapter, pi);
		}

		link_report(dev);
	}
}

/**
 * t3_os_phymod_changed - handle PHY module changes
 * @adap: the adapter associated with the link change
 * @port_id: the port index whose link status has changed
 *
 * This is the OS-dependent handler for PHY module changes.  It is
 * invoked when a PHY module is removed or inserted for any OS-specific
 * processing.
 */
void t3_os_phymod_changed(struct adapter *adap, int port_id)
{
	/* Indexed by pi->phy.modtype; entry 0 (phy_modtype_none) is unused
	 * here because the unplugged case is reported separately below. */
	static const char *mod_str[] = {
		NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
	};

	const struct net_device *dev = adap->port[port_id];
	const struct port_info *pi = netdev_priv(dev);

	if (pi->phy.modtype == phy_modtype_none)
		netdev_info(dev, "PHY module unplugged\n");
	else
		netdev_info(dev, "%s PHY module inserted\n",
			    mod_str[pi->phy.modtype]);
}

/* Program the MAC's RX mode (filters/promiscuity) from the netdevice. */
static void cxgb_set_rxmode(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);

	t3_mac_set_rx_mode(&pi->mac, dev);
}

/**
 * link_start - enable a port
 * @dev: the device to enable
 *
 * Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	t3_mac_reset(mac);
	t3_mac_set_num_ucast(mac, MAX_MAC_IDX);
	t3_mac_set_mtu(mac, dev->mtu);
	/* Program both the LAN and the iSCSI (SAN) unicast addresses. */
	t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
	t3_mac_set_address(mac, SAN_MAC_IDX, pi->iscsic.mac_addr);
	t3_mac_set_rx_mode(mac, dev);
	t3_link_start(&pi->phy, mac, &pi->link_config);
	t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}

/* Release whichever of MSI-X/MSI is in use and clear the matching flag. */
static inline void cxgb_disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}

/*
 * Interrupt handler for asynchronous events used with MSI-X.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
	t3_slow_intr_handler(cookie);
	return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
	/* n leaves room for a terminating NUL in each desc buffer. */
	int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

	/* Vector 0 is the slow/async interrupt, named after the adapter. */
	snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
	adap->msix_info[0].desc[n] = 0;

	/* Remaining vectors: one per queue set, named "<netdev>-<qset>". */
	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++) {
			snprintf(adap->msix_info[msi_idx].desc, n,
				 "%s-%d", d->name, pi->first_qset + i);
			adap->msix_info[msi_idx].desc[n] = 0;
		}
	}
}

/*
 * Request one MSI-X data IRQ per queue set (vector 0 is the async vector
 * and is requested elsewhere).  On failure, frees the IRQs acquired so far
 * and returns the error.
 */
static int request_msix_data_irqs(struct adapter *adap)
{
	int i, j, err, qidx = 0;

	for_each_port(adap, i) {
		int nqsets = adap2pinfo(adap, i)->nqsets;

		for (j = 0; j < nqsets; ++j) {
			err = request_irq(adap->msix_info[qidx + 1].vec,
					  t3_intr_handler(adap,
							  adap->sge.qs[qidx].
							  rspq.polling), 0,
					  adap->msix_info[qidx + 1].desc,
					  &adap->sge.qs[qidx]);
			if (err) {
				/* Unwind the IRQs already requested. */
				while (--qidx >= 0)
					free_irq(adap->msix_info[qidx + 1].vec,
						 &adap->sge.qs[qidx]);
				return err;
			}
			qidx++;
		}
	}
	return 0;
}

/* Free the async IRQ plus all per-queue-set IRQs (MSI-X), or the lone
 * PCI IRQ otherwise. */
static void free_irq_resources(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		int i, n = 0;

		free_irq(adapter->msix_info[0].vec, adapter);
		for_each_port(adapter, i)
			n += adap2pinfo(adapter, i)->nqsets;

		for (i = 0; i < n; ++i)
			free_irq(adapter->msix_info[i + 1].vec,
				 &adapter->sge.qs[i]);
	} else
		free_irq(adapter->pdev->irq, adapter);
}

/*
 * Poll (up to 10 x 10ms) until queue set 0 has seen @n more offload
 * packets than @init_cnt, i.e. until @n management replies have arrived.
 * Returns 0 on success or -ETIMEDOUT.
 */
static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
			      unsigned long n)
{
	int attempts = 10;

	while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
		if (!--attempts)
			return -ETIMEDOUT;
		msleep(10);
	}
	return 0;
}

/*
 * Initialize TP state by writing every SMT (16), L2T (2048) and routing
 * (2048) entry via management CPLs, finishing with a CPL_SET_TCB_FIELD.
 * adap->nofail_skb is used as a fallback when allocation fails; whenever it
 * is consumed we wait for the outstanding replies and replenish it.
 */
static int init_tp_parity(struct adapter *adap)
{
	int i;
	struct sk_buff *skb;
	struct cpl_set_tcb_field *greq;
	/* Baseline reply count; await_mgmt_replies() measures against it. */
	unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;

	t3_tp_set_offload_mode(adap, 1);

	/* Write all 16 SMT entries. */
	for (i = 0; i < 16; i++) {
		struct cpl_smt_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
		if (!skb)
			skb = adap->nofail_skb;
		if (!skb)
			goto alloc_skb_fail;

		req = __skb_put_zero(skb, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
		req->mtu_idx = NMTUS - 1;
		req->iff = i;
		t3_mgmt_tx(adap, skb);
		if (skb == adap->nofail_skb) {
			/* We just consumed the reserve skb: drain replies and
			 * replenish it before continuing. */
			await_mgmt_replies(adap, cnt, i + 1);
			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
			if (!adap->nofail_skb)
				goto alloc_skb_fail;
		}
	}

	/* Write all 2048 L2T entries. */
	for (i = 0; i < 2048; i++) {
		struct cpl_l2t_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
		if (!skb)
			skb = adap->nofail_skb;
		if (!skb)
			goto alloc_skb_fail;

		req = __skb_put_zero(skb, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
		req->params = htonl(V_L2T_W_IDX(i));
		t3_mgmt_tx(adap, skb);
		if (skb == adap->nofail_skb) {
			await_mgmt_replies(adap, cnt, 16 + i + 1);
			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
			if (!adap->nofail_skb)
				goto alloc_skb_fail;
		}
	}

	/* Write all 2048 routing-table entries. */
	for (i = 0; i < 2048; i++) {
		struct cpl_rte_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
		if (!skb)
			skb = adap->nofail_skb;
		if (!skb)
			goto alloc_skb_fail;

		req = __skb_put_zero(skb, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
		req->l2t_idx = htonl(V_L2T_W_IDX(i));
		t3_mgmt_tx(adap, skb);
		if (skb == adap->nofail_skb) {
			await_mgmt_replies(adap, cnt, 16 + 2048 + i + 1);
			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
			if (!adap->nofail_skb)
				goto alloc_skb_fail;
		}
	}

	/* Final CPL_SET_TCB_FIELD to finish the sequence. */
	skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
	if (!skb)
		skb = adap->nofail_skb;
	if (!skb)
		goto alloc_skb_fail;

	greq = __skb_put_zero(skb, sizeof(*greq));
	greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
	greq->mask = cpu_to_be64(1);
	t3_mgmt_tx(adap, skb);

	i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
	if (skb == adap->nofail_skb) {
		/* NOTE(review): this second await_mgmt_replies() call with
		 * identical arguments looks redundant — confirm intent. */
		i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
		adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
	}

	t3_tp_set_offload_mode(adap, 0);
	return i;

alloc_skb_fail:
	t3_tp_set_offload_mode(adap, 0);
	return -ENOMEM;
}

/**
 * setup_rss - configure RSS
 * @adap: the adapter
 *
 * Sets up RSS to distribute packets to multiple receive queues.  We
 * configure the RSS CPU lookup table to distribute to the number of HW
 * receive queues, and the response queue lookup table to narrow that
 * down to the response queues actually configured for each port.
 * We always configure the RSS mapping for two ports since the mapping
 * table has plenty of entries.
 */
static void setup_rss(struct adapter *adap)
{
	int i;
	unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
	unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
	u8 cpus[SGE_QSETS + 1];
	u16 rspq_map[RSS_TABLE_SIZE + 1];

	for (i = 0; i < SGE_QSETS; ++i)
		cpus[i] = i;
	cpus[SGE_QSETS] = 0xff;	/* terminator */

	/* First half of the table maps to port 0's qsets, second half to
	 * port 1's (offset by nq0). */
	for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
		rspq_map[i] = i % nq0;
		rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
	}
	rspq_map[RSS_TABLE_SIZE] = 0xffff;	/* terminator */

	t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
		      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
		      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
}

/* Ring the doorbell of every TX queue of every initialized queue set. */
static void ring_dbs(struct adapter *adap)
{
	int i, j;

	for (i = 0; i < SGE_QSETS; i++) {
		struct sge_qset *qs = &adap->sge.qs[i];

		if (qs->adap)
			for (j = 0; j < SGE_TXQ_PER_SET; j++)
				t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX | V_EGRCNTX(qs->txq[j].cntxt_id));
	}
}

/* Register a NAPI instance for every initialized queue set. */
static void init_napi(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++) {
		struct sge_qset *qs = &adap->sge.qs[i];

		if (qs->adap)
			netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll);
	}

	/*
	 * netif_napi_add() can be called only once per napi_struct because it
	 * adds each new napi_struct to a list.  Be careful not to call it a
	 * second time, e.g., during EEH recovery, by making a note of it.
	 */
	adap->flags |= NAPI_INIT;
}

/*
 * Wait until all NAPI handlers are descheduled.  This includes the handlers of
 * both netdevices representing interfaces and the dummy ones for the extra
 * queues.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++)
		if (adap->sge.qs[i].adap)
			napi_disable(&adap->sge.qs[i].napi);
}

/* Re-enable NAPI on every initialized queue set. */
static void enable_all_napi(struct adapter *adap)
{
	int i;
	for (i = 0; i < SGE_QSETS; i++)
		if (adap->sge.qs[i].adap)
			napi_enable(&adap->sge.qs[i].napi);
}

/**
 * setup_sge_qsets - configure SGE Tx/Rx/response queues
 * @adap: the adapter
 *
 * Determines how many sets of SGE queues to use and initializes them.
 * We support multiple queue sets per port if we have MSI-X, otherwise
 * just one queue set per port.
652f7917c00SJeff Kirsher */ 653f7917c00SJeff Kirsher static int setup_sge_qsets(struct adapter *adap) 654f7917c00SJeff Kirsher { 655f7917c00SJeff Kirsher int i, j, err, irq_idx = 0, qset_idx = 0; 656f7917c00SJeff Kirsher unsigned int ntxq = SGE_TXQ_PER_SET; 657f7917c00SJeff Kirsher 658f7917c00SJeff Kirsher if (adap->params.rev > 0 && !(adap->flags & USING_MSI)) 659f7917c00SJeff Kirsher irq_idx = -1; 660f7917c00SJeff Kirsher 661f7917c00SJeff Kirsher for_each_port(adap, i) { 662f7917c00SJeff Kirsher struct net_device *dev = adap->port[i]; 663f7917c00SJeff Kirsher struct port_info *pi = netdev_priv(dev); 664f7917c00SJeff Kirsher 665f7917c00SJeff Kirsher pi->qs = &adap->sge.qs[pi->first_qset]; 666f7917c00SJeff Kirsher for (j = 0; j < pi->nqsets; ++j, ++qset_idx) { 667f7917c00SJeff Kirsher err = t3_sge_alloc_qset(adap, qset_idx, 1, 668f7917c00SJeff Kirsher (adap->flags & USING_MSIX) ? qset_idx + 1 : 669f7917c00SJeff Kirsher irq_idx, 670f7917c00SJeff Kirsher &adap->params.sge.qset[qset_idx], ntxq, dev, 671f7917c00SJeff Kirsher netdev_get_tx_queue(dev, j)); 672f7917c00SJeff Kirsher if (err) { 673f7917c00SJeff Kirsher t3_free_sge_resources(adap); 674f7917c00SJeff Kirsher return err; 675f7917c00SJeff Kirsher } 676f7917c00SJeff Kirsher } 677f7917c00SJeff Kirsher } 678f7917c00SJeff Kirsher 679f7917c00SJeff Kirsher return 0; 680f7917c00SJeff Kirsher } 681f7917c00SJeff Kirsher 682f7917c00SJeff Kirsher static ssize_t attr_show(struct device *d, char *buf, 683f7917c00SJeff Kirsher ssize_t(*format) (struct net_device *, char *)) 684f7917c00SJeff Kirsher { 685f7917c00SJeff Kirsher ssize_t len; 686f7917c00SJeff Kirsher 687f7917c00SJeff Kirsher /* Synchronize with ioctls that may shut down the device */ 688f7917c00SJeff Kirsher rtnl_lock(); 689f7917c00SJeff Kirsher len = (*format) (to_net_dev(d), buf); 690f7917c00SJeff Kirsher rtnl_unlock(); 691f7917c00SJeff Kirsher return len; 692f7917c00SJeff Kirsher } 693f7917c00SJeff Kirsher 694f7917c00SJeff Kirsher static ssize_t 
attr_store(struct device *d, 695f7917c00SJeff Kirsher const char *buf, size_t len, 696f7917c00SJeff Kirsher ssize_t(*set) (struct net_device *, unsigned int), 697f7917c00SJeff Kirsher unsigned int min_val, unsigned int max_val) 698f7917c00SJeff Kirsher { 699f7917c00SJeff Kirsher ssize_t ret; 700f7917c00SJeff Kirsher unsigned int val; 701f7917c00SJeff Kirsher 702f7917c00SJeff Kirsher if (!capable(CAP_NET_ADMIN)) 703f7917c00SJeff Kirsher return -EPERM; 704f7917c00SJeff Kirsher 705e72c932dSLABBE Corentin ret = kstrtouint(buf, 0, &val); 706e72c932dSLABBE Corentin if (ret) 707e72c932dSLABBE Corentin return ret; 708e72c932dSLABBE Corentin if (val < min_val || val > max_val) 709f7917c00SJeff Kirsher return -EINVAL; 710f7917c00SJeff Kirsher 711f7917c00SJeff Kirsher rtnl_lock(); 712f7917c00SJeff Kirsher ret = (*set) (to_net_dev(d), val); 713f7917c00SJeff Kirsher if (!ret) 714f7917c00SJeff Kirsher ret = len; 715f7917c00SJeff Kirsher rtnl_unlock(); 716f7917c00SJeff Kirsher return ret; 717f7917c00SJeff Kirsher } 718f7917c00SJeff Kirsher 719f7917c00SJeff Kirsher #define CXGB3_SHOW(name, val_expr) \ 720f7917c00SJeff Kirsher static ssize_t format_##name(struct net_device *dev, char *buf) \ 721f7917c00SJeff Kirsher { \ 722f7917c00SJeff Kirsher struct port_info *pi = netdev_priv(dev); \ 723f7917c00SJeff Kirsher struct adapter *adap = pi->adapter; \ 724f7917c00SJeff Kirsher return sprintf(buf, "%u\n", val_expr); \ 725f7917c00SJeff Kirsher } \ 726f7917c00SJeff Kirsher static ssize_t show_##name(struct device *d, struct device_attribute *attr, \ 727f7917c00SJeff Kirsher char *buf) \ 728f7917c00SJeff Kirsher { \ 729f7917c00SJeff Kirsher return attr_show(d, buf, format_##name); \ 730f7917c00SJeff Kirsher } 731f7917c00SJeff Kirsher 732f7917c00SJeff Kirsher static ssize_t set_nfilters(struct net_device *dev, unsigned int val) 733f7917c00SJeff Kirsher { 734f7917c00SJeff Kirsher struct port_info *pi = netdev_priv(dev); 735f7917c00SJeff Kirsher struct adapter *adap = pi->adapter; 
736f7917c00SJeff Kirsher int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0; 737f7917c00SJeff Kirsher 738f7917c00SJeff Kirsher if (adap->flags & FULL_INIT_DONE) 739f7917c00SJeff Kirsher return -EBUSY; 740f7917c00SJeff Kirsher if (val && adap->params.rev == 0) 741f7917c00SJeff Kirsher return -EINVAL; 742f7917c00SJeff Kirsher if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers - 743f7917c00SJeff Kirsher min_tids) 744f7917c00SJeff Kirsher return -EINVAL; 745f7917c00SJeff Kirsher adap->params.mc5.nfilters = val; 746f7917c00SJeff Kirsher return 0; 747f7917c00SJeff Kirsher } 748f7917c00SJeff Kirsher 749f7917c00SJeff Kirsher static ssize_t store_nfilters(struct device *d, struct device_attribute *attr, 750f7917c00SJeff Kirsher const char *buf, size_t len) 751f7917c00SJeff Kirsher { 752f7917c00SJeff Kirsher return attr_store(d, buf, len, set_nfilters, 0, ~0); 753f7917c00SJeff Kirsher } 754f7917c00SJeff Kirsher 755f7917c00SJeff Kirsher static ssize_t set_nservers(struct net_device *dev, unsigned int val) 756f7917c00SJeff Kirsher { 757f7917c00SJeff Kirsher struct port_info *pi = netdev_priv(dev); 758f7917c00SJeff Kirsher struct adapter *adap = pi->adapter; 759f7917c00SJeff Kirsher 760f7917c00SJeff Kirsher if (adap->flags & FULL_INIT_DONE) 761f7917c00SJeff Kirsher return -EBUSY; 762f7917c00SJeff Kirsher if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters - 763f7917c00SJeff Kirsher MC5_MIN_TIDS) 764f7917c00SJeff Kirsher return -EINVAL; 765f7917c00SJeff Kirsher adap->params.mc5.nservers = val; 766f7917c00SJeff Kirsher return 0; 767f7917c00SJeff Kirsher } 768f7917c00SJeff Kirsher 769f7917c00SJeff Kirsher static ssize_t store_nservers(struct device *d, struct device_attribute *attr, 770f7917c00SJeff Kirsher const char *buf, size_t len) 771f7917c00SJeff Kirsher { 772f7917c00SJeff Kirsher return attr_store(d, buf, len, set_nservers, 0, ~0); 773f7917c00SJeff Kirsher } 774f7917c00SJeff Kirsher 775f7917c00SJeff Kirsher #define CXGB3_ATTR_R(name, val_expr) \ 
776f7917c00SJeff Kirsher CXGB3_SHOW(name, val_expr) \ 777d3757ba4SJoe Perches static DEVICE_ATTR(name, 0444, show_##name, NULL) 778f7917c00SJeff Kirsher 779f7917c00SJeff Kirsher #define CXGB3_ATTR_RW(name, val_expr, store_method) \ 780f7917c00SJeff Kirsher CXGB3_SHOW(name, val_expr) \ 781d3757ba4SJoe Perches static DEVICE_ATTR(name, 0644, show_##name, store_method) 782f7917c00SJeff Kirsher 783f7917c00SJeff Kirsher CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5)); 784f7917c00SJeff Kirsher CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters); 785f7917c00SJeff Kirsher CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers); 786f7917c00SJeff Kirsher 787f7917c00SJeff Kirsher static struct attribute *cxgb3_attrs[] = { 788f7917c00SJeff Kirsher &dev_attr_cam_size.attr, 789f7917c00SJeff Kirsher &dev_attr_nfilters.attr, 790f7917c00SJeff Kirsher &dev_attr_nservers.attr, 791f7917c00SJeff Kirsher NULL 792f7917c00SJeff Kirsher }; 793f7917c00SJeff Kirsher 79498dc8373SArvind Yadav static const struct attribute_group cxgb3_attr_group = { 79598dc8373SArvind Yadav .attrs = cxgb3_attrs, 79698dc8373SArvind Yadav }; 797f7917c00SJeff Kirsher 798f7917c00SJeff Kirsher static ssize_t tm_attr_show(struct device *d, 799f7917c00SJeff Kirsher char *buf, int sched) 800f7917c00SJeff Kirsher { 801f7917c00SJeff Kirsher struct port_info *pi = netdev_priv(to_net_dev(d)); 802f7917c00SJeff Kirsher struct adapter *adap = pi->adapter; 803f7917c00SJeff Kirsher unsigned int v, addr, bpt, cpt; 804f7917c00SJeff Kirsher ssize_t len; 805f7917c00SJeff Kirsher 806f7917c00SJeff Kirsher addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2; 807f7917c00SJeff Kirsher rtnl_lock(); 808f7917c00SJeff Kirsher t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr); 809f7917c00SJeff Kirsher v = t3_read_reg(adap, A_TP_TM_PIO_DATA); 810f7917c00SJeff Kirsher if (sched & 1) 811f7917c00SJeff Kirsher v >>= 16; 812f7917c00SJeff Kirsher bpt = (v >> 8) & 0xff; 813f7917c00SJeff Kirsher cpt = v & 0xff; 814f7917c00SJeff Kirsher 
if (!cpt) 815f7917c00SJeff Kirsher len = sprintf(buf, "disabled\n"); 816f7917c00SJeff Kirsher else { 817f7917c00SJeff Kirsher v = (adap->params.vpd.cclk * 1000) / cpt; 818f7917c00SJeff Kirsher len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125); 819f7917c00SJeff Kirsher } 820f7917c00SJeff Kirsher rtnl_unlock(); 821f7917c00SJeff Kirsher return len; 822f7917c00SJeff Kirsher } 823f7917c00SJeff Kirsher 824f7917c00SJeff Kirsher static ssize_t tm_attr_store(struct device *d, 825f7917c00SJeff Kirsher const char *buf, size_t len, int sched) 826f7917c00SJeff Kirsher { 827f7917c00SJeff Kirsher struct port_info *pi = netdev_priv(to_net_dev(d)); 828f7917c00SJeff Kirsher struct adapter *adap = pi->adapter; 829f7917c00SJeff Kirsher unsigned int val; 830f7917c00SJeff Kirsher ssize_t ret; 831f7917c00SJeff Kirsher 832f7917c00SJeff Kirsher if (!capable(CAP_NET_ADMIN)) 833f7917c00SJeff Kirsher return -EPERM; 834f7917c00SJeff Kirsher 835e72c932dSLABBE Corentin ret = kstrtouint(buf, 0, &val); 836e72c932dSLABBE Corentin if (ret) 837e72c932dSLABBE Corentin return ret; 838e72c932dSLABBE Corentin if (val > 10000000) 839f7917c00SJeff Kirsher return -EINVAL; 840f7917c00SJeff Kirsher 841f7917c00SJeff Kirsher rtnl_lock(); 842f7917c00SJeff Kirsher ret = t3_config_sched(adap, val, sched); 843f7917c00SJeff Kirsher if (!ret) 844f7917c00SJeff Kirsher ret = len; 845f7917c00SJeff Kirsher rtnl_unlock(); 846f7917c00SJeff Kirsher return ret; 847f7917c00SJeff Kirsher } 848f7917c00SJeff Kirsher 849f7917c00SJeff Kirsher #define TM_ATTR(name, sched) \ 850f7917c00SJeff Kirsher static ssize_t show_##name(struct device *d, struct device_attribute *attr, \ 851f7917c00SJeff Kirsher char *buf) \ 852f7917c00SJeff Kirsher { \ 853f7917c00SJeff Kirsher return tm_attr_show(d, buf, sched); \ 854f7917c00SJeff Kirsher } \ 855f7917c00SJeff Kirsher static ssize_t store_##name(struct device *d, struct device_attribute *attr, \ 856f7917c00SJeff Kirsher const char *buf, size_t len) \ 857f7917c00SJeff Kirsher { \ 858f7917c00SJeff 
Kirsher return tm_attr_store(d, buf, len, sched); \ 859f7917c00SJeff Kirsher } \ 860d3757ba4SJoe Perches static DEVICE_ATTR(name, 0644, show_##name, store_##name) 861f7917c00SJeff Kirsher 862f7917c00SJeff Kirsher TM_ATTR(sched0, 0); 863f7917c00SJeff Kirsher TM_ATTR(sched1, 1); 864f7917c00SJeff Kirsher TM_ATTR(sched2, 2); 865f7917c00SJeff Kirsher TM_ATTR(sched3, 3); 866f7917c00SJeff Kirsher TM_ATTR(sched4, 4); 867f7917c00SJeff Kirsher TM_ATTR(sched5, 5); 868f7917c00SJeff Kirsher TM_ATTR(sched6, 6); 869f7917c00SJeff Kirsher TM_ATTR(sched7, 7); 870f7917c00SJeff Kirsher 871f7917c00SJeff Kirsher static struct attribute *offload_attrs[] = { 872f7917c00SJeff Kirsher &dev_attr_sched0.attr, 873f7917c00SJeff Kirsher &dev_attr_sched1.attr, 874f7917c00SJeff Kirsher &dev_attr_sched2.attr, 875f7917c00SJeff Kirsher &dev_attr_sched3.attr, 876f7917c00SJeff Kirsher &dev_attr_sched4.attr, 877f7917c00SJeff Kirsher &dev_attr_sched5.attr, 878f7917c00SJeff Kirsher &dev_attr_sched6.attr, 879f7917c00SJeff Kirsher &dev_attr_sched7.attr, 880f7917c00SJeff Kirsher NULL 881f7917c00SJeff Kirsher }; 882f7917c00SJeff Kirsher 88398dc8373SArvind Yadav static const struct attribute_group offload_attr_group = { 88498dc8373SArvind Yadav .attrs = offload_attrs, 88598dc8373SArvind Yadav }; 886f7917c00SJeff Kirsher 887f7917c00SJeff Kirsher /* 888f7917c00SJeff Kirsher * Sends an sk_buff to an offload queue driver 889f7917c00SJeff Kirsher * after dealing with any active network taps. 
890f7917c00SJeff Kirsher */ 891f7917c00SJeff Kirsher static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb) 892f7917c00SJeff Kirsher { 893f7917c00SJeff Kirsher int ret; 894f7917c00SJeff Kirsher 895f7917c00SJeff Kirsher local_bh_disable(); 896f7917c00SJeff Kirsher ret = t3_offload_tx(tdev, skb); 897f7917c00SJeff Kirsher local_bh_enable(); 898f7917c00SJeff Kirsher return ret; 899f7917c00SJeff Kirsher } 900f7917c00SJeff Kirsher 901f7917c00SJeff Kirsher static int write_smt_entry(struct adapter *adapter, int idx) 902f7917c00SJeff Kirsher { 903f7917c00SJeff Kirsher struct cpl_smt_write_req *req; 904f7917c00SJeff Kirsher struct port_info *pi = netdev_priv(adapter->port[idx]); 905f7917c00SJeff Kirsher struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL); 906f7917c00SJeff Kirsher 907f7917c00SJeff Kirsher if (!skb) 908f7917c00SJeff Kirsher return -ENOMEM; 909f7917c00SJeff Kirsher 9104df864c1SJohannes Berg req = __skb_put(skb, sizeof(*req)); 911f7917c00SJeff Kirsher req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); 912f7917c00SJeff Kirsher OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx)); 913f7917c00SJeff Kirsher req->mtu_idx = NMTUS - 1; /* should be 0 but there's a T3 bug */ 914f7917c00SJeff Kirsher req->iff = idx; 915f7917c00SJeff Kirsher memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN); 916f7917c00SJeff Kirsher memcpy(req->src_mac1, pi->iscsic.mac_addr, ETH_ALEN); 917f7917c00SJeff Kirsher skb->priority = 1; 918f7917c00SJeff Kirsher offload_tx(&adapter->tdev, skb); 919f7917c00SJeff Kirsher return 0; 920f7917c00SJeff Kirsher } 921f7917c00SJeff Kirsher 922f7917c00SJeff Kirsher static int init_smt(struct adapter *adapter) 923f7917c00SJeff Kirsher { 924f7917c00SJeff Kirsher int i; 925f7917c00SJeff Kirsher 926f7917c00SJeff Kirsher for_each_port(adapter, i) 927f7917c00SJeff Kirsher write_smt_entry(adapter, i); 928f7917c00SJeff Kirsher return 0; 929f7917c00SJeff Kirsher } 930f7917c00SJeff Kirsher 931f7917c00SJeff Kirsher static 
void init_port_mtus(struct adapter *adapter) 932f7917c00SJeff Kirsher { 933f7917c00SJeff Kirsher unsigned int mtus = adapter->port[0]->mtu; 934f7917c00SJeff Kirsher 935f7917c00SJeff Kirsher if (adapter->port[1]) 936f7917c00SJeff Kirsher mtus |= adapter->port[1]->mtu << 16; 937f7917c00SJeff Kirsher t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus); 938f7917c00SJeff Kirsher } 939f7917c00SJeff Kirsher 940f7917c00SJeff Kirsher static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo, 941f7917c00SJeff Kirsher int hi, int port) 942f7917c00SJeff Kirsher { 943f7917c00SJeff Kirsher struct sk_buff *skb; 944f7917c00SJeff Kirsher struct mngt_pktsched_wr *req; 945f7917c00SJeff Kirsher int ret; 946f7917c00SJeff Kirsher 947f7917c00SJeff Kirsher skb = alloc_skb(sizeof(*req), GFP_KERNEL); 948f7917c00SJeff Kirsher if (!skb) 949f7917c00SJeff Kirsher skb = adap->nofail_skb; 950f7917c00SJeff Kirsher if (!skb) 951f7917c00SJeff Kirsher return -ENOMEM; 952f7917c00SJeff Kirsher 9534df864c1SJohannes Berg req = skb_put(skb, sizeof(*req)); 954f7917c00SJeff Kirsher req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT)); 955f7917c00SJeff Kirsher req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET; 956f7917c00SJeff Kirsher req->sched = sched; 957f7917c00SJeff Kirsher req->idx = qidx; 958f7917c00SJeff Kirsher req->min = lo; 959f7917c00SJeff Kirsher req->max = hi; 960f7917c00SJeff Kirsher req->binding = port; 961f7917c00SJeff Kirsher ret = t3_mgmt_tx(adap, skb); 962f7917c00SJeff Kirsher if (skb == adap->nofail_skb) { 963f7917c00SJeff Kirsher adap->nofail_skb = alloc_skb(sizeof(struct cpl_set_tcb_field), 964f7917c00SJeff Kirsher GFP_KERNEL); 965f7917c00SJeff Kirsher if (!adap->nofail_skb) 966f7917c00SJeff Kirsher ret = -ENOMEM; 967f7917c00SJeff Kirsher } 968f7917c00SJeff Kirsher 969f7917c00SJeff Kirsher return ret; 970f7917c00SJeff Kirsher } 971f7917c00SJeff Kirsher 972f7917c00SJeff Kirsher static int bind_qsets(struct adapter *adap) 973f7917c00SJeff Kirsher { 974f7917c00SJeff Kirsher int i, 
j, err = 0; 975f7917c00SJeff Kirsher 976f7917c00SJeff Kirsher for_each_port(adap, i) { 977f7917c00SJeff Kirsher const struct port_info *pi = adap2pinfo(adap, i); 978f7917c00SJeff Kirsher 979f7917c00SJeff Kirsher for (j = 0; j < pi->nqsets; ++j) { 980f7917c00SJeff Kirsher int ret = send_pktsched_cmd(adap, 1, 981f7917c00SJeff Kirsher pi->first_qset + j, -1, 982f7917c00SJeff Kirsher -1, i); 983f7917c00SJeff Kirsher if (ret) 984f7917c00SJeff Kirsher err = ret; 985f7917c00SJeff Kirsher } 986f7917c00SJeff Kirsher } 987f7917c00SJeff Kirsher 988f7917c00SJeff Kirsher return err; 989f7917c00SJeff Kirsher } 990f7917c00SJeff Kirsher 991f7917c00SJeff Kirsher #define FW_VERSION __stringify(FW_VERSION_MAJOR) "." \ 992f7917c00SJeff Kirsher __stringify(FW_VERSION_MINOR) "." __stringify(FW_VERSION_MICRO) 993f7917c00SJeff Kirsher #define FW_FNAME "cxgb3/t3fw-" FW_VERSION ".bin" 994f7917c00SJeff Kirsher #define TPSRAM_VERSION __stringify(TP_VERSION_MAJOR) "." \ 995f7917c00SJeff Kirsher __stringify(TP_VERSION_MINOR) "." 
__stringify(TP_VERSION_MICRO) 996f7917c00SJeff Kirsher #define TPSRAM_NAME "cxgb3/t3%c_psram-" TPSRAM_VERSION ".bin" 997f7917c00SJeff Kirsher #define AEL2005_OPT_EDC_NAME "cxgb3/ael2005_opt_edc.bin" 998f7917c00SJeff Kirsher #define AEL2005_TWX_EDC_NAME "cxgb3/ael2005_twx_edc.bin" 999f7917c00SJeff Kirsher #define AEL2020_TWX_EDC_NAME "cxgb3/ael2020_twx_edc.bin" 1000f7917c00SJeff Kirsher MODULE_FIRMWARE(FW_FNAME); 1001f7917c00SJeff Kirsher MODULE_FIRMWARE("cxgb3/t3b_psram-" TPSRAM_VERSION ".bin"); 1002f7917c00SJeff Kirsher MODULE_FIRMWARE("cxgb3/t3c_psram-" TPSRAM_VERSION ".bin"); 1003f7917c00SJeff Kirsher MODULE_FIRMWARE(AEL2005_OPT_EDC_NAME); 1004f7917c00SJeff Kirsher MODULE_FIRMWARE(AEL2005_TWX_EDC_NAME); 1005f7917c00SJeff Kirsher MODULE_FIRMWARE(AEL2020_TWX_EDC_NAME); 1006f7917c00SJeff Kirsher 1007f7917c00SJeff Kirsher static inline const char *get_edc_fw_name(int edc_idx) 1008f7917c00SJeff Kirsher { 1009f7917c00SJeff Kirsher const char *fw_name = NULL; 1010f7917c00SJeff Kirsher 1011f7917c00SJeff Kirsher switch (edc_idx) { 1012f7917c00SJeff Kirsher case EDC_OPT_AEL2005: 1013f7917c00SJeff Kirsher fw_name = AEL2005_OPT_EDC_NAME; 1014f7917c00SJeff Kirsher break; 1015f7917c00SJeff Kirsher case EDC_TWX_AEL2005: 1016f7917c00SJeff Kirsher fw_name = AEL2005_TWX_EDC_NAME; 1017f7917c00SJeff Kirsher break; 1018f7917c00SJeff Kirsher case EDC_TWX_AEL2020: 1019f7917c00SJeff Kirsher fw_name = AEL2020_TWX_EDC_NAME; 1020f7917c00SJeff Kirsher break; 1021f7917c00SJeff Kirsher } 1022f7917c00SJeff Kirsher return fw_name; 1023f7917c00SJeff Kirsher } 1024f7917c00SJeff Kirsher 1025f7917c00SJeff Kirsher int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size) 1026f7917c00SJeff Kirsher { 1027f7917c00SJeff Kirsher struct adapter *adapter = phy->adapter; 1028f7917c00SJeff Kirsher const struct firmware *fw; 102992a486caSKees Cook const char *fw_name; 1030f7917c00SJeff Kirsher u32 csum; 1031f7917c00SJeff Kirsher const __be32 *p; 1032f7917c00SJeff Kirsher u16 *cache = phy->phy_cache; 
103392a486caSKees Cook int i, ret = -EINVAL; 1034f7917c00SJeff Kirsher 103592a486caSKees Cook fw_name = get_edc_fw_name(edc_idx); 103692a486caSKees Cook if (fw_name) 103792a486caSKees Cook ret = request_firmware(&fw, fw_name, &adapter->pdev->dev); 1038f7917c00SJeff Kirsher if (ret < 0) { 1039f7917c00SJeff Kirsher dev_err(&adapter->pdev->dev, 1040f7917c00SJeff Kirsher "could not upgrade firmware: unable to load %s\n", 104192a486caSKees Cook fw_name); 1042f7917c00SJeff Kirsher return ret; 1043f7917c00SJeff Kirsher } 1044f7917c00SJeff Kirsher 1045f7917c00SJeff Kirsher /* check size, take checksum in account */ 1046f7917c00SJeff Kirsher if (fw->size > size + 4) { 1047f7917c00SJeff Kirsher CH_ERR(adapter, "firmware image too large %u, expected %d\n", 1048f7917c00SJeff Kirsher (unsigned int)fw->size, size + 4); 1049f7917c00SJeff Kirsher ret = -EINVAL; 1050f7917c00SJeff Kirsher } 1051f7917c00SJeff Kirsher 1052f7917c00SJeff Kirsher /* compute checksum */ 1053f7917c00SJeff Kirsher p = (const __be32 *)fw->data; 1054f7917c00SJeff Kirsher for (csum = 0, i = 0; i < fw->size / sizeof(csum); i++) 1055f7917c00SJeff Kirsher csum += ntohl(p[i]); 1056f7917c00SJeff Kirsher 1057f7917c00SJeff Kirsher if (csum != 0xffffffff) { 1058f7917c00SJeff Kirsher CH_ERR(adapter, "corrupted firmware image, checksum %u\n", 1059f7917c00SJeff Kirsher csum); 1060f7917c00SJeff Kirsher ret = -EINVAL; 1061f7917c00SJeff Kirsher } 1062f7917c00SJeff Kirsher 1063f7917c00SJeff Kirsher for (i = 0; i < size / 4 ; i++) { 1064f7917c00SJeff Kirsher *cache++ = (be32_to_cpu(p[i]) & 0xffff0000) >> 16; 1065f7917c00SJeff Kirsher *cache++ = be32_to_cpu(p[i]) & 0xffff; 1066f7917c00SJeff Kirsher } 1067f7917c00SJeff Kirsher 1068f7917c00SJeff Kirsher release_firmware(fw); 1069f7917c00SJeff Kirsher 1070f7917c00SJeff Kirsher return ret; 1071f7917c00SJeff Kirsher } 1072f7917c00SJeff Kirsher 1073f7917c00SJeff Kirsher static int upgrade_fw(struct adapter *adap) 1074f7917c00SJeff Kirsher { 1075f7917c00SJeff Kirsher int ret; 
1076f7917c00SJeff Kirsher const struct firmware *fw; 1077f7917c00SJeff Kirsher struct device *dev = &adap->pdev->dev; 1078f7917c00SJeff Kirsher 1079f7917c00SJeff Kirsher ret = request_firmware(&fw, FW_FNAME, dev); 1080f7917c00SJeff Kirsher if (ret < 0) { 1081f7917c00SJeff Kirsher dev_err(dev, "could not upgrade firmware: unable to load %s\n", 1082f7917c00SJeff Kirsher FW_FNAME); 1083f7917c00SJeff Kirsher return ret; 1084f7917c00SJeff Kirsher } 1085f7917c00SJeff Kirsher ret = t3_load_fw(adap, fw->data, fw->size); 1086f7917c00SJeff Kirsher release_firmware(fw); 1087f7917c00SJeff Kirsher 1088f7917c00SJeff Kirsher if (ret == 0) 1089f7917c00SJeff Kirsher dev_info(dev, "successful upgrade to firmware %d.%d.%d\n", 1090f7917c00SJeff Kirsher FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO); 1091f7917c00SJeff Kirsher else 1092f7917c00SJeff Kirsher dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n", 1093f7917c00SJeff Kirsher FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO); 1094f7917c00SJeff Kirsher 1095f7917c00SJeff Kirsher return ret; 1096f7917c00SJeff Kirsher } 1097f7917c00SJeff Kirsher 1098f7917c00SJeff Kirsher static inline char t3rev2char(struct adapter *adapter) 1099f7917c00SJeff Kirsher { 1100f7917c00SJeff Kirsher char rev = 0; 1101f7917c00SJeff Kirsher 1102f7917c00SJeff Kirsher switch(adapter->params.rev) { 1103f7917c00SJeff Kirsher case T3_REV_B: 1104f7917c00SJeff Kirsher case T3_REV_B2: 1105f7917c00SJeff Kirsher rev = 'b'; 1106f7917c00SJeff Kirsher break; 1107f7917c00SJeff Kirsher case T3_REV_C: 1108f7917c00SJeff Kirsher rev = 'c'; 1109f7917c00SJeff Kirsher break; 1110f7917c00SJeff Kirsher } 1111f7917c00SJeff Kirsher return rev; 1112f7917c00SJeff Kirsher } 1113f7917c00SJeff Kirsher 1114f7917c00SJeff Kirsher static int update_tpsram(struct adapter *adap) 1115f7917c00SJeff Kirsher { 1116f7917c00SJeff Kirsher const struct firmware *tpsram; 1117f7917c00SJeff Kirsher char buf[64]; 1118f7917c00SJeff Kirsher struct device *dev = &adap->pdev->dev; 
1119f7917c00SJeff Kirsher int ret; 1120f7917c00SJeff Kirsher char rev; 1121f7917c00SJeff Kirsher 1122f7917c00SJeff Kirsher rev = t3rev2char(adap); 1123f7917c00SJeff Kirsher if (!rev) 1124f7917c00SJeff Kirsher return 0; 1125f7917c00SJeff Kirsher 1126f7917c00SJeff Kirsher snprintf(buf, sizeof(buf), TPSRAM_NAME, rev); 1127f7917c00SJeff Kirsher 1128f7917c00SJeff Kirsher ret = request_firmware(&tpsram, buf, dev); 1129f7917c00SJeff Kirsher if (ret < 0) { 1130f7917c00SJeff Kirsher dev_err(dev, "could not load TP SRAM: unable to load %s\n", 1131f7917c00SJeff Kirsher buf); 1132f7917c00SJeff Kirsher return ret; 1133f7917c00SJeff Kirsher } 1134f7917c00SJeff Kirsher 1135f7917c00SJeff Kirsher ret = t3_check_tpsram(adap, tpsram->data, tpsram->size); 1136f7917c00SJeff Kirsher if (ret) 1137f7917c00SJeff Kirsher goto release_tpsram; 1138f7917c00SJeff Kirsher 1139f7917c00SJeff Kirsher ret = t3_set_proto_sram(adap, tpsram->data); 1140f7917c00SJeff Kirsher if (ret == 0) 1141f7917c00SJeff Kirsher dev_info(dev, 1142f7917c00SJeff Kirsher "successful update of protocol engine " 1143f7917c00SJeff Kirsher "to %d.%d.%d\n", 1144f7917c00SJeff Kirsher TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO); 1145f7917c00SJeff Kirsher else 1146f7917c00SJeff Kirsher dev_err(dev, "failed to update of protocol engine %d.%d.%d\n", 1147f7917c00SJeff Kirsher TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO); 1148f7917c00SJeff Kirsher if (ret) 1149f7917c00SJeff Kirsher dev_err(dev, "loading protocol SRAM failed\n"); 1150f7917c00SJeff Kirsher 1151f7917c00SJeff Kirsher release_tpsram: 1152f7917c00SJeff Kirsher release_firmware(tpsram); 1153f7917c00SJeff Kirsher 1154f7917c00SJeff Kirsher return ret; 1155f7917c00SJeff Kirsher } 1156f7917c00SJeff Kirsher 1157f7917c00SJeff Kirsher /** 115860158e64SRoland Dreier * t3_synchronize_rx - wait for current Rx processing on a port to complete 115960158e64SRoland Dreier * @adap: the adapter 116060158e64SRoland Dreier * @p: the port 116160158e64SRoland Dreier * 
 *	Ensures that current Rx processing on any of the queues associated with
 *	the given port completes before returning.  We do this by acquiring and
 *	releasing the locks of the response queues associated with the port.
 */
static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
{
	int i;

	for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
		struct sge_rspq *q = &adap->sge.qs[i].rspq;

		/* Taking and immediately dropping the lock waits out any
		 * Rx handler currently holding it. */
		spin_lock_irq(&q->lock);
		spin_unlock_irq(&q->lock);
	}
}

/* Program hardware VLAN tag extraction to match @features, then wait for
 * in-flight Rx on this port to observe the new setting. */
static void cxgb_vlan_mode(struct net_device *dev, netdev_features_t features)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	if (adapter->params.rev > 0) {
		/* Rev > 0 parts have a per-port VLAN extraction control. */
		t3_set_vlan_accel(adapter, 1 << pi->port_id,
				  features & NETIF_F_HW_VLAN_CTAG_RX);
	} else {
		/* single control for all ports */
		unsigned int i, have_vlans = features & NETIF_F_HW_VLAN_CTAG_RX;

		/* Enable extraction if ANY port wants it. */
		for_each_port(adapter, i)
			have_vlans |=
				adapter->port[i]->features &
				NETIF_F_HW_VLAN_CTAG_RX;

		t3_set_vlan_accel(adapter, 1, have_vlans);
	}
	t3_synchronize_rx(adapter, pi);
}

/**
 *	cxgb_up - enable the adapter
 *	@adap: adapter being enabled
 *
 *	Called when the first port is enabled, this function performs the
 *	actions necessary to make an adapter operational, such as completing
 *	the initialization of HW modules, and enabling interrupts.
 *
 *	Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
	int i, err;

	/* One-time hardware initialization on the first bring-up. */
	if (!(adap->flags & FULL_INIT_DONE)) {
		/* Upgrade FW / TP SRAM if the flashed versions don't match
		 * what this driver expects. */
		err = t3_check_fw_version(adap);
		if (err == -EINVAL) {
			err = upgrade_fw(adap);
			CH_WARN(adap, "FW upgrade to %d.%d.%d %s\n",
				FW_VERSION_MAJOR, FW_VERSION_MINOR,
				FW_VERSION_MICRO, err ? "failed" : "succeeded");
		}

		err = t3_check_tpsram_version(adap);
		if (err == -EINVAL) {
			err = update_tpsram(adap);
			CH_WARN(adap, "TP upgrade to %d.%d.%d %s\n",
				TP_VERSION_MAJOR, TP_VERSION_MINOR,
				TP_VERSION_MICRO, err ? "failed" : "succeeded");
		}

		/*
		 * Clear interrupts now to catch errors if t3_init_hw fails.
		 * We clear them again later as initialization may trigger
		 * conditions that can interrupt.
		 */
		t3_intr_clear(adap);

		err = t3_init_hw(adap, 0);
		if (err)
			goto out;

		t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
		t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

		err = setup_sge_qsets(adap);
		if (err)
			goto out;

		for_each_port(adap, i)
			cxgb_vlan_mode(adap->port[i], adap->port[i]->features);

		setup_rss(adap);
		/* NAPI may only be registered once per napi_struct, even
		 * across EEH recovery — see init_napi(). */
		if (!(adap->flags & NAPI_INIT))
			init_napi(adap);

		t3_start_sge_timers(adap);
		adap->flags |= FULL_INIT_DONE;
	}

	t3_intr_clear(adap);

	if (adap->flags & USING_MSIX) {
		/* Vector 0 handles async events, the rest carry data. */
		name_msix_vecs(adap);
		err = request_irq(adap->msix_info[0].vec,
				  t3_async_intr_handler, 0,
				  adap->msix_info[0].desc, adap);
		if (err)
			goto irq_err;

		err = request_msix_data_irqs(adap);
		if (err) {
			free_irq(adap->msix_info[0].vec, adap);
			goto irq_err;
		}
	} else {
		err = request_irq(adap->pdev->irq,
				  t3_intr_handler(adap, adap->sge.qs[0].rspq.polling),
				  (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
				  adap->name, adap);
		if (err)
			goto irq_err;
	}

	enable_all_napi(adap);
	t3_sge_start(adap);
	t3_intr_enable(adap);

	/* Rev C parts need one-time TP parity initialization (offload only). */
	if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
	    is_offload(adap) && init_tp_parity(adap) == 0)
		adap->flags |= TP_PARITY_INIT;

	if (adap->flags & TP_PARITY_INIT) {
		t3_write_reg(adap, A_TP_INT_CAUSE,
			     F_CMCACHEPERR | F_ARPLUTPERR);
		t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
	}

	if (!(adap->flags & QUEUES_BOUND)) {
		int ret = bind_qsets(adap);

		if (ret < 0) {
			CH_ERR(adap, "failed to bind qsets, err %d\n", ret);
			/* Unwind: stop interrupts and NAPI before dropping
			 * the IRQs we just requested. */
			t3_intr_disable(adap);
			quiesce_rx(adap);
			free_irq_resources(adap);
			err = ret;
			goto out;
		}
		adap->flags |= QUEUES_BOUND;
	}

out:
	return err;
irq_err:
	CH_ERR(adap, "request_irq failed, err %d\n", err);
	goto out;
}

/*
 * Release resources when all the ports and offloading have been stopped.
1321f7917c00SJeff Kirsher */ 1322f7917c00SJeff Kirsher static void cxgb_down(struct adapter *adapter, int on_wq) 1323f7917c00SJeff Kirsher { 1324f7917c00SJeff Kirsher t3_sge_stop(adapter); 1325f7917c00SJeff Kirsher spin_lock_irq(&adapter->work_lock); /* sync with PHY intr task */ 1326f7917c00SJeff Kirsher t3_intr_disable(adapter); 1327f7917c00SJeff Kirsher spin_unlock_irq(&adapter->work_lock); 1328f7917c00SJeff Kirsher 1329f7917c00SJeff Kirsher free_irq_resources(adapter); 1330f7917c00SJeff Kirsher quiesce_rx(adapter); 1331f7917c00SJeff Kirsher t3_sge_stop(adapter); 1332f7917c00SJeff Kirsher if (!on_wq) 1333f7917c00SJeff Kirsher flush_workqueue(cxgb3_wq);/* wait for external IRQ handler */ 1334f7917c00SJeff Kirsher } 1335f7917c00SJeff Kirsher 1336f7917c00SJeff Kirsher static void schedule_chk_task(struct adapter *adap) 1337f7917c00SJeff Kirsher { 1338f7917c00SJeff Kirsher unsigned int timeo; 1339f7917c00SJeff Kirsher 1340f7917c00SJeff Kirsher timeo = adap->params.linkpoll_period ? 1341f7917c00SJeff Kirsher (HZ * adap->params.linkpoll_period) / 10 : 1342f7917c00SJeff Kirsher adap->params.stats_update_period * HZ; 1343f7917c00SJeff Kirsher if (timeo) 1344f7917c00SJeff Kirsher queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo); 1345f7917c00SJeff Kirsher } 1346f7917c00SJeff Kirsher 1347f7917c00SJeff Kirsher static int offload_open(struct net_device *dev) 1348f7917c00SJeff Kirsher { 1349f7917c00SJeff Kirsher struct port_info *pi = netdev_priv(dev); 1350f7917c00SJeff Kirsher struct adapter *adapter = pi->adapter; 1351f7917c00SJeff Kirsher struct t3cdev *tdev = dev2t3cdev(dev); 1352f7917c00SJeff Kirsher int adap_up = adapter->open_device_map & PORT_MASK; 1353f7917c00SJeff Kirsher int err; 1354f7917c00SJeff Kirsher 1355f7917c00SJeff Kirsher if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) 1356f7917c00SJeff Kirsher return 0; 1357f7917c00SJeff Kirsher 1358f7917c00SJeff Kirsher if (!adap_up && (err = cxgb_up(adapter)) < 0) 1359f7917c00SJeff 
Kirsher goto out; 1360f7917c00SJeff Kirsher 1361f7917c00SJeff Kirsher t3_tp_set_offload_mode(adapter, 1); 1362f7917c00SJeff Kirsher tdev->lldev = adapter->port[0]; 1363f7917c00SJeff Kirsher err = cxgb3_offload_activate(adapter); 1364f7917c00SJeff Kirsher if (err) 1365f7917c00SJeff Kirsher goto out; 1366f7917c00SJeff Kirsher 1367f7917c00SJeff Kirsher init_port_mtus(adapter); 1368f7917c00SJeff Kirsher t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd, 1369f7917c00SJeff Kirsher adapter->params.b_wnd, 1370f7917c00SJeff Kirsher adapter->params.rev == 0 ? 1371f7917c00SJeff Kirsher adapter->port[0]->mtu : 0xffff); 1372f7917c00SJeff Kirsher init_smt(adapter); 1373f7917c00SJeff Kirsher 1374f7917c00SJeff Kirsher if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group)) 1375f7917c00SJeff Kirsher dev_dbg(&dev->dev, "cannot create sysfs group\n"); 1376f7917c00SJeff Kirsher 1377f7917c00SJeff Kirsher /* Call back all registered clients */ 1378f7917c00SJeff Kirsher cxgb3_add_clients(tdev); 1379f7917c00SJeff Kirsher 1380f7917c00SJeff Kirsher out: 1381f7917c00SJeff Kirsher /* restore them in case the offload module has changed them */ 1382f7917c00SJeff Kirsher if (err) { 1383f7917c00SJeff Kirsher t3_tp_set_offload_mode(adapter, 0); 1384f7917c00SJeff Kirsher clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map); 1385f7917c00SJeff Kirsher cxgb3_set_dummy_ops(tdev); 1386f7917c00SJeff Kirsher } 1387f7917c00SJeff Kirsher return err; 1388f7917c00SJeff Kirsher } 1389f7917c00SJeff Kirsher 1390f7917c00SJeff Kirsher static int offload_close(struct t3cdev *tdev) 1391f7917c00SJeff Kirsher { 1392f7917c00SJeff Kirsher struct adapter *adapter = tdev2adap(tdev); 1393f7917c00SJeff Kirsher struct t3c_data *td = T3C_DATA(tdev); 1394f7917c00SJeff Kirsher 1395f7917c00SJeff Kirsher if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) 1396f7917c00SJeff Kirsher return 0; 1397f7917c00SJeff Kirsher 1398f7917c00SJeff Kirsher /* Call back all registered clients */ 
1399f7917c00SJeff Kirsher cxgb3_remove_clients(tdev); 1400f7917c00SJeff Kirsher 1401f7917c00SJeff Kirsher sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group); 1402f7917c00SJeff Kirsher 1403f7917c00SJeff Kirsher /* Flush work scheduled while releasing TIDs */ 140443829731STejun Heo flush_work(&td->tid_release_task); 1405f7917c00SJeff Kirsher 1406f7917c00SJeff Kirsher tdev->lldev = NULL; 1407f7917c00SJeff Kirsher cxgb3_set_dummy_ops(tdev); 1408f7917c00SJeff Kirsher t3_tp_set_offload_mode(adapter, 0); 1409f7917c00SJeff Kirsher clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map); 1410f7917c00SJeff Kirsher 1411f7917c00SJeff Kirsher if (!adapter->open_device_map) 1412f7917c00SJeff Kirsher cxgb_down(adapter, 0); 1413f7917c00SJeff Kirsher 1414f7917c00SJeff Kirsher cxgb3_offload_deactivate(adapter); 1415f7917c00SJeff Kirsher return 0; 1416f7917c00SJeff Kirsher } 1417f7917c00SJeff Kirsher 1418f7917c00SJeff Kirsher static int cxgb_open(struct net_device *dev) 1419f7917c00SJeff Kirsher { 1420f7917c00SJeff Kirsher struct port_info *pi = netdev_priv(dev); 1421f7917c00SJeff Kirsher struct adapter *adapter = pi->adapter; 1422f7917c00SJeff Kirsher int other_ports = adapter->open_device_map & PORT_MASK; 1423f7917c00SJeff Kirsher int err; 1424f7917c00SJeff Kirsher 1425f7917c00SJeff Kirsher if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) 1426f7917c00SJeff Kirsher return err; 1427f7917c00SJeff Kirsher 1428f7917c00SJeff Kirsher set_bit(pi->port_id, &adapter->open_device_map); 1429f7917c00SJeff Kirsher if (is_offload(adapter) && !ofld_disable) { 1430f7917c00SJeff Kirsher err = offload_open(dev); 1431f7917c00SJeff Kirsher if (err) 1432428ac43fSJoe Perches pr_warn("Could not initialize offload capabilities\n"); 1433f7917c00SJeff Kirsher } 1434f7917c00SJeff Kirsher 1435f7917c00SJeff Kirsher netif_set_real_num_tx_queues(dev, pi->nqsets); 1436f7917c00SJeff Kirsher err = netif_set_real_num_rx_queues(dev, pi->nqsets); 1437f7917c00SJeff Kirsher if (err) 
1438f7917c00SJeff Kirsher return err; 1439f7917c00SJeff Kirsher link_start(dev); 1440f7917c00SJeff Kirsher t3_port_intr_enable(adapter, pi->port_id); 1441f7917c00SJeff Kirsher netif_tx_start_all_queues(dev); 1442f7917c00SJeff Kirsher if (!other_ports) 1443f7917c00SJeff Kirsher schedule_chk_task(adapter); 1444f7917c00SJeff Kirsher 1445f7917c00SJeff Kirsher cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_UP, pi->port_id); 1446f7917c00SJeff Kirsher return 0; 1447f7917c00SJeff Kirsher } 1448f7917c00SJeff Kirsher 1449f7917c00SJeff Kirsher static int __cxgb_close(struct net_device *dev, int on_wq) 1450f7917c00SJeff Kirsher { 1451f7917c00SJeff Kirsher struct port_info *pi = netdev_priv(dev); 1452f7917c00SJeff Kirsher struct adapter *adapter = pi->adapter; 1453f7917c00SJeff Kirsher 1454f7917c00SJeff Kirsher 1455f7917c00SJeff Kirsher if (!adapter->open_device_map) 1456f7917c00SJeff Kirsher return 0; 1457f7917c00SJeff Kirsher 1458f7917c00SJeff Kirsher /* Stop link fault interrupts */ 1459f7917c00SJeff Kirsher t3_xgm_intr_disable(adapter, pi->port_id); 1460f7917c00SJeff Kirsher t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset); 1461f7917c00SJeff Kirsher 1462f7917c00SJeff Kirsher t3_port_intr_disable(adapter, pi->port_id); 1463f7917c00SJeff Kirsher netif_tx_stop_all_queues(dev); 1464f7917c00SJeff Kirsher pi->phy.ops->power_down(&pi->phy, 1); 1465f7917c00SJeff Kirsher netif_carrier_off(dev); 1466f7917c00SJeff Kirsher t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX); 1467f7917c00SJeff Kirsher 1468f7917c00SJeff Kirsher spin_lock_irq(&adapter->work_lock); /* sync with update task */ 1469f7917c00SJeff Kirsher clear_bit(pi->port_id, &adapter->open_device_map); 1470f7917c00SJeff Kirsher spin_unlock_irq(&adapter->work_lock); 1471f7917c00SJeff Kirsher 1472f7917c00SJeff Kirsher if (!(adapter->open_device_map & PORT_MASK)) 1473f7917c00SJeff Kirsher cancel_delayed_work_sync(&adapter->adap_check_task); 1474f7917c00SJeff Kirsher 1475f7917c00SJeff Kirsher if 
(!adapter->open_device_map) 1476f7917c00SJeff Kirsher cxgb_down(adapter, on_wq); 1477f7917c00SJeff Kirsher 1478f7917c00SJeff Kirsher cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_DOWN, pi->port_id); 1479f7917c00SJeff Kirsher return 0; 1480f7917c00SJeff Kirsher } 1481f7917c00SJeff Kirsher 1482f7917c00SJeff Kirsher static int cxgb_close(struct net_device *dev) 1483f7917c00SJeff Kirsher { 1484f7917c00SJeff Kirsher return __cxgb_close(dev, 0); 1485f7917c00SJeff Kirsher } 1486f7917c00SJeff Kirsher 1487f7917c00SJeff Kirsher static struct net_device_stats *cxgb_get_stats(struct net_device *dev) 1488f7917c00SJeff Kirsher { 1489f7917c00SJeff Kirsher struct port_info *pi = netdev_priv(dev); 1490f7917c00SJeff Kirsher struct adapter *adapter = pi->adapter; 1491a73be7feSTobias Klauser struct net_device_stats *ns = &dev->stats; 1492f7917c00SJeff Kirsher const struct mac_stats *pstats; 1493f7917c00SJeff Kirsher 1494f7917c00SJeff Kirsher spin_lock(&adapter->stats_lock); 1495f7917c00SJeff Kirsher pstats = t3_mac_update_stats(&pi->mac); 1496f7917c00SJeff Kirsher spin_unlock(&adapter->stats_lock); 1497f7917c00SJeff Kirsher 1498f7917c00SJeff Kirsher ns->tx_bytes = pstats->tx_octets; 1499f7917c00SJeff Kirsher ns->tx_packets = pstats->tx_frames; 1500f7917c00SJeff Kirsher ns->rx_bytes = pstats->rx_octets; 1501f7917c00SJeff Kirsher ns->rx_packets = pstats->rx_frames; 1502f7917c00SJeff Kirsher ns->multicast = pstats->rx_mcast_frames; 1503f7917c00SJeff Kirsher 1504f7917c00SJeff Kirsher ns->tx_errors = pstats->tx_underrun; 1505f7917c00SJeff Kirsher ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs + 1506f7917c00SJeff Kirsher pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short + 1507f7917c00SJeff Kirsher pstats->rx_fifo_ovfl; 1508f7917c00SJeff Kirsher 1509f7917c00SJeff Kirsher /* detailed rx_errors */ 1510f7917c00SJeff Kirsher ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long; 1511f7917c00SJeff Kirsher ns->rx_over_errors = 0; 1512f7917c00SJeff Kirsher 
ns->rx_crc_errors = pstats->rx_fcs_errs; 1513f7917c00SJeff Kirsher ns->rx_frame_errors = pstats->rx_symbol_errs; 1514f7917c00SJeff Kirsher ns->rx_fifo_errors = pstats->rx_fifo_ovfl; 1515f7917c00SJeff Kirsher ns->rx_missed_errors = pstats->rx_cong_drops; 1516f7917c00SJeff Kirsher 1517f7917c00SJeff Kirsher /* detailed tx_errors */ 1518f7917c00SJeff Kirsher ns->tx_aborted_errors = 0; 1519f7917c00SJeff Kirsher ns->tx_carrier_errors = 0; 1520f7917c00SJeff Kirsher ns->tx_fifo_errors = pstats->tx_underrun; 1521f7917c00SJeff Kirsher ns->tx_heartbeat_errors = 0; 1522f7917c00SJeff Kirsher ns->tx_window_errors = 0; 1523f7917c00SJeff Kirsher return ns; 1524f7917c00SJeff Kirsher } 1525f7917c00SJeff Kirsher 1526f7917c00SJeff Kirsher static u32 get_msglevel(struct net_device *dev) 1527f7917c00SJeff Kirsher { 1528f7917c00SJeff Kirsher struct port_info *pi = netdev_priv(dev); 1529f7917c00SJeff Kirsher struct adapter *adapter = pi->adapter; 1530f7917c00SJeff Kirsher 1531f7917c00SJeff Kirsher return adapter->msg_enable; 1532f7917c00SJeff Kirsher } 1533f7917c00SJeff Kirsher 1534f7917c00SJeff Kirsher static void set_msglevel(struct net_device *dev, u32 val) 1535f7917c00SJeff Kirsher { 1536f7917c00SJeff Kirsher struct port_info *pi = netdev_priv(dev); 1537f7917c00SJeff Kirsher struct adapter *adapter = pi->adapter; 1538f7917c00SJeff Kirsher 1539f7917c00SJeff Kirsher adapter->msg_enable = val; 1540f7917c00SJeff Kirsher } 1541f7917c00SJeff Kirsher 15429ca683c6SJoe Perches static const char stats_strings[][ETH_GSTRING_LEN] = { 1543f7917c00SJeff Kirsher "TxOctetsOK ", 1544f7917c00SJeff Kirsher "TxFramesOK ", 1545f7917c00SJeff Kirsher "TxMulticastFramesOK", 1546f7917c00SJeff Kirsher "TxBroadcastFramesOK", 1547f7917c00SJeff Kirsher "TxPauseFrames ", 1548f7917c00SJeff Kirsher "TxUnderrun ", 1549f7917c00SJeff Kirsher "TxExtUnderrun ", 1550f7917c00SJeff Kirsher 1551f7917c00SJeff Kirsher "TxFrames64 ", 1552f7917c00SJeff Kirsher "TxFrames65To127 ", 1553f7917c00SJeff Kirsher "TxFrames128To255 ", 
1554f7917c00SJeff Kirsher "TxFrames256To511 ", 1555f7917c00SJeff Kirsher "TxFrames512To1023 ", 1556f7917c00SJeff Kirsher "TxFrames1024To1518 ", 1557f7917c00SJeff Kirsher "TxFrames1519ToMax ", 1558f7917c00SJeff Kirsher 1559f7917c00SJeff Kirsher "RxOctetsOK ", 1560f7917c00SJeff Kirsher "RxFramesOK ", 1561f7917c00SJeff Kirsher "RxMulticastFramesOK", 1562f7917c00SJeff Kirsher "RxBroadcastFramesOK", 1563f7917c00SJeff Kirsher "RxPauseFrames ", 1564f7917c00SJeff Kirsher "RxFCSErrors ", 1565f7917c00SJeff Kirsher "RxSymbolErrors ", 1566f7917c00SJeff Kirsher "RxShortErrors ", 1567f7917c00SJeff Kirsher "RxJabberErrors ", 1568f7917c00SJeff Kirsher "RxLengthErrors ", 1569f7917c00SJeff Kirsher "RxFIFOoverflow ", 1570f7917c00SJeff Kirsher 1571f7917c00SJeff Kirsher "RxFrames64 ", 1572f7917c00SJeff Kirsher "RxFrames65To127 ", 1573f7917c00SJeff Kirsher "RxFrames128To255 ", 1574f7917c00SJeff Kirsher "RxFrames256To511 ", 1575f7917c00SJeff Kirsher "RxFrames512To1023 ", 1576f7917c00SJeff Kirsher "RxFrames1024To1518 ", 1577f7917c00SJeff Kirsher "RxFrames1519ToMax ", 1578f7917c00SJeff Kirsher 1579f7917c00SJeff Kirsher "PhyFIFOErrors ", 1580f7917c00SJeff Kirsher "TSO ", 1581f7917c00SJeff Kirsher "VLANextractions ", 1582f7917c00SJeff Kirsher "VLANinsertions ", 1583f7917c00SJeff Kirsher "TxCsumOffload ", 1584f7917c00SJeff Kirsher "RxCsumGood ", 1585f7917c00SJeff Kirsher "LroAggregated ", 1586f7917c00SJeff Kirsher "LroFlushed ", 1587f7917c00SJeff Kirsher "LroNoDesc ", 1588f7917c00SJeff Kirsher "RxDrops ", 1589f7917c00SJeff Kirsher 1590f7917c00SJeff Kirsher "CheckTXEnToggled ", 1591f7917c00SJeff Kirsher "CheckResets ", 1592f7917c00SJeff Kirsher 1593f7917c00SJeff Kirsher "LinkFaults ", 1594f7917c00SJeff Kirsher }; 1595f7917c00SJeff Kirsher 1596f7917c00SJeff Kirsher static int get_sset_count(struct net_device *dev, int sset) 1597f7917c00SJeff Kirsher { 1598f7917c00SJeff Kirsher switch (sset) { 1599f7917c00SJeff Kirsher case ETH_SS_STATS: 1600f7917c00SJeff Kirsher return 
ARRAY_SIZE(stats_strings); 1601f7917c00SJeff Kirsher default: 1602f7917c00SJeff Kirsher return -EOPNOTSUPP; 1603f7917c00SJeff Kirsher } 1604f7917c00SJeff Kirsher } 1605f7917c00SJeff Kirsher 1606f7917c00SJeff Kirsher #define T3_REGMAP_SIZE (3 * 1024) 1607f7917c00SJeff Kirsher 1608f7917c00SJeff Kirsher static int get_regs_len(struct net_device *dev) 1609f7917c00SJeff Kirsher { 1610f7917c00SJeff Kirsher return T3_REGMAP_SIZE; 1611f7917c00SJeff Kirsher } 1612f7917c00SJeff Kirsher 1613f7917c00SJeff Kirsher static int get_eeprom_len(struct net_device *dev) 1614f7917c00SJeff Kirsher { 1615f7917c00SJeff Kirsher return EEPROMSIZE; 1616f7917c00SJeff Kirsher } 1617f7917c00SJeff Kirsher 1618f7917c00SJeff Kirsher static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 1619f7917c00SJeff Kirsher { 1620f7917c00SJeff Kirsher struct port_info *pi = netdev_priv(dev); 1621f7917c00SJeff Kirsher struct adapter *adapter = pi->adapter; 1622f7917c00SJeff Kirsher u32 fw_vers = 0; 1623f7917c00SJeff Kirsher u32 tp_vers = 0; 1624f7917c00SJeff Kirsher 1625f7917c00SJeff Kirsher spin_lock(&adapter->stats_lock); 1626f7917c00SJeff Kirsher t3_get_fw_version(adapter, &fw_vers); 1627f7917c00SJeff Kirsher t3_get_tp_version(adapter, &tp_vers); 1628f7917c00SJeff Kirsher spin_unlock(&adapter->stats_lock); 1629f7917c00SJeff Kirsher 1630f029c781SWolfram Sang strscpy(info->driver, DRV_NAME, sizeof(info->driver)); 1631f029c781SWolfram Sang strscpy(info->bus_info, pci_name(adapter->pdev), 163223020ab3SRick Jones sizeof(info->bus_info)); 163384b40501SRick Jones if (fw_vers) 1634f7917c00SJeff Kirsher snprintf(info->fw_version, sizeof(info->fw_version), 1635f7917c00SJeff Kirsher "%s %u.%u.%u TP %u.%u.%u", 1636f7917c00SJeff Kirsher G_FW_VERSION_TYPE(fw_vers) ? 
"T" : "N", 1637f7917c00SJeff Kirsher G_FW_VERSION_MAJOR(fw_vers), 1638f7917c00SJeff Kirsher G_FW_VERSION_MINOR(fw_vers), 1639f7917c00SJeff Kirsher G_FW_VERSION_MICRO(fw_vers), 1640f7917c00SJeff Kirsher G_TP_VERSION_MAJOR(tp_vers), 1641f7917c00SJeff Kirsher G_TP_VERSION_MINOR(tp_vers), 1642f7917c00SJeff Kirsher G_TP_VERSION_MICRO(tp_vers)); 1643f7917c00SJeff Kirsher } 1644f7917c00SJeff Kirsher 1645f7917c00SJeff Kirsher static void get_strings(struct net_device *dev, u32 stringset, u8 * data) 1646f7917c00SJeff Kirsher { 1647f7917c00SJeff Kirsher if (stringset == ETH_SS_STATS) 1648f7917c00SJeff Kirsher memcpy(data, stats_strings, sizeof(stats_strings)); 1649f7917c00SJeff Kirsher } 1650f7917c00SJeff Kirsher 1651f7917c00SJeff Kirsher static unsigned long collect_sge_port_stats(struct adapter *adapter, 1652f7917c00SJeff Kirsher struct port_info *p, int idx) 1653f7917c00SJeff Kirsher { 1654f7917c00SJeff Kirsher int i; 1655f7917c00SJeff Kirsher unsigned long tot = 0; 1656f7917c00SJeff Kirsher 1657f7917c00SJeff Kirsher for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i) 1658f7917c00SJeff Kirsher tot += adapter->sge.qs[i].port_stats[idx]; 1659f7917c00SJeff Kirsher return tot; 1660f7917c00SJeff Kirsher } 1661f7917c00SJeff Kirsher 1662f7917c00SJeff Kirsher static void get_stats(struct net_device *dev, struct ethtool_stats *stats, 1663f7917c00SJeff Kirsher u64 *data) 1664f7917c00SJeff Kirsher { 1665f7917c00SJeff Kirsher struct port_info *pi = netdev_priv(dev); 1666f7917c00SJeff Kirsher struct adapter *adapter = pi->adapter; 1667f7917c00SJeff Kirsher const struct mac_stats *s; 1668f7917c00SJeff Kirsher 1669f7917c00SJeff Kirsher spin_lock(&adapter->stats_lock); 1670f7917c00SJeff Kirsher s = t3_mac_update_stats(&pi->mac); 1671f7917c00SJeff Kirsher spin_unlock(&adapter->stats_lock); 1672f7917c00SJeff Kirsher 1673f7917c00SJeff Kirsher *data++ = s->tx_octets; 1674f7917c00SJeff Kirsher *data++ = s->tx_frames; 1675f7917c00SJeff Kirsher *data++ = s->tx_mcast_frames; 
1676f7917c00SJeff Kirsher *data++ = s->tx_bcast_frames; 1677f7917c00SJeff Kirsher *data++ = s->tx_pause; 1678f7917c00SJeff Kirsher *data++ = s->tx_underrun; 1679f7917c00SJeff Kirsher *data++ = s->tx_fifo_urun; 1680f7917c00SJeff Kirsher 1681f7917c00SJeff Kirsher *data++ = s->tx_frames_64; 1682f7917c00SJeff Kirsher *data++ = s->tx_frames_65_127; 1683f7917c00SJeff Kirsher *data++ = s->tx_frames_128_255; 1684f7917c00SJeff Kirsher *data++ = s->tx_frames_256_511; 1685f7917c00SJeff Kirsher *data++ = s->tx_frames_512_1023; 1686f7917c00SJeff Kirsher *data++ = s->tx_frames_1024_1518; 1687f7917c00SJeff Kirsher *data++ = s->tx_frames_1519_max; 1688f7917c00SJeff Kirsher 1689f7917c00SJeff Kirsher *data++ = s->rx_octets; 1690f7917c00SJeff Kirsher *data++ = s->rx_frames; 1691f7917c00SJeff Kirsher *data++ = s->rx_mcast_frames; 1692f7917c00SJeff Kirsher *data++ = s->rx_bcast_frames; 1693f7917c00SJeff Kirsher *data++ = s->rx_pause; 1694f7917c00SJeff Kirsher *data++ = s->rx_fcs_errs; 1695f7917c00SJeff Kirsher *data++ = s->rx_symbol_errs; 1696f7917c00SJeff Kirsher *data++ = s->rx_short; 1697f7917c00SJeff Kirsher *data++ = s->rx_jabber; 1698f7917c00SJeff Kirsher *data++ = s->rx_too_long; 1699f7917c00SJeff Kirsher *data++ = s->rx_fifo_ovfl; 1700f7917c00SJeff Kirsher 1701f7917c00SJeff Kirsher *data++ = s->rx_frames_64; 1702f7917c00SJeff Kirsher *data++ = s->rx_frames_65_127; 1703f7917c00SJeff Kirsher *data++ = s->rx_frames_128_255; 1704f7917c00SJeff Kirsher *data++ = s->rx_frames_256_511; 1705f7917c00SJeff Kirsher *data++ = s->rx_frames_512_1023; 1706f7917c00SJeff Kirsher *data++ = s->rx_frames_1024_1518; 1707f7917c00SJeff Kirsher *data++ = s->rx_frames_1519_max; 1708f7917c00SJeff Kirsher 1709f7917c00SJeff Kirsher *data++ = pi->phy.fifo_errors; 1710f7917c00SJeff Kirsher 1711f7917c00SJeff Kirsher *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO); 1712f7917c00SJeff Kirsher *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX); 1713f7917c00SJeff Kirsher *data++ = 
collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS); 1714f7917c00SJeff Kirsher *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM); 1715f7917c00SJeff Kirsher *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD); 1716f7917c00SJeff Kirsher *data++ = 0; 1717f7917c00SJeff Kirsher *data++ = 0; 1718f7917c00SJeff Kirsher *data++ = 0; 1719f7917c00SJeff Kirsher *data++ = s->rx_cong_drops; 1720f7917c00SJeff Kirsher 1721f7917c00SJeff Kirsher *data++ = s->num_toggled; 1722f7917c00SJeff Kirsher *data++ = s->num_resets; 1723f7917c00SJeff Kirsher 1724f7917c00SJeff Kirsher *data++ = s->link_faults; 1725f7917c00SJeff Kirsher } 1726f7917c00SJeff Kirsher 1727f7917c00SJeff Kirsher static inline void reg_block_dump(struct adapter *ap, void *buf, 1728f7917c00SJeff Kirsher unsigned int start, unsigned int end) 1729f7917c00SJeff Kirsher { 1730f7917c00SJeff Kirsher u32 *p = buf + start; 1731f7917c00SJeff Kirsher 1732f7917c00SJeff Kirsher for (; start <= end; start += sizeof(u32)) 1733f7917c00SJeff Kirsher *p++ = t3_read_reg(ap, start); 1734f7917c00SJeff Kirsher } 1735f7917c00SJeff Kirsher 1736f7917c00SJeff Kirsher static void get_regs(struct net_device *dev, struct ethtool_regs *regs, 1737f7917c00SJeff Kirsher void *buf) 1738f7917c00SJeff Kirsher { 1739f7917c00SJeff Kirsher struct port_info *pi = netdev_priv(dev); 1740f7917c00SJeff Kirsher struct adapter *ap = pi->adapter; 1741f7917c00SJeff Kirsher 1742f7917c00SJeff Kirsher /* 1743f7917c00SJeff Kirsher * Version scheme: 1744f7917c00SJeff Kirsher * bits 0..9: chip version 1745f7917c00SJeff Kirsher * bits 10..15: chip revision 1746f7917c00SJeff Kirsher * bit 31: set for PCIe cards 1747f7917c00SJeff Kirsher */ 1748f7917c00SJeff Kirsher regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31); 1749f7917c00SJeff Kirsher 1750f7917c00SJeff Kirsher /* 1751f7917c00SJeff Kirsher * We skip the MAC statistics registers because they are clear-on-read. 
1752f7917c00SJeff Kirsher * Also reading multi-register stats would need to synchronize with the 1753f7917c00SJeff Kirsher * periodic mac stats accumulation. Hard to justify the complexity. 1754f7917c00SJeff Kirsher */ 1755f7917c00SJeff Kirsher memset(buf, 0, T3_REGMAP_SIZE); 1756f7917c00SJeff Kirsher reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN); 1757f7917c00SJeff Kirsher reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT); 1758f7917c00SJeff Kirsher reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE); 1759f7917c00SJeff Kirsher reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA); 1760f7917c00SJeff Kirsher reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3); 1761f7917c00SJeff Kirsher reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0, 1762f7917c00SJeff Kirsher XGM_REG(A_XGM_SERDES_STAT3, 1)); 1763f7917c00SJeff Kirsher reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1), 1764f7917c00SJeff Kirsher XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1)); 1765f7917c00SJeff Kirsher } 1766f7917c00SJeff Kirsher 1767f7917c00SJeff Kirsher static int restart_autoneg(struct net_device *dev) 1768f7917c00SJeff Kirsher { 1769f7917c00SJeff Kirsher struct port_info *p = netdev_priv(dev); 1770f7917c00SJeff Kirsher 1771f7917c00SJeff Kirsher if (!netif_running(dev)) 1772f7917c00SJeff Kirsher return -EAGAIN; 1773f7917c00SJeff Kirsher if (p->link_config.autoneg != AUTONEG_ENABLE) 1774f7917c00SJeff Kirsher return -EINVAL; 1775f7917c00SJeff Kirsher p->phy.ops->autoneg_restart(&p->phy); 1776f7917c00SJeff Kirsher return 0; 1777f7917c00SJeff Kirsher } 1778f7917c00SJeff Kirsher 1779f7917c00SJeff Kirsher static int set_phys_id(struct net_device *dev, 1780f7917c00SJeff Kirsher enum ethtool_phys_id_state state) 1781f7917c00SJeff Kirsher { 1782f7917c00SJeff Kirsher struct port_info *pi = netdev_priv(dev); 1783f7917c00SJeff Kirsher struct adapter *adapter = pi->adapter; 1784f7917c00SJeff Kirsher 1785f7917c00SJeff Kirsher switch (state) { 1786f7917c00SJeff Kirsher 
case ETHTOOL_ID_ACTIVE: 1787f7917c00SJeff Kirsher return 1; /* cycle on/off once per second */ 1788f7917c00SJeff Kirsher 1789f7917c00SJeff Kirsher case ETHTOOL_ID_OFF: 1790f7917c00SJeff Kirsher t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL, 0); 1791f7917c00SJeff Kirsher break; 1792f7917c00SJeff Kirsher 1793f7917c00SJeff Kirsher case ETHTOOL_ID_ON: 1794f7917c00SJeff Kirsher case ETHTOOL_ID_INACTIVE: 1795f7917c00SJeff Kirsher t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL, 1796f7917c00SJeff Kirsher F_GPIO0_OUT_VAL); 1797f7917c00SJeff Kirsher } 1798f7917c00SJeff Kirsher 1799f7917c00SJeff Kirsher return 0; 1800f7917c00SJeff Kirsher } 1801f7917c00SJeff Kirsher 1802b7b44fd2SPhilippe Reynes static int get_link_ksettings(struct net_device *dev, 1803b7b44fd2SPhilippe Reynes struct ethtool_link_ksettings *cmd) 1804f7917c00SJeff Kirsher { 1805f7917c00SJeff Kirsher struct port_info *p = netdev_priv(dev); 1806b7b44fd2SPhilippe Reynes u32 supported; 1807f7917c00SJeff Kirsher 1808b7b44fd2SPhilippe Reynes ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, 1809b7b44fd2SPhilippe Reynes p->link_config.supported); 1810b7b44fd2SPhilippe Reynes ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, 1811b7b44fd2SPhilippe Reynes p->link_config.advertising); 1812f7917c00SJeff Kirsher 1813f7917c00SJeff Kirsher if (netif_carrier_ok(dev)) { 1814b7b44fd2SPhilippe Reynes cmd->base.speed = p->link_config.speed; 1815b7b44fd2SPhilippe Reynes cmd->base.duplex = p->link_config.duplex; 1816f7917c00SJeff Kirsher } else { 1817b7b44fd2SPhilippe Reynes cmd->base.speed = SPEED_UNKNOWN; 1818b7b44fd2SPhilippe Reynes cmd->base.duplex = DUPLEX_UNKNOWN; 1819f7917c00SJeff Kirsher } 1820f7917c00SJeff Kirsher 1821b7b44fd2SPhilippe Reynes ethtool_convert_link_mode_to_legacy_u32(&supported, 1822b7b44fd2SPhilippe Reynes cmd->link_modes.supported); 1823b7b44fd2SPhilippe Reynes 1824b7b44fd2SPhilippe Reynes cmd->base.port = (supported & SUPPORTED_TP) ? 
PORT_TP : PORT_FIBRE; 1825b7b44fd2SPhilippe Reynes cmd->base.phy_address = p->phy.mdio.prtad; 1826b7b44fd2SPhilippe Reynes cmd->base.autoneg = p->link_config.autoneg; 1827f7917c00SJeff Kirsher return 0; 1828f7917c00SJeff Kirsher } 1829f7917c00SJeff Kirsher 1830f7917c00SJeff Kirsher static int speed_duplex_to_caps(int speed, int duplex) 1831f7917c00SJeff Kirsher { 1832f7917c00SJeff Kirsher int cap = 0; 1833f7917c00SJeff Kirsher 1834f7917c00SJeff Kirsher switch (speed) { 1835f7917c00SJeff Kirsher case SPEED_10: 1836f7917c00SJeff Kirsher if (duplex == DUPLEX_FULL) 1837f7917c00SJeff Kirsher cap = SUPPORTED_10baseT_Full; 1838f7917c00SJeff Kirsher else 1839f7917c00SJeff Kirsher cap = SUPPORTED_10baseT_Half; 1840f7917c00SJeff Kirsher break; 1841f7917c00SJeff Kirsher case SPEED_100: 1842f7917c00SJeff Kirsher if (duplex == DUPLEX_FULL) 1843f7917c00SJeff Kirsher cap = SUPPORTED_100baseT_Full; 1844f7917c00SJeff Kirsher else 1845f7917c00SJeff Kirsher cap = SUPPORTED_100baseT_Half; 1846f7917c00SJeff Kirsher break; 1847f7917c00SJeff Kirsher case SPEED_1000: 1848f7917c00SJeff Kirsher if (duplex == DUPLEX_FULL) 1849f7917c00SJeff Kirsher cap = SUPPORTED_1000baseT_Full; 1850f7917c00SJeff Kirsher else 1851f7917c00SJeff Kirsher cap = SUPPORTED_1000baseT_Half; 1852f7917c00SJeff Kirsher break; 1853f7917c00SJeff Kirsher case SPEED_10000: 1854f7917c00SJeff Kirsher if (duplex == DUPLEX_FULL) 1855f7917c00SJeff Kirsher cap = SUPPORTED_10000baseT_Full; 1856f7917c00SJeff Kirsher } 1857f7917c00SJeff Kirsher return cap; 1858f7917c00SJeff Kirsher } 1859f7917c00SJeff Kirsher 1860f7917c00SJeff Kirsher #define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \ 1861f7917c00SJeff Kirsher ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \ 1862f7917c00SJeff Kirsher ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \ 1863f7917c00SJeff Kirsher ADVERTISED_10000baseT_Full) 1864f7917c00SJeff Kirsher 1865b7b44fd2SPhilippe Reynes static int set_link_ksettings(struct net_device 
*dev, 1866b7b44fd2SPhilippe Reynes const struct ethtool_link_ksettings *cmd) 1867f7917c00SJeff Kirsher { 1868f7917c00SJeff Kirsher struct port_info *p = netdev_priv(dev); 1869f7917c00SJeff Kirsher struct link_config *lc = &p->link_config; 1870b7b44fd2SPhilippe Reynes u32 advertising; 1871b7b44fd2SPhilippe Reynes 1872b7b44fd2SPhilippe Reynes ethtool_convert_link_mode_to_legacy_u32(&advertising, 1873b7b44fd2SPhilippe Reynes cmd->link_modes.advertising); 1874f7917c00SJeff Kirsher 1875f7917c00SJeff Kirsher if (!(lc->supported & SUPPORTED_Autoneg)) { 1876f7917c00SJeff Kirsher /* 1877f7917c00SJeff Kirsher * PHY offers a single speed/duplex. See if that's what's 1878f7917c00SJeff Kirsher * being requested. 1879f7917c00SJeff Kirsher */ 1880b7b44fd2SPhilippe Reynes if (cmd->base.autoneg == AUTONEG_DISABLE) { 1881b7b44fd2SPhilippe Reynes u32 speed = cmd->base.speed; 1882b7b44fd2SPhilippe Reynes int cap = speed_duplex_to_caps(speed, cmd->base.duplex); 1883f7917c00SJeff Kirsher if (lc->supported & cap) 1884f7917c00SJeff Kirsher return 0; 1885f7917c00SJeff Kirsher } 1886f7917c00SJeff Kirsher return -EINVAL; 1887f7917c00SJeff Kirsher } 1888f7917c00SJeff Kirsher 1889b7b44fd2SPhilippe Reynes if (cmd->base.autoneg == AUTONEG_DISABLE) { 1890b7b44fd2SPhilippe Reynes u32 speed = cmd->base.speed; 1891b7b44fd2SPhilippe Reynes int cap = speed_duplex_to_caps(speed, cmd->base.duplex); 1892f7917c00SJeff Kirsher 1893f7917c00SJeff Kirsher if (!(lc->supported & cap) || (speed == SPEED_1000)) 1894f7917c00SJeff Kirsher return -EINVAL; 1895f7917c00SJeff Kirsher lc->requested_speed = speed; 1896b7b44fd2SPhilippe Reynes lc->requested_duplex = cmd->base.duplex; 1897f7917c00SJeff Kirsher lc->advertising = 0; 1898f7917c00SJeff Kirsher } else { 1899b7b44fd2SPhilippe Reynes advertising &= ADVERTISED_MASK; 1900b7b44fd2SPhilippe Reynes advertising &= lc->supported; 1901b7b44fd2SPhilippe Reynes if (!advertising) 1902f7917c00SJeff Kirsher return -EINVAL; 1903f7917c00SJeff Kirsher lc->requested_speed = 
SPEED_INVALID; 1904f7917c00SJeff Kirsher lc->requested_duplex = DUPLEX_INVALID; 1905b7b44fd2SPhilippe Reynes lc->advertising = advertising | ADVERTISED_Autoneg; 1906f7917c00SJeff Kirsher } 1907b7b44fd2SPhilippe Reynes lc->autoneg = cmd->base.autoneg; 1908f7917c00SJeff Kirsher if (netif_running(dev)) 1909f7917c00SJeff Kirsher t3_link_start(&p->phy, &p->mac, lc); 1910f7917c00SJeff Kirsher return 0; 1911f7917c00SJeff Kirsher } 1912f7917c00SJeff Kirsher 1913f7917c00SJeff Kirsher static void get_pauseparam(struct net_device *dev, 1914f7917c00SJeff Kirsher struct ethtool_pauseparam *epause) 1915f7917c00SJeff Kirsher { 1916f7917c00SJeff Kirsher struct port_info *p = netdev_priv(dev); 1917f7917c00SJeff Kirsher 1918f7917c00SJeff Kirsher epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0; 1919f7917c00SJeff Kirsher epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0; 1920f7917c00SJeff Kirsher epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0; 1921f7917c00SJeff Kirsher } 1922f7917c00SJeff Kirsher 1923f7917c00SJeff Kirsher static int set_pauseparam(struct net_device *dev, 1924f7917c00SJeff Kirsher struct ethtool_pauseparam *epause) 1925f7917c00SJeff Kirsher { 1926f7917c00SJeff Kirsher struct port_info *p = netdev_priv(dev); 1927f7917c00SJeff Kirsher struct link_config *lc = &p->link_config; 1928f7917c00SJeff Kirsher 1929f7917c00SJeff Kirsher if (epause->autoneg == AUTONEG_DISABLE) 1930f7917c00SJeff Kirsher lc->requested_fc = 0; 1931f7917c00SJeff Kirsher else if (lc->supported & SUPPORTED_Autoneg) 1932f7917c00SJeff Kirsher lc->requested_fc = PAUSE_AUTONEG; 1933f7917c00SJeff Kirsher else 1934f7917c00SJeff Kirsher return -EINVAL; 1935f7917c00SJeff Kirsher 1936f7917c00SJeff Kirsher if (epause->rx_pause) 1937f7917c00SJeff Kirsher lc->requested_fc |= PAUSE_RX; 1938f7917c00SJeff Kirsher if (epause->tx_pause) 1939f7917c00SJeff Kirsher lc->requested_fc |= PAUSE_TX; 1940f7917c00SJeff Kirsher if (lc->autoneg == AUTONEG_ENABLE) { 1941f7917c00SJeff Kirsher if 
(netif_running(dev)) 1942f7917c00SJeff Kirsher t3_link_start(&p->phy, &p->mac, lc); 1943f7917c00SJeff Kirsher } else { 1944f7917c00SJeff Kirsher lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX); 1945f7917c00SJeff Kirsher if (netif_running(dev)) 1946f7917c00SJeff Kirsher t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc); 1947f7917c00SJeff Kirsher } 1948f7917c00SJeff Kirsher return 0; 1949f7917c00SJeff Kirsher } 1950f7917c00SJeff Kirsher 195174624944SHao Chen static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e, 195274624944SHao Chen struct kernel_ethtool_ringparam *kernel_e, 195374624944SHao Chen struct netlink_ext_ack *extack) 1954f7917c00SJeff Kirsher { 1955f7917c00SJeff Kirsher struct port_info *pi = netdev_priv(dev); 1956f7917c00SJeff Kirsher struct adapter *adapter = pi->adapter; 1957f7917c00SJeff Kirsher const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset]; 1958f7917c00SJeff Kirsher 1959f7917c00SJeff Kirsher e->rx_max_pending = MAX_RX_BUFFERS; 1960f7917c00SJeff Kirsher e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS; 1961f7917c00SJeff Kirsher e->tx_max_pending = MAX_TXQ_ENTRIES; 1962f7917c00SJeff Kirsher 1963f7917c00SJeff Kirsher e->rx_pending = q->fl_size; 1964f7917c00SJeff Kirsher e->rx_mini_pending = q->rspq_size; 1965f7917c00SJeff Kirsher e->rx_jumbo_pending = q->jumbo_size; 1966f7917c00SJeff Kirsher e->tx_pending = q->txq_size[0]; 1967f7917c00SJeff Kirsher } 1968f7917c00SJeff Kirsher 196974624944SHao Chen static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e, 197074624944SHao Chen struct kernel_ethtool_ringparam *kernel_e, 197174624944SHao Chen struct netlink_ext_ack *extack) 1972f7917c00SJeff Kirsher { 1973f7917c00SJeff Kirsher struct port_info *pi = netdev_priv(dev); 1974f7917c00SJeff Kirsher struct adapter *adapter = pi->adapter; 1975f7917c00SJeff Kirsher struct qset_params *q; 1976f7917c00SJeff Kirsher int i; 1977f7917c00SJeff Kirsher 1978f7917c00SJeff Kirsher if (e->rx_pending > 
MAX_RX_BUFFERS || 1979f7917c00SJeff Kirsher e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS || 1980f7917c00SJeff Kirsher e->tx_pending > MAX_TXQ_ENTRIES || 1981f7917c00SJeff Kirsher e->rx_mini_pending > MAX_RSPQ_ENTRIES || 1982f7917c00SJeff Kirsher e->rx_mini_pending < MIN_RSPQ_ENTRIES || 1983f7917c00SJeff Kirsher e->rx_pending < MIN_FL_ENTRIES || 1984f7917c00SJeff Kirsher e->rx_jumbo_pending < MIN_FL_ENTRIES || 1985f7917c00SJeff Kirsher e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES) 1986f7917c00SJeff Kirsher return -EINVAL; 1987f7917c00SJeff Kirsher 1988f7917c00SJeff Kirsher if (adapter->flags & FULL_INIT_DONE) 1989f7917c00SJeff Kirsher return -EBUSY; 1990f7917c00SJeff Kirsher 1991f7917c00SJeff Kirsher q = &adapter->params.sge.qset[pi->first_qset]; 1992f7917c00SJeff Kirsher for (i = 0; i < pi->nqsets; ++i, ++q) { 1993f7917c00SJeff Kirsher q->rspq_size = e->rx_mini_pending; 1994f7917c00SJeff Kirsher q->fl_size = e->rx_pending; 1995f7917c00SJeff Kirsher q->jumbo_size = e->rx_jumbo_pending; 1996f7917c00SJeff Kirsher q->txq_size[0] = e->tx_pending; 1997f7917c00SJeff Kirsher q->txq_size[1] = e->tx_pending; 1998f7917c00SJeff Kirsher q->txq_size[2] = e->tx_pending; 1999f7917c00SJeff Kirsher } 2000f7917c00SJeff Kirsher return 0; 2001f7917c00SJeff Kirsher } 2002f7917c00SJeff Kirsher 2003f3ccfda1SYufeng Mo static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c, 2004f3ccfda1SYufeng Mo struct kernel_ethtool_coalesce *kernel_coal, 2005f3ccfda1SYufeng Mo struct netlink_ext_ack *extack) 2006f7917c00SJeff Kirsher { 2007f7917c00SJeff Kirsher struct port_info *pi = netdev_priv(dev); 2008f7917c00SJeff Kirsher struct adapter *adapter = pi->adapter; 2009f7917c00SJeff Kirsher struct qset_params *qsp; 2010f7917c00SJeff Kirsher struct sge_qset *qs; 2011f7917c00SJeff Kirsher int i; 2012f7917c00SJeff Kirsher 2013f7917c00SJeff Kirsher if (c->rx_coalesce_usecs * 10 > M_NEWTIMER) 2014f7917c00SJeff Kirsher return -EINVAL; 2015f7917c00SJeff Kirsher 2016f7917c00SJeff 
Kirsher for (i = 0; i < pi->nqsets; i++) { 2017f7917c00SJeff Kirsher qsp = &adapter->params.sge.qset[i]; 2018f7917c00SJeff Kirsher qs = &adapter->sge.qs[i]; 2019f7917c00SJeff Kirsher qsp->coalesce_usecs = c->rx_coalesce_usecs; 2020f7917c00SJeff Kirsher t3_update_qset_coalesce(qs, qsp); 2021f7917c00SJeff Kirsher } 2022f7917c00SJeff Kirsher 2023f7917c00SJeff Kirsher return 0; 2024f7917c00SJeff Kirsher } 2025f7917c00SJeff Kirsher 2026f3ccfda1SYufeng Mo static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c, 2027f3ccfda1SYufeng Mo struct kernel_ethtool_coalesce *kernel_coal, 2028f3ccfda1SYufeng Mo struct netlink_ext_ack *extack) 2029f7917c00SJeff Kirsher { 2030f7917c00SJeff Kirsher struct port_info *pi = netdev_priv(dev); 2031f7917c00SJeff Kirsher struct adapter *adapter = pi->adapter; 2032f7917c00SJeff Kirsher struct qset_params *q = adapter->params.sge.qset; 2033f7917c00SJeff Kirsher 2034f7917c00SJeff Kirsher c->rx_coalesce_usecs = q->coalesce_usecs; 2035f7917c00SJeff Kirsher return 0; 2036f7917c00SJeff Kirsher } 2037f7917c00SJeff Kirsher 2038f7917c00SJeff Kirsher static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e, 2039f7917c00SJeff Kirsher u8 * data) 2040f7917c00SJeff Kirsher { 2041f7917c00SJeff Kirsher struct port_info *pi = netdev_priv(dev); 2042f7917c00SJeff Kirsher struct adapter *adapter = pi->adapter; 204348225f18SHeiner Kallweit int cnt; 2044f7917c00SJeff Kirsher 2045f7917c00SJeff Kirsher e->magic = EEPROM_MAGIC; 204648225f18SHeiner Kallweit cnt = pci_read_vpd(adapter->pdev, e->offset, e->len, data); 204748225f18SHeiner Kallweit if (cnt < 0) 204848225f18SHeiner Kallweit return cnt; 2049f7917c00SJeff Kirsher 205048225f18SHeiner Kallweit e->len = cnt; 205148225f18SHeiner Kallweit 205248225f18SHeiner Kallweit return 0; 2053f7917c00SJeff Kirsher } 2054f7917c00SJeff Kirsher 2055f7917c00SJeff Kirsher static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, 2056f7917c00SJeff Kirsher u8 * data) 
2057f7917c00SJeff Kirsher { 2058f7917c00SJeff Kirsher struct port_info *pi = netdev_priv(dev); 2059f7917c00SJeff Kirsher struct adapter *adapter = pi->adapter; 2060f7917c00SJeff Kirsher u32 aligned_offset, aligned_len; 2061f7917c00SJeff Kirsher u8 *buf; 2062f7917c00SJeff Kirsher int err; 2063f7917c00SJeff Kirsher 2064f7917c00SJeff Kirsher if (eeprom->magic != EEPROM_MAGIC) 2065f7917c00SJeff Kirsher return -EINVAL; 2066f7917c00SJeff Kirsher 2067f7917c00SJeff Kirsher aligned_offset = eeprom->offset & ~3; 2068f7917c00SJeff Kirsher aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3; 2069f7917c00SJeff Kirsher 2070f7917c00SJeff Kirsher if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) { 2071f7917c00SJeff Kirsher buf = kmalloc(aligned_len, GFP_KERNEL); 2072f7917c00SJeff Kirsher if (!buf) 2073f7917c00SJeff Kirsher return -ENOMEM; 207448225f18SHeiner Kallweit err = pci_read_vpd(adapter->pdev, aligned_offset, aligned_len, 207548225f18SHeiner Kallweit buf); 207648225f18SHeiner Kallweit if (err < 0) 2077f7917c00SJeff Kirsher goto out; 2078f7917c00SJeff Kirsher memcpy(buf + (eeprom->offset & 3), data, eeprom->len); 2079f7917c00SJeff Kirsher } else 2080f7917c00SJeff Kirsher buf = data; 2081f7917c00SJeff Kirsher 2082f7917c00SJeff Kirsher err = t3_seeprom_wp(adapter, 0); 2083f7917c00SJeff Kirsher if (err) 2084f7917c00SJeff Kirsher goto out; 2085f7917c00SJeff Kirsher 208678b5d5c9SHeiner Kallweit err = pci_write_vpd(adapter->pdev, aligned_offset, aligned_len, buf); 208778b5d5c9SHeiner Kallweit if (err >= 0) 2088f7917c00SJeff Kirsher err = t3_seeprom_wp(adapter, 1); 2089f7917c00SJeff Kirsher out: 2090f7917c00SJeff Kirsher if (buf != data) 2091f7917c00SJeff Kirsher kfree(buf); 209278b5d5c9SHeiner Kallweit return err < 0 ? 
err : 0; 2093f7917c00SJeff Kirsher } 2094f7917c00SJeff Kirsher 2095f7917c00SJeff Kirsher static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 2096f7917c00SJeff Kirsher { 2097f7917c00SJeff Kirsher wol->supported = 0; 2098f7917c00SJeff Kirsher wol->wolopts = 0; 2099f7917c00SJeff Kirsher memset(&wol->sopass, 0, sizeof(wol->sopass)); 2100f7917c00SJeff Kirsher } 2101f7917c00SJeff Kirsher 2102f7917c00SJeff Kirsher static const struct ethtool_ops cxgb_ethtool_ops = { 210362923b6aSJakub Kicinski .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS, 2104f7917c00SJeff Kirsher .get_drvinfo = get_drvinfo, 2105f7917c00SJeff Kirsher .get_msglevel = get_msglevel, 2106f7917c00SJeff Kirsher .set_msglevel = set_msglevel, 2107f7917c00SJeff Kirsher .get_ringparam = get_sge_param, 2108f7917c00SJeff Kirsher .set_ringparam = set_sge_param, 2109f7917c00SJeff Kirsher .get_coalesce = get_coalesce, 2110f7917c00SJeff Kirsher .set_coalesce = set_coalesce, 2111f7917c00SJeff Kirsher .get_eeprom_len = get_eeprom_len, 2112f7917c00SJeff Kirsher .get_eeprom = get_eeprom, 2113f7917c00SJeff Kirsher .set_eeprom = set_eeprom, 2114f7917c00SJeff Kirsher .get_pauseparam = get_pauseparam, 2115f7917c00SJeff Kirsher .set_pauseparam = set_pauseparam, 2116f7917c00SJeff Kirsher .get_link = ethtool_op_get_link, 2117f7917c00SJeff Kirsher .get_strings = get_strings, 2118f7917c00SJeff Kirsher .set_phys_id = set_phys_id, 2119f7917c00SJeff Kirsher .nway_reset = restart_autoneg, 2120f7917c00SJeff Kirsher .get_sset_count = get_sset_count, 2121f7917c00SJeff Kirsher .get_ethtool_stats = get_stats, 2122f7917c00SJeff Kirsher .get_regs_len = get_regs_len, 2123f7917c00SJeff Kirsher .get_regs = get_regs, 2124f7917c00SJeff Kirsher .get_wol = get_wol, 2125b7b44fd2SPhilippe Reynes .get_link_ksettings = get_link_ksettings, 2126b7b44fd2SPhilippe Reynes .set_link_ksettings = set_link_ksettings, 2127f7917c00SJeff Kirsher }; 2128f7917c00SJeff Kirsher 2129f7917c00SJeff Kirsher static int in_range(int val, int lo, 
int hi) 2130f7917c00SJeff Kirsher { 2131f7917c00SJeff Kirsher return val < 0 || (val <= hi && val >= lo); 2132f7917c00SJeff Kirsher } 2133f7917c00SJeff Kirsher 2134ebb4a911SArnd Bergmann static int cxgb_siocdevprivate(struct net_device *dev, 2135ebb4a911SArnd Bergmann struct ifreq *ifreq, 2136ebb4a911SArnd Bergmann void __user *useraddr, 2137ebb4a911SArnd Bergmann int cmd) 2138f7917c00SJeff Kirsher { 2139f7917c00SJeff Kirsher struct port_info *pi = netdev_priv(dev); 2140f7917c00SJeff Kirsher struct adapter *adapter = pi->adapter; 2141f7917c00SJeff Kirsher int ret; 2142f7917c00SJeff Kirsher 2143ebb4a911SArnd Bergmann if (cmd != SIOCCHIOCTL) 2144ebb4a911SArnd Bergmann return -EOPNOTSUPP; 2145ebb4a911SArnd Bergmann 2146f7917c00SJeff Kirsher if (copy_from_user(&cmd, useraddr, sizeof(cmd))) 2147f7917c00SJeff Kirsher return -EFAULT; 2148f7917c00SJeff Kirsher 2149f7917c00SJeff Kirsher switch (cmd) { 2150f7917c00SJeff Kirsher case CHELSIO_SET_QSET_PARAMS:{ 2151f7917c00SJeff Kirsher int i; 2152f7917c00SJeff Kirsher struct qset_params *q; 2153f7917c00SJeff Kirsher struct ch_qset_params t; 2154f7917c00SJeff Kirsher int q1 = pi->first_qset; 2155f7917c00SJeff Kirsher int nqsets = pi->nqsets; 2156f7917c00SJeff Kirsher 2157f7917c00SJeff Kirsher if (!capable(CAP_NET_ADMIN)) 2158f7917c00SJeff Kirsher return -EPERM; 2159f7917c00SJeff Kirsher if (copy_from_user(&t, useraddr, sizeof(t))) 2160f7917c00SJeff Kirsher return -EFAULT; 21612c05d888SWenwen Wang if (t.cmd != CHELSIO_SET_QSET_PARAMS) 21622c05d888SWenwen Wang return -EINVAL; 2163f7917c00SJeff Kirsher if (t.qset_idx >= SGE_QSETS) 2164f7917c00SJeff Kirsher return -EINVAL; 2165f7917c00SJeff Kirsher if (!in_range(t.intr_lat, 0, M_NEWTIMER) || 2166f7917c00SJeff Kirsher !in_range(t.cong_thres, 0, 255) || 2167f7917c00SJeff Kirsher !in_range(t.txq_size[0], MIN_TXQ_ENTRIES, 2168f7917c00SJeff Kirsher MAX_TXQ_ENTRIES) || 2169f7917c00SJeff Kirsher !in_range(t.txq_size[1], MIN_TXQ_ENTRIES, 2170f7917c00SJeff Kirsher MAX_TXQ_ENTRIES) || 
2171f7917c00SJeff Kirsher !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES, 2172f7917c00SJeff Kirsher MAX_CTRL_TXQ_ENTRIES) || 2173f7917c00SJeff Kirsher !in_range(t.fl_size[0], MIN_FL_ENTRIES, 2174f7917c00SJeff Kirsher MAX_RX_BUFFERS) || 2175f7917c00SJeff Kirsher !in_range(t.fl_size[1], MIN_FL_ENTRIES, 2176f7917c00SJeff Kirsher MAX_RX_JUMBO_BUFFERS) || 2177f7917c00SJeff Kirsher !in_range(t.rspq_size, MIN_RSPQ_ENTRIES, 2178f7917c00SJeff Kirsher MAX_RSPQ_ENTRIES)) 2179f7917c00SJeff Kirsher return -EINVAL; 2180f7917c00SJeff Kirsher 2181f7917c00SJeff Kirsher if ((adapter->flags & FULL_INIT_DONE) && 2182f7917c00SJeff Kirsher (t.rspq_size >= 0 || t.fl_size[0] >= 0 || 2183f7917c00SJeff Kirsher t.fl_size[1] >= 0 || t.txq_size[0] >= 0 || 2184f7917c00SJeff Kirsher t.txq_size[1] >= 0 || t.txq_size[2] >= 0 || 2185f7917c00SJeff Kirsher t.polling >= 0 || t.cong_thres >= 0)) 2186f7917c00SJeff Kirsher return -EBUSY; 2187f7917c00SJeff Kirsher 2188f7917c00SJeff Kirsher /* Allow setting of any available qset when offload enabled */ 2189f7917c00SJeff Kirsher if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) { 2190f7917c00SJeff Kirsher q1 = 0; 2191f7917c00SJeff Kirsher for_each_port(adapter, i) { 2192f7917c00SJeff Kirsher pi = adap2pinfo(adapter, i); 2193f7917c00SJeff Kirsher nqsets += pi->first_qset + pi->nqsets; 2194f7917c00SJeff Kirsher } 2195f7917c00SJeff Kirsher } 2196f7917c00SJeff Kirsher 2197f7917c00SJeff Kirsher if (t.qset_idx < q1) 2198f7917c00SJeff Kirsher return -EINVAL; 2199f7917c00SJeff Kirsher if (t.qset_idx > q1 + nqsets - 1) 2200f7917c00SJeff Kirsher return -EINVAL; 2201f7917c00SJeff Kirsher 2202f7917c00SJeff Kirsher q = &adapter->params.sge.qset[t.qset_idx]; 2203f7917c00SJeff Kirsher 2204f7917c00SJeff Kirsher if (t.rspq_size >= 0) 2205f7917c00SJeff Kirsher q->rspq_size = t.rspq_size; 2206f7917c00SJeff Kirsher if (t.fl_size[0] >= 0) 2207f7917c00SJeff Kirsher q->fl_size = t.fl_size[0]; 2208f7917c00SJeff Kirsher if (t.fl_size[1] >= 0) 2209f7917c00SJeff Kirsher 
q->jumbo_size = t.fl_size[1]; 2210f7917c00SJeff Kirsher if (t.txq_size[0] >= 0) 2211f7917c00SJeff Kirsher q->txq_size[0] = t.txq_size[0]; 2212f7917c00SJeff Kirsher if (t.txq_size[1] >= 0) 2213f7917c00SJeff Kirsher q->txq_size[1] = t.txq_size[1]; 2214f7917c00SJeff Kirsher if (t.txq_size[2] >= 0) 2215f7917c00SJeff Kirsher q->txq_size[2] = t.txq_size[2]; 2216f7917c00SJeff Kirsher if (t.cong_thres >= 0) 2217f7917c00SJeff Kirsher q->cong_thres = t.cong_thres; 2218f7917c00SJeff Kirsher if (t.intr_lat >= 0) { 2219f7917c00SJeff Kirsher struct sge_qset *qs = 2220f7917c00SJeff Kirsher &adapter->sge.qs[t.qset_idx]; 2221f7917c00SJeff Kirsher 2222f7917c00SJeff Kirsher q->coalesce_usecs = t.intr_lat; 2223f7917c00SJeff Kirsher t3_update_qset_coalesce(qs, q); 2224f7917c00SJeff Kirsher } 2225f7917c00SJeff Kirsher if (t.polling >= 0) { 2226f7917c00SJeff Kirsher if (adapter->flags & USING_MSIX) 2227f7917c00SJeff Kirsher q->polling = t.polling; 2228f7917c00SJeff Kirsher else { 2229f7917c00SJeff Kirsher /* No polling with INTx for T3A */ 2230f7917c00SJeff Kirsher if (adapter->params.rev == 0 && 2231f7917c00SJeff Kirsher !(adapter->flags & USING_MSI)) 2232f7917c00SJeff Kirsher t.polling = 0; 2233f7917c00SJeff Kirsher 2234f7917c00SJeff Kirsher for (i = 0; i < SGE_QSETS; i++) { 2235f7917c00SJeff Kirsher q = &adapter->params.sge. 
2236f7917c00SJeff Kirsher qset[i]; 2237f7917c00SJeff Kirsher q->polling = t.polling; 2238f7917c00SJeff Kirsher } 2239f7917c00SJeff Kirsher } 2240f7917c00SJeff Kirsher } 2241f7917c00SJeff Kirsher 2242f7917c00SJeff Kirsher if (t.lro >= 0) { 2243f7917c00SJeff Kirsher if (t.lro) 2244f7917c00SJeff Kirsher dev->wanted_features |= NETIF_F_GRO; 2245f7917c00SJeff Kirsher else 2246f7917c00SJeff Kirsher dev->wanted_features &= ~NETIF_F_GRO; 2247f7917c00SJeff Kirsher netdev_update_features(dev); 2248f7917c00SJeff Kirsher } 2249f7917c00SJeff Kirsher 2250f7917c00SJeff Kirsher break; 2251f7917c00SJeff Kirsher } 2252f7917c00SJeff Kirsher case CHELSIO_GET_QSET_PARAMS:{ 2253f7917c00SJeff Kirsher struct qset_params *q; 2254f7917c00SJeff Kirsher struct ch_qset_params t; 2255f7917c00SJeff Kirsher int q1 = pi->first_qset; 2256f7917c00SJeff Kirsher int nqsets = pi->nqsets; 2257f7917c00SJeff Kirsher int i; 2258f7917c00SJeff Kirsher 2259f7917c00SJeff Kirsher if (copy_from_user(&t, useraddr, sizeof(t))) 2260f7917c00SJeff Kirsher return -EFAULT; 2261f7917c00SJeff Kirsher 22622c05d888SWenwen Wang if (t.cmd != CHELSIO_GET_QSET_PARAMS) 22632c05d888SWenwen Wang return -EINVAL; 22642c05d888SWenwen Wang 2265f7917c00SJeff Kirsher /* Display qsets for all ports when offload enabled */ 2266f7917c00SJeff Kirsher if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) { 2267f7917c00SJeff Kirsher q1 = 0; 2268f7917c00SJeff Kirsher for_each_port(adapter, i) { 2269f7917c00SJeff Kirsher pi = adap2pinfo(adapter, i); 2270f7917c00SJeff Kirsher nqsets = pi->first_qset + pi->nqsets; 2271f7917c00SJeff Kirsher } 2272f7917c00SJeff Kirsher } 2273f7917c00SJeff Kirsher 2274f7917c00SJeff Kirsher if (t.qset_idx >= nqsets) 2275f7917c00SJeff Kirsher return -EINVAL; 2276676bcfecSGustavo A. R. 
Silva t.qset_idx = array_index_nospec(t.qset_idx, nqsets); 2277f7917c00SJeff Kirsher 2278f7917c00SJeff Kirsher q = &adapter->params.sge.qset[q1 + t.qset_idx]; 2279f7917c00SJeff Kirsher t.rspq_size = q->rspq_size; 2280f7917c00SJeff Kirsher t.txq_size[0] = q->txq_size[0]; 2281f7917c00SJeff Kirsher t.txq_size[1] = q->txq_size[1]; 2282f7917c00SJeff Kirsher t.txq_size[2] = q->txq_size[2]; 2283f7917c00SJeff Kirsher t.fl_size[0] = q->fl_size; 2284f7917c00SJeff Kirsher t.fl_size[1] = q->jumbo_size; 2285f7917c00SJeff Kirsher t.polling = q->polling; 2286f7917c00SJeff Kirsher t.lro = !!(dev->features & NETIF_F_GRO); 2287f7917c00SJeff Kirsher t.intr_lat = q->coalesce_usecs; 2288f7917c00SJeff Kirsher t.cong_thres = q->cong_thres; 2289f7917c00SJeff Kirsher t.qnum = q1; 2290f7917c00SJeff Kirsher 2291f7917c00SJeff Kirsher if (adapter->flags & USING_MSIX) 2292f7917c00SJeff Kirsher t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec; 2293f7917c00SJeff Kirsher else 2294f7917c00SJeff Kirsher t.vector = adapter->pdev->irq; 2295f7917c00SJeff Kirsher 2296f7917c00SJeff Kirsher if (copy_to_user(useraddr, &t, sizeof(t))) 2297f7917c00SJeff Kirsher return -EFAULT; 2298f7917c00SJeff Kirsher break; 2299f7917c00SJeff Kirsher } 2300f7917c00SJeff Kirsher case CHELSIO_SET_QSET_NUM:{ 2301f7917c00SJeff Kirsher struct ch_reg edata; 2302f7917c00SJeff Kirsher unsigned int i, first_qset = 0, other_qsets = 0; 2303f7917c00SJeff Kirsher 2304f7917c00SJeff Kirsher if (!capable(CAP_NET_ADMIN)) 2305f7917c00SJeff Kirsher return -EPERM; 2306f7917c00SJeff Kirsher if (adapter->flags & FULL_INIT_DONE) 2307f7917c00SJeff Kirsher return -EBUSY; 2308f7917c00SJeff Kirsher if (copy_from_user(&edata, useraddr, sizeof(edata))) 2309f7917c00SJeff Kirsher return -EFAULT; 23102c05d888SWenwen Wang if (edata.cmd != CHELSIO_SET_QSET_NUM) 23112c05d888SWenwen Wang return -EINVAL; 2312f7917c00SJeff Kirsher if (edata.val < 1 || 2313f7917c00SJeff Kirsher (edata.val > 1 && !(adapter->flags & USING_MSIX))) 2314f7917c00SJeff Kirsher 
return -EINVAL; 2315f7917c00SJeff Kirsher 2316f7917c00SJeff Kirsher for_each_port(adapter, i) 2317f7917c00SJeff Kirsher if (adapter->port[i] && adapter->port[i] != dev) 2318f7917c00SJeff Kirsher other_qsets += adap2pinfo(adapter, i)->nqsets; 2319f7917c00SJeff Kirsher 2320f7917c00SJeff Kirsher if (edata.val + other_qsets > SGE_QSETS) 2321f7917c00SJeff Kirsher return -EINVAL; 2322f7917c00SJeff Kirsher 2323f7917c00SJeff Kirsher pi->nqsets = edata.val; 2324f7917c00SJeff Kirsher 2325f7917c00SJeff Kirsher for_each_port(adapter, i) 2326f7917c00SJeff Kirsher if (adapter->port[i]) { 2327f7917c00SJeff Kirsher pi = adap2pinfo(adapter, i); 2328f7917c00SJeff Kirsher pi->first_qset = first_qset; 2329f7917c00SJeff Kirsher first_qset += pi->nqsets; 2330f7917c00SJeff Kirsher } 2331f7917c00SJeff Kirsher break; 2332f7917c00SJeff Kirsher } 2333f7917c00SJeff Kirsher case CHELSIO_GET_QSET_NUM:{ 2334f7917c00SJeff Kirsher struct ch_reg edata; 2335f7917c00SJeff Kirsher 2336f7917c00SJeff Kirsher memset(&edata, 0, sizeof(struct ch_reg)); 2337f7917c00SJeff Kirsher 2338f7917c00SJeff Kirsher edata.cmd = CHELSIO_GET_QSET_NUM; 2339f7917c00SJeff Kirsher edata.val = pi->nqsets; 2340f7917c00SJeff Kirsher if (copy_to_user(useraddr, &edata, sizeof(edata))) 2341f7917c00SJeff Kirsher return -EFAULT; 2342f7917c00SJeff Kirsher break; 2343f7917c00SJeff Kirsher } 2344f7917c00SJeff Kirsher case CHELSIO_LOAD_FW:{ 2345f7917c00SJeff Kirsher u8 *fw_data; 2346f7917c00SJeff Kirsher struct ch_mem_range t; 2347f7917c00SJeff Kirsher 2348f7917c00SJeff Kirsher if (!capable(CAP_SYS_RAWIO)) 2349f7917c00SJeff Kirsher return -EPERM; 2350f7917c00SJeff Kirsher if (copy_from_user(&t, useraddr, sizeof(t))) 2351f7917c00SJeff Kirsher return -EFAULT; 23522c05d888SWenwen Wang if (t.cmd != CHELSIO_LOAD_FW) 23532c05d888SWenwen Wang return -EINVAL; 2354f7917c00SJeff Kirsher /* Check t.len sanity ? 
*/ 2355f7917c00SJeff Kirsher fw_data = memdup_user(useraddr + sizeof(t), t.len); 2356f7917c00SJeff Kirsher if (IS_ERR(fw_data)) 2357f7917c00SJeff Kirsher return PTR_ERR(fw_data); 2358f7917c00SJeff Kirsher 2359f7917c00SJeff Kirsher ret = t3_load_fw(adapter, fw_data, t.len); 2360f7917c00SJeff Kirsher kfree(fw_data); 2361f7917c00SJeff Kirsher if (ret) 2362f7917c00SJeff Kirsher return ret; 2363f7917c00SJeff Kirsher break; 2364f7917c00SJeff Kirsher } 2365f7917c00SJeff Kirsher case CHELSIO_SETMTUTAB:{ 2366f7917c00SJeff Kirsher struct ch_mtus m; 2367f7917c00SJeff Kirsher int i; 2368f7917c00SJeff Kirsher 2369f7917c00SJeff Kirsher if (!is_offload(adapter)) 2370f7917c00SJeff Kirsher return -EOPNOTSUPP; 2371f7917c00SJeff Kirsher if (!capable(CAP_NET_ADMIN)) 2372f7917c00SJeff Kirsher return -EPERM; 2373f7917c00SJeff Kirsher if (offload_running(adapter)) 2374f7917c00SJeff Kirsher return -EBUSY; 2375f7917c00SJeff Kirsher if (copy_from_user(&m, useraddr, sizeof(m))) 2376f7917c00SJeff Kirsher return -EFAULT; 23772c05d888SWenwen Wang if (m.cmd != CHELSIO_SETMTUTAB) 23782c05d888SWenwen Wang return -EINVAL; 2379f7917c00SJeff Kirsher if (m.nmtus != NMTUS) 2380f7917c00SJeff Kirsher return -EINVAL; 2381f7917c00SJeff Kirsher if (m.mtus[0] < 81) /* accommodate SACK */ 2382f7917c00SJeff Kirsher return -EINVAL; 2383f7917c00SJeff Kirsher 2384f7917c00SJeff Kirsher /* MTUs must be in ascending order */ 2385f7917c00SJeff Kirsher for (i = 1; i < NMTUS; ++i) 2386f7917c00SJeff Kirsher if (m.mtus[i] < m.mtus[i - 1]) 2387f7917c00SJeff Kirsher return -EINVAL; 2388f7917c00SJeff Kirsher 2389f7917c00SJeff Kirsher memcpy(adapter->params.mtus, m.mtus, 2390f7917c00SJeff Kirsher sizeof(adapter->params.mtus)); 2391f7917c00SJeff Kirsher break; 2392f7917c00SJeff Kirsher } 2393f7917c00SJeff Kirsher case CHELSIO_GET_PM:{ 2394f7917c00SJeff Kirsher struct tp_params *p = &adapter->params.tp; 2395f7917c00SJeff Kirsher struct ch_pm m = {.cmd = CHELSIO_GET_PM }; 2396f7917c00SJeff Kirsher 2397f7917c00SJeff Kirsher if 
(!is_offload(adapter)) 2398f7917c00SJeff Kirsher return -EOPNOTSUPP; 2399f7917c00SJeff Kirsher m.tx_pg_sz = p->tx_pg_size; 2400f7917c00SJeff Kirsher m.tx_num_pg = p->tx_num_pgs; 2401f7917c00SJeff Kirsher m.rx_pg_sz = p->rx_pg_size; 2402f7917c00SJeff Kirsher m.rx_num_pg = p->rx_num_pgs; 2403f7917c00SJeff Kirsher m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan; 2404f7917c00SJeff Kirsher if (copy_to_user(useraddr, &m, sizeof(m))) 2405f7917c00SJeff Kirsher return -EFAULT; 2406f7917c00SJeff Kirsher break; 2407f7917c00SJeff Kirsher } 2408f7917c00SJeff Kirsher case CHELSIO_SET_PM:{ 2409f7917c00SJeff Kirsher struct ch_pm m; 2410f7917c00SJeff Kirsher struct tp_params *p = &adapter->params.tp; 2411f7917c00SJeff Kirsher 2412f7917c00SJeff Kirsher if (!is_offload(adapter)) 2413f7917c00SJeff Kirsher return -EOPNOTSUPP; 2414f7917c00SJeff Kirsher if (!capable(CAP_NET_ADMIN)) 2415f7917c00SJeff Kirsher return -EPERM; 2416f7917c00SJeff Kirsher if (adapter->flags & FULL_INIT_DONE) 2417f7917c00SJeff Kirsher return -EBUSY; 2418f7917c00SJeff Kirsher if (copy_from_user(&m, useraddr, sizeof(m))) 2419f7917c00SJeff Kirsher return -EFAULT; 24202c05d888SWenwen Wang if (m.cmd != CHELSIO_SET_PM) 24212c05d888SWenwen Wang return -EINVAL; 2422f7917c00SJeff Kirsher if (!is_power_of_2(m.rx_pg_sz) || 2423f7917c00SJeff Kirsher !is_power_of_2(m.tx_pg_sz)) 2424f7917c00SJeff Kirsher return -EINVAL; /* not power of 2 */ 2425f7917c00SJeff Kirsher if (!(m.rx_pg_sz & 0x14000)) 2426f7917c00SJeff Kirsher return -EINVAL; /* not 16KB or 64KB */ 2427f7917c00SJeff Kirsher if (!(m.tx_pg_sz & 0x1554000)) 2428f7917c00SJeff Kirsher return -EINVAL; 2429f7917c00SJeff Kirsher if (m.tx_num_pg == -1) 2430f7917c00SJeff Kirsher m.tx_num_pg = p->tx_num_pgs; 2431f7917c00SJeff Kirsher if (m.rx_num_pg == -1) 2432f7917c00SJeff Kirsher m.rx_num_pg = p->rx_num_pgs; 2433f7917c00SJeff Kirsher if (m.tx_num_pg % 24 || m.rx_num_pg % 24) 2434f7917c00SJeff Kirsher return -EINVAL; 2435f7917c00SJeff Kirsher if (m.rx_num_pg * 
m.rx_pg_sz > p->chan_rx_size || 2436f7917c00SJeff Kirsher m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size) 2437f7917c00SJeff Kirsher return -EINVAL; 2438f7917c00SJeff Kirsher p->rx_pg_size = m.rx_pg_sz; 2439f7917c00SJeff Kirsher p->tx_pg_size = m.tx_pg_sz; 2440f7917c00SJeff Kirsher p->rx_num_pgs = m.rx_num_pg; 2441f7917c00SJeff Kirsher p->tx_num_pgs = m.tx_num_pg; 2442f7917c00SJeff Kirsher break; 2443f7917c00SJeff Kirsher } 2444f7917c00SJeff Kirsher case CHELSIO_GET_MEM:{ 2445f7917c00SJeff Kirsher struct ch_mem_range t; 2446f7917c00SJeff Kirsher struct mc7 *mem; 2447f7917c00SJeff Kirsher u64 buf[32]; 2448f7917c00SJeff Kirsher 2449f7917c00SJeff Kirsher if (!is_offload(adapter)) 2450f7917c00SJeff Kirsher return -EOPNOTSUPP; 24513546d8f1SMichael Ellerman if (!capable(CAP_NET_ADMIN)) 24523546d8f1SMichael Ellerman return -EPERM; 2453f7917c00SJeff Kirsher if (!(adapter->flags & FULL_INIT_DONE)) 2454f7917c00SJeff Kirsher return -EIO; /* need the memory controllers */ 2455f7917c00SJeff Kirsher if (copy_from_user(&t, useraddr, sizeof(t))) 2456f7917c00SJeff Kirsher return -EFAULT; 24572c05d888SWenwen Wang if (t.cmd != CHELSIO_GET_MEM) 24582c05d888SWenwen Wang return -EINVAL; 2459f7917c00SJeff Kirsher if ((t.addr & 7) || (t.len & 7)) 2460f7917c00SJeff Kirsher return -EINVAL; 2461f7917c00SJeff Kirsher if (t.mem_id == MEM_CM) 2462f7917c00SJeff Kirsher mem = &adapter->cm; 2463f7917c00SJeff Kirsher else if (t.mem_id == MEM_PMRX) 2464f7917c00SJeff Kirsher mem = &adapter->pmrx; 2465f7917c00SJeff Kirsher else if (t.mem_id == MEM_PMTX) 2466f7917c00SJeff Kirsher mem = &adapter->pmtx; 2467f7917c00SJeff Kirsher else 2468f7917c00SJeff Kirsher return -EINVAL; 2469f7917c00SJeff Kirsher 2470f7917c00SJeff Kirsher /* 2471f7917c00SJeff Kirsher * Version scheme: 2472f7917c00SJeff Kirsher * bits 0..9: chip version 2473f7917c00SJeff Kirsher * bits 10..15: chip revision 2474f7917c00SJeff Kirsher */ 2475f7917c00SJeff Kirsher t.version = 3 | (adapter->params.rev << 10); 2476f7917c00SJeff Kirsher if 
(copy_to_user(useraddr, &t, sizeof(t))) 2477f7917c00SJeff Kirsher return -EFAULT; 2478f7917c00SJeff Kirsher 2479f7917c00SJeff Kirsher /* 2480f7917c00SJeff Kirsher * Read 256 bytes at a time as len can be large and we don't 2481f7917c00SJeff Kirsher * want to use huge intermediate buffers. 2482f7917c00SJeff Kirsher */ 2483f7917c00SJeff Kirsher useraddr += sizeof(t); /* advance to start of buffer */ 2484f7917c00SJeff Kirsher while (t.len) { 2485f7917c00SJeff Kirsher unsigned int chunk = 2486f7917c00SJeff Kirsher min_t(unsigned int, t.len, sizeof(buf)); 2487f7917c00SJeff Kirsher 2488f7917c00SJeff Kirsher ret = 2489f7917c00SJeff Kirsher t3_mc7_bd_read(mem, t.addr / 8, chunk / 8, 2490f7917c00SJeff Kirsher buf); 2491f7917c00SJeff Kirsher if (ret) 2492f7917c00SJeff Kirsher return ret; 2493f7917c00SJeff Kirsher if (copy_to_user(useraddr, buf, chunk)) 2494f7917c00SJeff Kirsher return -EFAULT; 2495f7917c00SJeff Kirsher useraddr += chunk; 2496f7917c00SJeff Kirsher t.addr += chunk; 2497f7917c00SJeff Kirsher t.len -= chunk; 2498f7917c00SJeff Kirsher } 2499f7917c00SJeff Kirsher break; 2500f7917c00SJeff Kirsher } 2501f7917c00SJeff Kirsher case CHELSIO_SET_TRACE_FILTER:{ 2502f7917c00SJeff Kirsher struct ch_trace t; 2503f7917c00SJeff Kirsher const struct trace_params *tp; 2504f7917c00SJeff Kirsher 2505f7917c00SJeff Kirsher if (!capable(CAP_NET_ADMIN)) 2506f7917c00SJeff Kirsher return -EPERM; 2507f7917c00SJeff Kirsher if (!offload_running(adapter)) 2508f7917c00SJeff Kirsher return -EAGAIN; 2509f7917c00SJeff Kirsher if (copy_from_user(&t, useraddr, sizeof(t))) 2510f7917c00SJeff Kirsher return -EFAULT; 25112c05d888SWenwen Wang if (t.cmd != CHELSIO_SET_TRACE_FILTER) 25122c05d888SWenwen Wang return -EINVAL; 2513f7917c00SJeff Kirsher 2514f7917c00SJeff Kirsher tp = (const struct trace_params *)&t.sip; 2515f7917c00SJeff Kirsher if (t.config_tx) 2516f7917c00SJeff Kirsher t3_config_trace_filter(adapter, tp, 0, 2517f7917c00SJeff Kirsher t.invert_match, 2518f7917c00SJeff Kirsher t.trace_tx); 
2519f7917c00SJeff Kirsher if (t.config_rx) 2520f7917c00SJeff Kirsher t3_config_trace_filter(adapter, tp, 1, 2521f7917c00SJeff Kirsher t.invert_match, 2522f7917c00SJeff Kirsher t.trace_rx); 2523f7917c00SJeff Kirsher break; 2524f7917c00SJeff Kirsher } 2525f7917c00SJeff Kirsher default: 2526f7917c00SJeff Kirsher return -EOPNOTSUPP; 2527f7917c00SJeff Kirsher } 2528f7917c00SJeff Kirsher return 0; 2529f7917c00SJeff Kirsher } 2530f7917c00SJeff Kirsher 2531f7917c00SJeff Kirsher static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd) 2532f7917c00SJeff Kirsher { 2533f7917c00SJeff Kirsher struct mii_ioctl_data *data = if_mii(req); 2534f7917c00SJeff Kirsher struct port_info *pi = netdev_priv(dev); 2535f7917c00SJeff Kirsher struct adapter *adapter = pi->adapter; 2536f7917c00SJeff Kirsher 2537f7917c00SJeff Kirsher switch (cmd) { 2538f7917c00SJeff Kirsher case SIOCGMIIREG: 2539f7917c00SJeff Kirsher case SIOCSMIIREG: 2540f7917c00SJeff Kirsher /* Convert phy_id from older PRTAD/DEVAD format */ 2541f7917c00SJeff Kirsher if (is_10G(adapter) && 2542f7917c00SJeff Kirsher !mdio_phy_id_is_c45(data->phy_id) && 2543f7917c00SJeff Kirsher (data->phy_id & 0x1f00) && 2544f7917c00SJeff Kirsher !(data->phy_id & 0xe0e0)) 2545f7917c00SJeff Kirsher data->phy_id = mdio_phy_id_c45(data->phy_id >> 8, 2546f7917c00SJeff Kirsher data->phy_id & 0x1f); 2547df561f66SGustavo A. R. 
Silva fallthrough; 2548f7917c00SJeff Kirsher case SIOCGMIIPHY: 2549f7917c00SJeff Kirsher return mdio_mii_ioctl(&pi->phy.mdio, data, cmd); 2550f7917c00SJeff Kirsher default: 2551f7917c00SJeff Kirsher return -EOPNOTSUPP; 2552f7917c00SJeff Kirsher } 2553f7917c00SJeff Kirsher } 2554f7917c00SJeff Kirsher 2555f7917c00SJeff Kirsher static int cxgb_change_mtu(struct net_device *dev, int new_mtu) 2556f7917c00SJeff Kirsher { 2557f7917c00SJeff Kirsher struct port_info *pi = netdev_priv(dev); 2558f7917c00SJeff Kirsher struct adapter *adapter = pi->adapter; 2559f7917c00SJeff Kirsher int ret; 2560f7917c00SJeff Kirsher 2561f7917c00SJeff Kirsher if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu))) 2562f7917c00SJeff Kirsher return ret; 2563f7917c00SJeff Kirsher dev->mtu = new_mtu; 2564f7917c00SJeff Kirsher init_port_mtus(adapter); 2565f7917c00SJeff Kirsher if (adapter->params.rev == 0 && offload_running(adapter)) 2566f7917c00SJeff Kirsher t3_load_mtus(adapter, adapter->params.mtus, 2567f7917c00SJeff Kirsher adapter->params.a_wnd, adapter->params.b_wnd, 2568f7917c00SJeff Kirsher adapter->port[0]->mtu); 2569f7917c00SJeff Kirsher return 0; 2570f7917c00SJeff Kirsher } 2571f7917c00SJeff Kirsher 2572f7917c00SJeff Kirsher static int cxgb_set_mac_addr(struct net_device *dev, void *p) 2573f7917c00SJeff Kirsher { 2574f7917c00SJeff Kirsher struct port_info *pi = netdev_priv(dev); 2575f7917c00SJeff Kirsher struct adapter *adapter = pi->adapter; 2576f7917c00SJeff Kirsher struct sockaddr *addr = p; 2577f7917c00SJeff Kirsher 2578f7917c00SJeff Kirsher if (!is_valid_ether_addr(addr->sa_data)) 2579504f9b5aSDanny Kukawka return -EADDRNOTAVAIL; 2580f7917c00SJeff Kirsher 2581a05e4c0aSJakub Kicinski eth_hw_addr_set(dev, addr->sa_data); 2582f7917c00SJeff Kirsher t3_mac_set_address(&pi->mac, LAN_MAC_IDX, dev->dev_addr); 2583f7917c00SJeff Kirsher if (offload_running(adapter)) 2584f7917c00SJeff Kirsher write_smt_entry(adapter, pi->port_id); 2585f7917c00SJeff Kirsher return 0; 2586f7917c00SJeff Kirsher } 

/* ndo_fix_features: keep the TX VLAN-acceleration flag slaved to RX. */
static netdev_features_t cxgb_fix_features(struct net_device *dev,
					   netdev_features_t features)
{
	/*
	 * Since there is no support for separate rx/tx vlan accel
	 * enable/disable make sure tx flag is always in same state as rx.
	 */
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		features |= NETIF_F_HW_VLAN_CTAG_TX;
	else
		features &= ~NETIF_F_HW_VLAN_CTAG_TX;

	return features;
}

/* ndo_set_features: reprogram VLAN extraction when the RX VLAN flag flips. */
static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;

	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
		cxgb_vlan_mode(dev, features);

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Netpoll entry point: invoke the interrupt handler for each of the port's
 * queue sets.  Under MSI-X the per-qset vector takes the qset as its cookie,
 * otherwise the shared vector takes the adapter.
 */
static void cxgb_netpoll(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int qidx;

	for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
		struct sge_qset *qs = &adapter->sge.qs[qidx];
		void *source;

		if (adapter->flags & USING_MSIX)
			source = qs;
		else
			source = adapter;

		t3_intr_handler(adapter, qs->rspq.polling) (0, source);
	}
}
#endif

/*
 * Periodic accumulation of MAC statistics.
 */
static void mac_stats_update(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);

		if (netif_running(dev)) {
			/* stats_lock serializes against other stats readers */
			spin_lock(&adapter->stats_lock);
			t3_mac_update_stats(&p->mac);
			spin_unlock(&adapter->stats_lock);
		}
	}
}

/*
 * Poll the link state of every port.  A pending link fault (set under
 * work_lock by the interrupt path) is serviced first; otherwise ports whose
 * PHY lacks interrupt support are polled for link changes.
 */
static void check_link_status(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);
		int link_fault;

		/* snapshot the fault flag under the same lock that sets it */
		spin_lock_irq(&adapter->work_lock);
		link_fault = p->link_fault;
		spin_unlock_irq(&adapter->work_lock);

		if (link_fault) {
			t3_link_fault(adapter, i);
			continue;
		}

		if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev)) {
			/* mask XGMAC interrupts while polling the link */
			t3_xgm_intr_disable(adapter, i);
			t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);

			t3_link_changed(adapter, i);
			t3_xgm_intr_enable(adapter, i);
		}
	}
}

/*
 * T3B2 MAC watchdog: run the per-port watchdog task and, depending on its
 * status code, either count a toggle or fully re-initialize the MAC.
 */
static void check_t3b2_mac(struct adapter *adapter)
{
	int i;

	if (!rtnl_trylock())	/* synchronize with ifdown */
		return;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);
		int status;

		if (!netif_running(dev))
			continue;

		status = 0;
		if (netif_running(dev) && netif_carrier_ok(dev))
			status = t3b2_mac_watchdog_task(&p->mac);
		if (status == 1)
			p->mac.stats.num_toggled++;
		else if (status == 2) {
			/* status 2: the MAC was reset, reprogram it fully */
			struct cmac *mac = &p->mac;

			t3_mac_set_mtu(mac, dev->mtu);
			t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
			cxgb_set_rxmode(dev);
			t3_link_start(&p->phy, mac, &p->link_config);
			t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
			t3_port_intr_enable(adapter, p->port_id);
			p->mac.stats.num_resets++;
		}
	}
	rtnl_unlock();
}


/*
 * Periodic adapter housekeeping, run as delayed work: poll link state,
 * accumulate MAC statistics, run the T3B2 MAC watchdog, and count (then
 * clear) RX-FIFO-overflow and free-list-empty conditions that are polled
 * rather than interrupt-driven.  Reschedules itself while any port is open.
 */
static void t3_adap_check_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       adap_check_task.work);
	const struct adapter_params *p = &adapter->params;
	int port;
	unsigned int v, status, reset;

	adapter->check_task_cnt++;

	check_link_status(adapter);

	/* Accumulate MAC stats if needed */
	if (!p->linkpoll_period ||
	    (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
	    p->stats_update_period) {
		mac_stats_update(adapter);
		adapter->check_task_cnt = 0;
	}

	if (p->rev == T3_REV_B2)
		check_t3b2_mac(adapter);

	/*
	 * Scan the XGMAC's to check for various conditions which we want to
	 * monitor in a periodic polling manner rather than via an interrupt
	 * condition. This is used for conditions which would otherwise flood
	 * the system with interrupts and we only really need to know that the
	 * conditions are "happening" ... For each condition we count the
	 * detection of the condition and reset it for the next polling loop.
	 */
	for_each_port(adapter, port) {
		struct cmac *mac = &adap2pinfo(adapter, port)->mac;
		u32 cause;

		cause = t3_read_reg(adapter, A_XGM_INT_CAUSE + mac->offset);
		reset = 0;
		if (cause & F_RXFIFO_OVERFLOW) {
			mac->stats.rx_fifo_ovfl++;
			reset |= F_RXFIFO_OVERFLOW;
		}

		/* acknowledge only the bits we handled */
		t3_write_reg(adapter, A_XGM_INT_CAUSE + mac->offset, reset);
	}

	/*
	 * We do the same as above for FL_EMPTY interrupts.
	 */
	status = t3_read_reg(adapter, A_SG_INT_CAUSE);
	reset = 0;

	if (status & F_FLEMPTY) {
		struct sge_qset *qs = &adapter->sge.qs[0];
		int i = 0;

		reset |= F_FLEMPTY;

		v = (t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS) >> S_FL0EMPTY) &
		    0xffff;

		/*
		 * Each qset owns two free lists; consecutive status bits
		 * alternate between fl[0] and fl[1] of successive qsets.
		 */
		while (v) {
			qs->fl[i].empty += (v & 1);
			if (i)
				qs++;
			i ^= 1;
			v >>= 1;
		}
	}

	t3_write_reg(adapter, A_SG_INT_CAUSE, reset);

	/* Schedule the next check update if any port is active. */
	spin_lock_irq(&adapter->work_lock);
	if (adapter->open_device_map & PORT_MASK)
		schedule_chk_task(adapter);
	spin_unlock_irq(&adapter->work_lock);
}

/* Notify the offload driver that the doorbell FIFO has filled up. */
static void db_full_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       db_full_task);

	cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_FULL, 0);
}

/* Notify the offload driver that the doorbell FIFO has drained. */
static void db_empty_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       db_empty_task);

	cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_EMPTY, 0);
}

/*
 * Handle a dropped doorbell: notify the offload driver, wait a randomized
 * interval, then re-ring the driver's queue-set doorbells.
 */
static void db_drop_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       db_drop_task);
	unsigned long delay = 1000;
	unsigned short r;

	cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_DROP, 0);

	/*
	 * Sleep a while before ringing the driver qset dbs.
	 * The delay is between 1000-2023 usecs.
	 */
	get_random_bytes(&r, 2);
	delay += r & 1023;
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(usecs_to_jiffies(delay));
	ring_dbs(adapter);
}

/*
 * Processes external (PHY) interrupts in process context.
 */
static void ext_intr_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       ext_intr_handler_task);
	int i;

	/* Disable link fault interrupts */
	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);

		t3_xgm_intr_disable(adapter, i);
		/* read-to-clear any latched XGMAC interrupt status */
		t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
	}

	/* Re-enable link fault interrupts */
	t3_phy_intr_handler(adapter);

	for_each_port(adapter, i)
		t3_xgm_intr_enable(adapter, i);

	/* Now reenable external interrupts */
	spin_lock_irq(&adapter->work_lock);
	if (adapter->slow_intr_mask) {
		adapter->slow_intr_mask |= F_T3DBG;
		t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
		t3_write_reg(adapter, A_PL_INT_ENABLE0,
			     adapter->slow_intr_mask);
	}
	spin_unlock_irq(&adapter->work_lock);
}

/*
 * Interrupt-context handler for external (PHY) interrupts.
 */
void t3_os_ext_intr_handler(struct adapter *adapter)
{
	/*
	 * Schedule a task to handle external interrupts as they may be slow
	 * and we use a mutex to protect MDIO registers. We disable PHY
	 * interrupts in the meantime and let the task reenable them when
	 * it's done.
	 */
	spin_lock(&adapter->work_lock);
	if (adapter->slow_intr_mask) {
		adapter->slow_intr_mask &= ~F_T3DBG;
		t3_write_reg(adapter, A_PL_INT_ENABLE0,
			     adapter->slow_intr_mask);
		queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
	}
	spin_unlock(&adapter->work_lock);
}

/*
 * Interrupt-context notification of a link fault on a port; the fault is
 * recorded under work_lock and serviced later by check_link_status().
 */
void t3_os_link_fault_handler(struct adapter *adapter, int port_id)
{
	struct net_device *netdev = adapter->port[port_id];
	struct port_info *pi = netdev_priv(netdev);

	spin_lock(&adapter->work_lock);
	pi->link_fault = 1;
	spin_unlock(&adapter->work_lock);
}

/*
 * Quiesce the adapter after an error: notify and close offload, close all
 * running ports, stop the SGE timers, optionally reset the chip, and
 * disable the PCI device.  Returns the reset result (0 if no reset asked).
 */
static int t3_adapter_error(struct adapter *adapter, int reset, int on_wq)
{
	int i, ret = 0;

	if (is_offload(adapter) &&
	    test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
		cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0);
		offload_close(&adapter->tdev);
	}

	/* Stop all ports */
	for_each_port(adapter, i) {
		struct net_device *netdev = adapter->port[i];

		if (netif_running(netdev))
			__cxgb_close(netdev, on_wq);
	}

	/* Stop SGE timers */
	t3_stop_sge_timers(adapter);

	adapter->flags &= ~FULL_INIT_DONE;

	if (reset)
		ret = t3_reset_adapter(adapter);

	pci_disable_device(adapter->pdev);

	return ret;
}

/*
 * Re-enable the PCI device after a reset and prepare the adapter for a
 * replay of its initialization.  Returns 0 on success, -1 on failure.
 */
static int t3_reenable_adapter(struct adapter *adapter)
{
	if (pci_enable_device(adapter->pdev)) {
		dev_err(&adapter->pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		goto err;
	}
	pci_set_master(adapter->pdev);
	pci_restore_state(adapter->pdev);
	/* re-save so a later restore starts from this known-good state */
	pci_save_state(adapter->pdev);

	/* Free sge resources */
	t3_free_sge_resources(adapter);

	if (t3_replay_prep_adapter(adapter))
		goto err;

	return 0;
err:
	return -1;
}

/*
 * Bring every previously-running port back up after a reset; ports that
 * fail to open are logged and skipped.  Finally notify the offload driver
 * that the adapter is available again.
 */
static void t3_resume_ports(struct adapter *adapter)
{
	int i;

	/* Restart the ports */
	for_each_port(adapter, i) {
		struct net_device *netdev = adapter->port[i];

		if (netif_running(netdev)) {
			if (cxgb_open(netdev)) {
				dev_err(&adapter->pdev->dev,
					"can't bring device back up"
					" after reset\n");
				continue;
			}
		}
	}

	if (is_offload(adapter) && !ofld_disable)
		cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0);
}

/*
 * processes a fatal error.
 * Bring the ports down, reset the chip, bring the ports back up.
 */
static void fatal_error_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       fatal_error_handler_task);
	int err = 0;

	/* RTNL serializes the full down/reset/up sequence against ifup/down */
	rtnl_lock();
	err = t3_adapter_error(adapter, 1, 1);
	if (!err)
		err = t3_reenable_adapter(adapter);
	if (!err)
		t3_resume_ports(adapter);

	CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
	rtnl_unlock();
}

/*
 * Fatal-error entry point: quiesce DMA and the MACs, disable interrupts,
 * and kick the fatal-error work to reset the adapter.  Also dumps the
 * firmware status words for diagnosis.
 */
void t3_fatal_err(struct adapter *adapter)
{
	unsigned int fw_status[4];

	if (adapter->flags & FULL_INIT_DONE) {
		t3_sge_stop_dma(adapter);
		/* silence both ports' TX and RX MAC paths */
		t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
		t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
		t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
		t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);

		spin_lock(&adapter->work_lock);
		t3_intr_disable(adapter);
		queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
		spin_unlock(&adapter->work_lock);
	}
	CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
	if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
		CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
			 fw_status[0], fw_status[1],
			 fw_status[2], fw_status[3]);
}

/**
 * t3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
					     pci_channel_state_t state)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	t3_adapter_error(adapter, 0, 0);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * t3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	if (!t3_reenable_adapter(adapter))
		return PCI_ERS_RESULT_RECOVERED;

	return PCI_ERS_RESULT_DISCONNECT;
}

/**
 * t3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation.
 */
static void t3_io_resume(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n",
		 t3_read_reg(adapter, A_PCIE_PEX_ERR));

	rtnl_lock();
	t3_resume_ports(adapter);
	rtnl_unlock();
}

/* PCI error-recovery callbacks registered with the PCI core. */
static const struct pci_error_handlers t3_err_handler = {
	.error_detected = t3_io_error_detected,
	.slot_reset = t3_io_slot_reset,
	.resume = t3_io_resume,
};

/*
 * Set the number of qsets based on the number of CPUs and the number of ports,
 * not to exceed the number of available qsets, assuming there are enough qsets
 * per port in HW.
 */
static void set_nqsets(struct adapter *adap)
{
	int i, j = 0;
	int num_cpus = netif_get_num_default_rss_queues();
	int hwports = adap->params.nports;
	/* one MSI-X vector is reserved for slow-path interrupts */
	int nqsets = adap->msix_nvectors - 1;

	if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
		if (hwports == 2 &&
		    (hwports * nqsets > SGE_QSETS ||
		     num_cpus >= nqsets / hwports))
			nqsets /= hwports;
		if (nqsets > num_cpus)
			nqsets = num_cpus;
		if (nqsets < 1 || hwports == 4)
			nqsets = 1;
	} else {
		nqsets = 1;
	}

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		/* assign each port a contiguous range of qsets */
		pi->first_qset = j;
		pi->nqsets = nqsets;
		j = pi->first_qset + nqsets;

		dev_info(&adap->pdev->dev,
			 "Port %d using %d queue sets.\n", i, nqsets);
	}
}

/*
 * Enable MSI-X, asking for up to one vector per qset plus one for the
 * slow path, and record the allocated vectors in the adapter.
 */
static int cxgb_enable_msix(struct adapter *adap)
{
	struct msix_entry entries[SGE_QSETS + 1];
	int vectors;
	int i;

	vectors = ARRAY_SIZE(entries);
	for (i = 0; i < vectors;
	     ++i)
		entries[i].entry = i;

	/* accept anything between nports+1 and the full complement */
	vectors = pci_enable_msix_range(adap->pdev, entries,
					adap->params.nports + 1, vectors);
	if (vectors < 0)
		return vectors;

	for (i = 0; i < vectors; ++i)
		adap->msix_info[i].vec = entries[i].vector;
	adap->msix_nvectors = vectors;

	return 0;
}

/* Log a one-line summary of each registered port and the adapter memory. */
static void print_port_info(struct adapter *adap, const struct adapter_info *ai)
{
	static const char *pci_variant[] = {
		"PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
	};

	int i;
	char buf[80];

	if (is_pcie(adap))
		snprintf(buf, sizeof(buf), "%s x%d",
			 pci_variant[adap->params.pci.variant],
			 adap->params.pci.width);
	else
		snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
			 pci_variant[adap->params.pci.variant],
			 adap->params.pci.speed, adap->params.pci.width);

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		const struct port_info *pi = netdev_priv(dev);

		if (!test_bit(i, &adap->registered_device_map))
			continue;
		netdev_info(dev, "%s %s %sNIC (rev %d) %s%s\n",
			    ai->desc, pi->phy.desc,
			    is_offload(adap) ? "R" : "", adap->params.rev, buf,
			    (adap->flags & USING_MSIX) ? " MSI-X" :
			    (adap->flags & USING_MSI) ? " MSI" : "");
		/* print the memory summary once, on the adapter's own port */
		if (adap->name == dev->name && adap->params.vpd.mclk)
			pr_info("%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
				adap->name, t3_mc7_size(&adap->cm) >> 20,
				t3_mc7_size(&adap->pmtx) >> 20,
				t3_mc7_size(&adap->pmrx) >> 20,
				adap->params.vpd.sn);
	}
}

/* net_device operations shared by all cxgb3 ports. */
static const struct net_device_ops cxgb_netdev_ops = {
	.ndo_open = cxgb_open,
	.ndo_stop = cxgb_close,
	.ndo_start_xmit = t3_eth_xmit,
	.ndo_get_stats = cxgb_get_stats,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = cxgb_set_rxmode,
	.ndo_eth_ioctl = cxgb_ioctl,
	.ndo_siocdevprivate = cxgb_siocdevprivate,
	.ndo_change_mtu = cxgb_change_mtu,
	.ndo_set_mac_address = cxgb_set_mac_addr,
	.ndo_fix_features = cxgb_fix_features,
	.ndo_set_features = cxgb_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = cxgb_netpoll,
#endif
};

/*
 * Derive the iSCSI MAC address from the port's MAC by setting the
 * locally-administered bit pattern in byte 3.
 */
static void cxgb3_init_iscsi_mac(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);

	memcpy(pi->iscsic.mac_addr, dev->dev_addr, ETH_ALEN);
	pi->iscsic.mac_addr[3] |= 0x80;
}

#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
		   NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
/* PCI probe: bring up one adapter and register its ports. */
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int i, err;
	resource_size_t mmio_start, mmio_len;
	const struct adapter_info *ai;
	struct adapter *adapter = NULL;
	struct port_info *pi;

	if (!cxgb3_wq) {
		cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
		if (!cxgb3_wq) {
			pr_err("cannot initialize work queue\n");
			return -ENOMEM;
		}
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		/* Just info, some other driver may have claimed the device.
*/ 3230f7917c00SJeff Kirsher dev_info(&pdev->dev, "cannot obtain PCI resources\n"); 3231f7917c00SJeff Kirsher goto out_disable_device; 3232f7917c00SJeff Kirsher } 3233f7917c00SJeff Kirsher 3234544bdad0SChristophe JAILLET err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 3235544bdad0SChristophe JAILLET if (err) { 3236f7917c00SJeff Kirsher dev_err(&pdev->dev, "no usable DMA configuration\n"); 3237f7917c00SJeff Kirsher goto out_release_regions; 3238f7917c00SJeff Kirsher } 3239f7917c00SJeff Kirsher 3240f7917c00SJeff Kirsher pci_set_master(pdev); 3241f7917c00SJeff Kirsher pci_save_state(pdev); 3242f7917c00SJeff Kirsher 3243f7917c00SJeff Kirsher mmio_start = pci_resource_start(pdev, 0); 3244f7917c00SJeff Kirsher mmio_len = pci_resource_len(pdev, 0); 3245f7917c00SJeff Kirsher ai = t3_get_adapter_info(ent->driver_data); 3246f7917c00SJeff Kirsher 3247f7917c00SJeff Kirsher adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); 3248f7917c00SJeff Kirsher if (!adapter) { 3249f7917c00SJeff Kirsher err = -ENOMEM; 3250f7917c00SJeff Kirsher goto out_release_regions; 3251f7917c00SJeff Kirsher } 3252f7917c00SJeff Kirsher 3253f7917c00SJeff Kirsher adapter->nofail_skb = 3254f7917c00SJeff Kirsher alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_KERNEL); 3255f7917c00SJeff Kirsher if (!adapter->nofail_skb) { 3256f7917c00SJeff Kirsher dev_err(&pdev->dev, "cannot allocate nofail buffer\n"); 3257f7917c00SJeff Kirsher err = -ENOMEM; 3258f7917c00SJeff Kirsher goto out_free_adapter; 3259f7917c00SJeff Kirsher } 3260f7917c00SJeff Kirsher 32614bdc0d67SChristoph Hellwig adapter->regs = ioremap(mmio_start, mmio_len); 3262f7917c00SJeff Kirsher if (!adapter->regs) { 3263f7917c00SJeff Kirsher dev_err(&pdev->dev, "cannot map device registers\n"); 3264f7917c00SJeff Kirsher err = -ENOMEM; 3265debea2cdSChristophe JAILLET goto out_free_adapter_nofail; 3266f7917c00SJeff Kirsher } 3267f7917c00SJeff Kirsher 3268f7917c00SJeff Kirsher adapter->pdev = pdev; 3269f7917c00SJeff Kirsher adapter->name = 
pci_name(pdev); 3270f7917c00SJeff Kirsher adapter->msg_enable = dflt_msg_enable; 3271f7917c00SJeff Kirsher adapter->mmio_len = mmio_len; 3272f7917c00SJeff Kirsher 3273f7917c00SJeff Kirsher mutex_init(&adapter->mdio_lock); 3274f7917c00SJeff Kirsher spin_lock_init(&adapter->work_lock); 3275f7917c00SJeff Kirsher spin_lock_init(&adapter->stats_lock); 3276f7917c00SJeff Kirsher 3277f7917c00SJeff Kirsher INIT_LIST_HEAD(&adapter->adapter_list); 3278f7917c00SJeff Kirsher INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task); 3279f7917c00SJeff Kirsher INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task); 3280f7917c00SJeff Kirsher 3281f7917c00SJeff Kirsher INIT_WORK(&adapter->db_full_task, db_full_task); 3282f7917c00SJeff Kirsher INIT_WORK(&adapter->db_empty_task, db_empty_task); 3283f7917c00SJeff Kirsher INIT_WORK(&adapter->db_drop_task, db_drop_task); 3284f7917c00SJeff Kirsher 3285f7917c00SJeff Kirsher INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task); 3286f7917c00SJeff Kirsher 3287f7917c00SJeff Kirsher for (i = 0; i < ai->nports0 + ai->nports1; ++i) { 3288f7917c00SJeff Kirsher struct net_device *netdev; 3289f7917c00SJeff Kirsher 3290f7917c00SJeff Kirsher netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS); 3291f7917c00SJeff Kirsher if (!netdev) { 3292f7917c00SJeff Kirsher err = -ENOMEM; 3293f7917c00SJeff Kirsher goto out_free_dev; 3294f7917c00SJeff Kirsher } 3295f7917c00SJeff Kirsher 3296f7917c00SJeff Kirsher SET_NETDEV_DEV(netdev, &pdev->dev); 3297f7917c00SJeff Kirsher 3298f7917c00SJeff Kirsher adapter->port[i] = netdev; 3299f7917c00SJeff Kirsher pi = netdev_priv(netdev); 3300f7917c00SJeff Kirsher pi->adapter = adapter; 3301f7917c00SJeff Kirsher pi->port_id = i; 3302f7917c00SJeff Kirsher netif_carrier_off(netdev); 3303f7917c00SJeff Kirsher netdev->irq = pdev->irq; 3304f7917c00SJeff Kirsher netdev->mem_start = mmio_start; 3305f7917c00SJeff Kirsher netdev->mem_end = mmio_start + mmio_len - 1; 3306f7917c00SJeff Kirsher 
netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | 3307f646968fSPatrick McHardy NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX; 3308f646968fSPatrick McHardy netdev->features |= netdev->hw_features | 3309f646968fSPatrick McHardy NETIF_F_HW_VLAN_CTAG_TX; 33101d962ecfSbrenohl@br.ibm.com netdev->vlan_features |= netdev->features & VLAN_FEAT; 3311544bdad0SChristophe JAILLET 3312f7917c00SJeff Kirsher netdev->features |= NETIF_F_HIGHDMA; 3313f7917c00SJeff Kirsher 3314f7917c00SJeff Kirsher netdev->netdev_ops = &cxgb_netdev_ops; 33157ad24ea4SWilfried Klaebe netdev->ethtool_ops = &cxgb_ethtool_ops; 3316d894be57SJarod Wilson netdev->min_mtu = 81; 3317d894be57SJarod Wilson netdev->max_mtu = ETH_MAX_MTU; 33188fc79766SArjun Vynipadath netdev->dev_port = pi->port_id; 3319f7917c00SJeff Kirsher } 3320f7917c00SJeff Kirsher 3321f7917c00SJeff Kirsher pci_set_drvdata(pdev, adapter); 3322f7917c00SJeff Kirsher if (t3_prep_adapter(adapter, ai, 1) < 0) { 3323f7917c00SJeff Kirsher err = -ENODEV; 3324f7917c00SJeff Kirsher goto out_free_dev; 3325f7917c00SJeff Kirsher } 3326f7917c00SJeff Kirsher 3327f7917c00SJeff Kirsher /* 3328f7917c00SJeff Kirsher * The card is now ready to go. If any errors occur during device 3329f7917c00SJeff Kirsher * registration we do not fail the whole card but rather proceed only 3330f7917c00SJeff Kirsher * with the ports we manage to register successfully. However we must 3331f7917c00SJeff Kirsher * register at least one net device. 
3332f7917c00SJeff Kirsher */ 3333f7917c00SJeff Kirsher for_each_port(adapter, i) { 3334f7917c00SJeff Kirsher err = register_netdev(adapter->port[i]); 3335f7917c00SJeff Kirsher if (err) 3336f7917c00SJeff Kirsher dev_warn(&pdev->dev, 3337f7917c00SJeff Kirsher "cannot register net device %s, skipping\n", 3338f7917c00SJeff Kirsher adapter->port[i]->name); 3339f7917c00SJeff Kirsher else { 3340f7917c00SJeff Kirsher /* 3341f7917c00SJeff Kirsher * Change the name we use for messages to the name of 3342f7917c00SJeff Kirsher * the first successfully registered interface. 3343f7917c00SJeff Kirsher */ 3344f7917c00SJeff Kirsher if (!adapter->registered_device_map) 3345f7917c00SJeff Kirsher adapter->name = adapter->port[i]->name; 3346f7917c00SJeff Kirsher 3347f7917c00SJeff Kirsher __set_bit(i, &adapter->registered_device_map); 3348f7917c00SJeff Kirsher } 3349f7917c00SJeff Kirsher } 3350f7917c00SJeff Kirsher if (!adapter->registered_device_map) { 3351f7917c00SJeff Kirsher dev_err(&pdev->dev, "could not register any net devices\n"); 335269adcb98SZheyu Ma err = -ENODEV; 3353f7917c00SJeff Kirsher goto out_free_dev; 3354f7917c00SJeff Kirsher } 3355f7917c00SJeff Kirsher 3356f7917c00SJeff Kirsher for_each_port(adapter, i) 3357f7917c00SJeff Kirsher cxgb3_init_iscsi_mac(adapter->port[i]); 3358f7917c00SJeff Kirsher 3359f7917c00SJeff Kirsher /* Driver's ready. 
Reflect it on LEDs */ 3360f7917c00SJeff Kirsher t3_led_ready(adapter); 3361f7917c00SJeff Kirsher 3362f7917c00SJeff Kirsher if (is_offload(adapter)) { 3363f7917c00SJeff Kirsher __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map); 3364f7917c00SJeff Kirsher cxgb3_adapter_ofld(adapter); 3365f7917c00SJeff Kirsher } 3366f7917c00SJeff Kirsher 3367f7917c00SJeff Kirsher /* See what interrupts we'll be using */ 3368f7917c00SJeff Kirsher if (msi > 1 && cxgb_enable_msix(adapter) == 0) 3369f7917c00SJeff Kirsher adapter->flags |= USING_MSIX; 3370f7917c00SJeff Kirsher else if (msi > 0 && pci_enable_msi(pdev) == 0) 3371f7917c00SJeff Kirsher adapter->flags |= USING_MSI; 3372f7917c00SJeff Kirsher 3373f7917c00SJeff Kirsher set_nqsets(adapter); 3374f7917c00SJeff Kirsher 3375f7917c00SJeff Kirsher err = sysfs_create_group(&adapter->port[0]->dev.kobj, 3376f7917c00SJeff Kirsher &cxgb3_attr_group); 33777c099773SZhouyang Jia if (err) { 33787c099773SZhouyang Jia dev_err(&pdev->dev, "cannot create sysfs group\n"); 33797c099773SZhouyang Jia goto out_close_led; 33807c099773SZhouyang Jia } 3381f7917c00SJeff Kirsher 3382f7917c00SJeff Kirsher print_port_info(adapter, ai); 3383f7917c00SJeff Kirsher return 0; 3384f7917c00SJeff Kirsher 33857c099773SZhouyang Jia out_close_led: 33867c099773SZhouyang Jia t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL, 0); 33877c099773SZhouyang Jia 3388f7917c00SJeff Kirsher out_free_dev: 3389f7917c00SJeff Kirsher iounmap(adapter->regs); 3390f7917c00SJeff Kirsher for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i) 3391f7917c00SJeff Kirsher if (adapter->port[i]) 3392f7917c00SJeff Kirsher free_netdev(adapter->port[i]); 3393f7917c00SJeff Kirsher 3394debea2cdSChristophe JAILLET out_free_adapter_nofail: 3395debea2cdSChristophe JAILLET kfree_skb(adapter->nofail_skb); 3396debea2cdSChristophe JAILLET 3397f7917c00SJeff Kirsher out_free_adapter: 3398f7917c00SJeff Kirsher kfree(adapter); 3399f7917c00SJeff Kirsher 3400f7917c00SJeff Kirsher out_release_regions: 
3401f7917c00SJeff Kirsher pci_release_regions(pdev); 3402f7917c00SJeff Kirsher out_disable_device: 3403f7917c00SJeff Kirsher pci_disable_device(pdev); 3404f7917c00SJeff Kirsher out: 3405f7917c00SJeff Kirsher return err; 3406f7917c00SJeff Kirsher } 3407f7917c00SJeff Kirsher 34082109eaabSBill Pemberton static void remove_one(struct pci_dev *pdev) 3409f7917c00SJeff Kirsher { 3410f7917c00SJeff Kirsher struct adapter *adapter = pci_get_drvdata(pdev); 3411f7917c00SJeff Kirsher 3412f7917c00SJeff Kirsher if (adapter) { 3413f7917c00SJeff Kirsher int i; 3414f7917c00SJeff Kirsher 3415f7917c00SJeff Kirsher t3_sge_stop(adapter); 3416f7917c00SJeff Kirsher sysfs_remove_group(&adapter->port[0]->dev.kobj, 3417f7917c00SJeff Kirsher &cxgb3_attr_group); 3418f7917c00SJeff Kirsher 3419f7917c00SJeff Kirsher if (is_offload(adapter)) { 3420f7917c00SJeff Kirsher cxgb3_adapter_unofld(adapter); 3421f7917c00SJeff Kirsher if (test_bit(OFFLOAD_DEVMAP_BIT, 3422f7917c00SJeff Kirsher &adapter->open_device_map)) 3423f7917c00SJeff Kirsher offload_close(&adapter->tdev); 3424f7917c00SJeff Kirsher } 3425f7917c00SJeff Kirsher 3426f7917c00SJeff Kirsher for_each_port(adapter, i) 3427f7917c00SJeff Kirsher if (test_bit(i, &adapter->registered_device_map)) 3428f7917c00SJeff Kirsher unregister_netdev(adapter->port[i]); 3429f7917c00SJeff Kirsher 3430f7917c00SJeff Kirsher t3_stop_sge_timers(adapter); 3431f7917c00SJeff Kirsher t3_free_sge_resources(adapter); 3432f7917c00SJeff Kirsher cxgb_disable_msi(adapter); 3433f7917c00SJeff Kirsher 3434f7917c00SJeff Kirsher for_each_port(adapter, i) 3435f7917c00SJeff Kirsher if (adapter->port[i]) 3436f7917c00SJeff Kirsher free_netdev(adapter->port[i]); 3437f7917c00SJeff Kirsher 3438f7917c00SJeff Kirsher iounmap(adapter->regs); 3439f7917c00SJeff Kirsher kfree_skb(adapter->nofail_skb); 3440f7917c00SJeff Kirsher kfree(adapter); 3441f7917c00SJeff Kirsher pci_release_regions(pdev); 3442f7917c00SJeff Kirsher pci_disable_device(pdev); 3443f7917c00SJeff Kirsher } 3444f7917c00SJeff 
Kirsher } 3445f7917c00SJeff Kirsher 3446f7917c00SJeff Kirsher static struct pci_driver driver = { 3447f7917c00SJeff Kirsher .name = DRV_NAME, 3448f7917c00SJeff Kirsher .id_table = cxgb3_pci_tbl, 3449f7917c00SJeff Kirsher .probe = init_one, 34502109eaabSBill Pemberton .remove = remove_one, 3451f7917c00SJeff Kirsher .err_handler = &t3_err_handler, 3452f7917c00SJeff Kirsher }; 3453f7917c00SJeff Kirsher 3454f7917c00SJeff Kirsher static int __init cxgb3_init_module(void) 3455f7917c00SJeff Kirsher { 3456f7917c00SJeff Kirsher int ret; 3457f7917c00SJeff Kirsher 3458f7917c00SJeff Kirsher cxgb3_offload_init(); 3459f7917c00SJeff Kirsher 3460f7917c00SJeff Kirsher ret = pci_register_driver(&driver); 3461f7917c00SJeff Kirsher return ret; 3462f7917c00SJeff Kirsher } 3463f7917c00SJeff Kirsher 3464f7917c00SJeff Kirsher static void __exit cxgb3_cleanup_module(void) 3465f7917c00SJeff Kirsher { 3466f7917c00SJeff Kirsher pci_unregister_driver(&driver); 3467f7917c00SJeff Kirsher if (cxgb3_wq) 3468f7917c00SJeff Kirsher destroy_workqueue(cxgb3_wq); 3469f7917c00SJeff Kirsher } 3470f7917c00SJeff Kirsher 3471f7917c00SJeff Kirsher module_init(cxgb3_init_module); 3472f7917c00SJeff Kirsher module_exit(cxgb3_cleanup_module); 3473