/*
 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mdio.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <linux/stringify.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <asm/uaccess.h>

#include "common.h"
#include "cxgb3_ioctl.h"
#include "regs.h"
#include "cxgb3_offload.h"
#include "version.h"

#include "cxgb3_ctl_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"

enum {
	MAX_TXQ_ENTRIES = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES = 16384,
	MAX_RX_BUFFERS = 16384,
	MAX_RX_JUMBO_BUFFERS = 16384,
	MIN_TXQ_ENTRIES = 4,
	MIN_CTRL_TXQ_ENTRIES = 4,
	MIN_RSPQ_ENTRIES = 32,
	MIN_FL_ENTRIES = 32
};

#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EEPROM_MAGIC 0x38E2F10C

#define CH_DEVICE(devid, idx) \
	{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }

static DEFINE_PCI_DEVICE_TABLE(cxgb3_pci_tbl) = {
	CH_DEVICE(0x20, 0),	/* PE9000 */
	CH_DEVICE(0x21, 1),	/* T302E */
	CH_DEVICE(0x22, 2),	/* T310E */
	CH_DEVICE(0x23, 3),	/* T320X */
	CH_DEVICE(0x24, 1),	/* T302X */
	CH_DEVICE(0x25, 3),	/* T320E */
	CH_DEVICE(0x26, 2),	/* T310X */
	CH_DEVICE(0x30, 2),	/* T3B10 */
	CH_DEVICE(0x31, 3),	/* T3B20 */
	CH_DEVICE(0x32, 1),	/* T3B02 */
	CH_DEVICE(0x35, 6),	/* T3C20-derived T3C10 */
	CH_DEVICE(0x36, 3),	/* S320E-CR */
	CH_DEVICE(0x37, 7),	/* N320E-G2 */
	{0,}
};

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");

/*
 * The driver enables offload by default.
 * To disable it, set ofld_disable = 1.
 */

static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");

/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete.  Get our own work queue to solve this.
 */
struct workqueue_struct *cxgb3_wq;

/**
 *	link_report - show link status and link speed/duplex
 *	@dev: the port whose link status is to be reported
 *
 *	Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		printk(KERN_INFO "%s: link down\n", dev->name);
	else {
		const char *s = "10Mbps";
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_config.speed) {
		case SPEED_10000:
			s = "10Gbps";
			break;
		case SPEED_1000:
			s = "1000Mbps";
			break;
		case SPEED_100:
			s = "100Mbps";
			break;
		}

		printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
		       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
	}
}

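/*
 * Helpers used to flush the XGMAC Tx FIFO when a link goes down or reports a
 * fault.  enable_tx_fifo_drain() sets ENDROPPKT so queued Tx frames are
 * dropped and re-enables the MAC Tx/Rx paths so the FIFO actually drains;
 * disable_tx_fifo_drain() clears the bit again once the link is healthy.
 */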
static void enable_tx_fifo_drain(struct adapter *adapter,
				 struct port_info *pi)
{
	t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset, 0,
			 F_ENDROPPKT);
	t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, 0);
	t3_write_reg(adapter, A_XGM_TX_CTRL + pi->mac.offset, F_TXEN);
	t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, F_RXEN);
}

static void disable_tx_fifo_drain(struct adapter *adapter,
				  struct port_info *pi)
{
	t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset,
			 F_ENDROPPKT, 0);
}

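/*
 * t3_os_link_fault - OS-dependent handler for XGMAC link-fault changes
 *
 * Updates the carrier state of the port's net_device.  When the fault clears,
 * the local fault interrupt is cleared and re-armed and the MAC Tx path is
 * re-enabled; when a fault is raised, the Tx FIFO is flushed instead.
 */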
void t3_os_link_fault(struct adapter *adap, int port_id, int state)
{
	struct net_device *dev = adap->port[port_id];
	struct port_info *pi = netdev_priv(dev);

	if (state == netif_carrier_ok(dev))
		return;

	if (state) {
		struct cmac *mac = &pi->mac;

		netif_carrier_on(dev);

		disable_tx_fifo_drain(adap, pi);

		/* Clear local faults */
		t3_xgm_intr_disable(adap, pi->port_id);
		t3_read_reg(adap, A_XGM_INT_STATUS + pi->mac.offset);
		t3_write_reg(adap, A_XGM_INT_CAUSE + pi->mac.offset,
			     F_XGM_INT);

		t3_set_reg_field(adap, A_XGM_INT_ENABLE + pi->mac.offset,
				 F_XGM_INT, F_XGM_INT);
		t3_xgm_intr_enable(adap, pi->port_id);

		t3_mac_enable(mac, MAC_DIRECTION_TX);
	} else {
		netif_carrier_off(dev);

		/* Flush TX FIFO */
		enable_tx_fifo_drain(adap, pi);
	}
	link_report(dev);
}

/**
 *	t3_os_link_changed - handle link status changes
 *	@adapter: the adapter associated with the link change
 *	@port_id: the port index whose link status has changed
 *	@link_stat: the new status of the link
 *	@speed: the new speed setting
 *	@duplex: the new duplex setting
 *	@pause: the new flow-control setting
 *
 *	This is the OS-dependent handler for link status changes.  The OS
 *	neutral handler takes care of most of the processing for these events,
 *	then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
			int speed, int duplex, int pause)
{
	struct net_device *dev = adapter->port[port_id];
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	/* Skip changes from disabled ports. */
	if (!netif_running(dev))
		return;

	if (link_stat != netif_carrier_ok(dev)) {
		if (link_stat) {
			disable_tx_fifo_drain(adapter, pi);

			t3_mac_enable(mac, MAC_DIRECTION_RX);

			/* Clear local faults */
			t3_xgm_intr_disable(adapter, pi->port_id);
			t3_read_reg(adapter, A_XGM_INT_STATUS +
				    pi->mac.offset);
			t3_write_reg(adapter,
				     A_XGM_INT_CAUSE + pi->mac.offset,
				     F_XGM_INT);

			t3_set_reg_field(adapter,
					 A_XGM_INT_ENABLE + pi->mac.offset,
					 F_XGM_INT, F_XGM_INT);
			t3_xgm_intr_enable(adapter, pi->port_id);

			netif_carrier_on(dev);
		} else {
			netif_carrier_off(dev);

			t3_xgm_intr_disable(adapter, pi->port_id);
			t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
			t3_set_reg_field(adapter,
					 A_XGM_INT_ENABLE + pi->mac.offset,
					 F_XGM_INT, 0);

			if (is_10G(adapter))
				pi->phy.ops->power_down(&pi->phy, 1);

			t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
			t3_mac_disable(mac, MAC_DIRECTION_RX);
			t3_link_start(&pi->phy, mac, &pi->link_config);

			/* Flush TX FIFO */
			enable_tx_fifo_drain(adapter, pi);
		}

		link_report(dev);
	}
}

/**
 *	t3_os_phymod_changed - handle PHY module changes
 *	@adap: the adapter whose PHY reported the module change
 *	@port_id: the port index of the affected PHY
 *
 *	This is the OS-dependent handler for PHY module changes.  It is
 *	invoked when a PHY module is removed or inserted for any OS-specific
 *	processing.
 */
void t3_os_phymod_changed(struct adapter *adap, int port_id)
{
	static const char *mod_str[] = {
		NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
	};

	const struct net_device *dev = adap->port[port_id];
	const struct port_info *pi = netdev_priv(dev);

	if (pi->phy.modtype == phy_modtype_none)
		printk(KERN_INFO "%s: PHY module unplugged\n", dev->name);
	else
		printk(KERN_INFO "%s: %s PHY module inserted\n", dev->name,
		       mod_str[pi->phy.modtype]);
}

static void cxgb_set_rxmode(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);

	t3_mac_set_rx_mode(&pi->mac, dev);
}

/**
 *	link_start - enable a port
 *	@dev: the device to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	t3_mac_reset(mac);
	t3_mac_set_num_ucast(mac, MAX_MAC_IDX);
	t3_mac_set_mtu(mac, dev->mtu);
	t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
	t3_mac_set_address(mac, SAN_MAC_IDX, pi->iscsic.mac_addr);
	t3_mac_set_rx_mode(mac, dev);
	t3_link_start(&pi->phy, mac, &pi->link_config);
	t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}

static inline void cxgb_disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}

/*
 * Interrupt handler for asynchronous events used with MSI-X.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
	t3_slow_intr_handler(cookie);
	return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

	snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
	adap->msix_info[0].desc[n] = 0;

	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++) {
			snprintf(adap->msix_info[msi_idx].desc, n,
				 "%s-%d", d->name, pi->first_qset + i);
			adap->msix_info[msi_idx].desc[n] = 0;
		}
	}
}

static int request_msix_data_irqs(struct adapter *adap)
{
	int i, j, err, qidx = 0;

	for_each_port(adap, i) {
		int nqsets = adap2pinfo(adap, i)->nqsets;

		for (j = 0; j < nqsets; ++j) {
			err = request_irq(adap->msix_info[qidx + 1].vec,
					  t3_intr_handler(adap,
							  adap->sge.qs[qidx].
							  rspq.polling), 0,
					  adap->msix_info[qidx + 1].desc,
					  &adap->sge.qs[qidx]);
			if (err) {
				while (--qidx >= 0)
					free_irq(adap->msix_info[qidx + 1].vec,
						 &adap->sge.qs[qidx]);
				return err;
			}
			qidx++;
		}
	}
	return 0;
}

static void free_irq_resources(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		int i, n = 0;

		free_irq(adapter->msix_info[0].vec, adapter);
		for_each_port(adapter, i)
			n += adap2pinfo(adapter, i)->nqsets;

		for (i = 0; i < n; ++i)
			free_irq(adapter->msix_info[i + 1].vec,
				 &adapter->sge.qs[i]);
	} else
		free_irq(adapter->pdev->irq, adapter);
}

static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
			      unsigned long n)
{
	int attempts = 10;

	while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
		if (!--attempts)
			return -ETIMEDOUT;
		msleep(10);
	}
	return 0;
}

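/*
 * init_tp_parity - establish valid parity in the TP on-chip memories
 *
 * Writes every SMT, L2T and routing-table entry (plus one TCB field) through
 * management CPLs so the corresponding memories start out with valid parity
 * and later reads do not raise spurious parity errors.
 */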
static int init_tp_parity(struct adapter *adap)
{
	int i;
	struct sk_buff *skb;
	struct cpl_set_tcb_field *greq;
	unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;

	t3_tp_set_offload_mode(adap, 1);

	for (i = 0; i < 16; i++) {
		struct cpl_smt_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
		if (!skb)
			skb = adap->nofail_skb;
		if (!skb)
			goto alloc_skb_fail;

		req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
		memset(req, 0, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
		req->mtu_idx = NMTUS - 1;
		req->iff = i;
		t3_mgmt_tx(adap, skb);
		if (skb == adap->nofail_skb) {
			await_mgmt_replies(adap, cnt, i + 1);
			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
			if (!adap->nofail_skb)
				goto alloc_skb_fail;
		}
	}

	for (i = 0; i < 2048; i++) {
		struct cpl_l2t_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
		if (!skb)
			skb = adap->nofail_skb;
		if (!skb)
			goto alloc_skb_fail;

		req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
		memset(req, 0, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
		req->params = htonl(V_L2T_W_IDX(i));
		t3_mgmt_tx(adap, skb);
		if (skb == adap->nofail_skb) {
			await_mgmt_replies(adap, cnt, 16 + i + 1);
			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
			if (!adap->nofail_skb)
				goto alloc_skb_fail;
		}
	}

	for (i = 0; i < 2048; i++) {
		struct cpl_rte_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
		if (!skb)
			skb = adap->nofail_skb;
		if (!skb)
			goto alloc_skb_fail;

		req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
		memset(req, 0, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
		req->l2t_idx = htonl(V_L2T_W_IDX(i));
		t3_mgmt_tx(adap, skb);
		if (skb == adap->nofail_skb) {
			await_mgmt_replies(adap, cnt, 16 + 2048 + i + 1);
			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
			if (!adap->nofail_skb)
				goto alloc_skb_fail;
		}
	}

	skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
	if (!skb)
		skb = adap->nofail_skb;
	if (!skb)
		goto alloc_skb_fail;

	greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
	memset(greq, 0, sizeof(*greq));
	greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
	greq->mask = cpu_to_be64(1);
	t3_mgmt_tx(adap, skb);

	i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
	if (skb == adap->nofail_skb) {
		i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
		adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
	}

	t3_tp_set_offload_mode(adap, 0);
	return i;

alloc_skb_fail:
	t3_tp_set_offload_mode(adap, 0);
	return -ENOMEM;
}

/**
 *	setup_rss - configure RSS
 *	@adap: the adapter
 *
 *	Sets up RSS to distribute packets to multiple receive queues.  We
 *	configure the RSS CPU lookup table to distribute to the number of HW
 *	receive queues, and the response queue lookup table to narrow that
 *	down to the response queues actually configured for each port.
 *	We always configure the RSS mapping for two ports since the mapping
 *	table has plenty of entries.
 */
static void setup_rss(struct adapter *adap)
{
	int i;
	unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
	unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
	u8 cpus[SGE_QSETS + 1];
	u16 rspq_map[RSS_TABLE_SIZE];

	for (i = 0; i < SGE_QSETS; ++i)
		cpus[i] = i;
	cpus[SGE_QSETS] = 0xff;	/* terminator */

	for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
		rspq_map[i] = i % nq0;
		rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
	}

	t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
		      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
		      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
}

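/*
 * Ring the SGE kdoorbell for every Tx queue of each initialized queue set so
 * the hardware re-examines those queues, e.g. when restarting after an error.
 */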
static void ring_dbs(struct adapter *adap)
{
	int i, j;

	for (i = 0; i < SGE_QSETS; i++) {
		struct sge_qset *qs = &adap->sge.qs[i];

		if (qs->adap)
			for (j = 0; j < SGE_TXQ_PER_SET; j++)
				t3_write_reg(adap, A_SG_KDOORBELL,
					     F_SELEGRCNTX |
					     V_EGRCNTX(qs->txq[j].cntxt_id));
	}
}

static void init_napi(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++) {
		struct sge_qset *qs = &adap->sge.qs[i];

		if (qs->adap)
			netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
				       64);
	}

	/*
	 * netif_napi_add() can be called only once per napi_struct because it
	 * adds each new napi_struct to a list.  Be careful not to call it a
	 * second time, e.g., during EEH recovery, by making a note of it.
	 */
	adap->flags |= NAPI_INIT;
}

/*
 * Wait until all NAPI handlers are descheduled.  This includes the handlers of
 * both netdevices representing interfaces and the dummy ones for the extra
 * queues.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++)
		if (adap->sge.qs[i].adap)
			napi_disable(&adap->sge.qs[i].napi);
}

static void enable_all_napi(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++)
		if (adap->sge.qs[i].adap)
			napi_enable(&adap->sge.qs[i].napi);
}

/**
 *	setup_sge_qsets - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int setup_sge_qsets(struct adapter *adap)
{
	int i, j, err, irq_idx = 0, qset_idx = 0;
	unsigned int ntxq = SGE_TXQ_PER_SET;

	if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
		irq_idx = -1;

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);

		pi->qs = &adap->sge.qs[pi->first_qset];
		for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
			err = t3_sge_alloc_qset(adap, qset_idx, 1,
				(adap->flags & USING_MSIX) ? qset_idx + 1 :
							     irq_idx,
				&adap->params.sge.qset[qset_idx], ntxq, dev,
				netdev_get_tx_queue(dev, j));
			if (err) {
				t3_free_sge_resources(adap);
				return err;
			}
		}
	}

	return 0;
}

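/*
 * Generic show/store helpers for the per-device sysfs attributes below.
 * They take the RTNL lock so attribute access is serialized against ioctls
 * that may reconfigure or shut down the device, and attr_store() also
 * range-checks the user-supplied value.
 */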
static ssize_t attr_show(struct device *d, char *buf,
			 ssize_t(*format) (struct net_device *, char *))
{
	ssize_t len;

	/* Synchronize with ioctls that may shut down the device */
	rtnl_lock();
	len = (*format) (to_net_dev(d), buf);
	rtnl_unlock();
	return len;
}

static ssize_t attr_store(struct device *d,
			  const char *buf, size_t len,
			  ssize_t(*set) (struct net_device *, unsigned int),
			  unsigned int min_val, unsigned int max_val)
{
	char *endp;
	ssize_t ret;
	unsigned int val;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	val = simple_strtoul(buf, &endp, 0);
	if (endp == buf || val < min_val || val > max_val)
		return -EINVAL;

	rtnl_lock();
	ret = (*set) (to_net_dev(d), val);
	if (!ret)
		ret = len;
	rtnl_unlock();
	return ret;
}

#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct net_device *dev, char *buf) \
{ \
	struct port_info *pi = netdev_priv(dev); \
	struct adapter *adap = pi->adapter; \
	return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return attr_show(d, buf, format_##name); \
}

static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val && adap->params.rev == 0)
		return -EINVAL;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
	    min_tids)
		return -EINVAL;
	adap->params.mc5.nfilters = val;
	return 0;
}

static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, buf, len, set_nfilters, 0, ~0);
}

static ssize_t set_nservers(struct net_device *dev, unsigned int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
	    MC5_MIN_TIDS)
		return -EINVAL;
	adap->params.mc5.nservers = val;
	return 0;
}

static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, buf, len, set_nservers, 0, ~0);
}

#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)

CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

static struct attribute *cxgb3_attrs[] = {
	&dev_attr_cam_size.attr,
	&dev_attr_nfilters.attr,
	&dev_attr_nservers.attr,
	NULL
};

static struct attribute_group cxgb3_attr_group = { .attrs = cxgb3_attrs };

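/*
 * Show/store helpers for the Tx traffic-manager scheduler attributes.
 * Each HW scheduler is programmed as bpt bytes every cpt core-clock ticks;
 * tm_attr_show() reads those values through the TP TM PIO window and reports
 * the resulting rate as (cclk * 1000 / cpt) * bpt / 125 Kbps, while
 * tm_attr_store() hands the requested rate to t3_config_sched().
 */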
static ssize_t tm_attr_show(struct device *d,
			    char *buf, int sched)
{
	struct port_info *pi = netdev_priv(to_net_dev(d));
	struct adapter *adap = pi->adapter;
	unsigned int v, addr, bpt, cpt;
	ssize_t len;

	addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
	rtnl_lock();
	t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
	if (sched & 1)
		v >>= 16;
	bpt = (v >> 8) & 0xff;
	cpt = v & 0xff;
	if (!cpt)
		len = sprintf(buf, "disabled\n");
	else {
		v = (adap->params.vpd.cclk * 1000) / cpt;
		len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
	}
	rtnl_unlock();
	return len;
}

static ssize_t tm_attr_store(struct device *d,
			     const char *buf, size_t len, int sched)
{
	struct port_info *pi = netdev_priv(to_net_dev(d));
	struct adapter *adap = pi->adapter;
	unsigned int val;
	char *endp;
	ssize_t ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	val = simple_strtoul(buf, &endp, 0);
	if (endp == buf || val > 10000000)
		return -EINVAL;

	rtnl_lock();
	ret = t3_config_sched(adap, val, sched);
	if (!ret)
		ret = len;
	rtnl_unlock();
	return ret;
}

#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return tm_attr_show(d, buf, sched); \
} \
static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
			    const char *buf, size_t len) \
{ \
	return tm_attr_store(d, buf, len, sched); \
} \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)

TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);

static struct attribute *offload_attrs[] = {
	&dev_attr_sched0.attr,
	&dev_attr_sched1.attr,
	&dev_attr_sched2.attr,
	&dev_attr_sched3.attr,
	&dev_attr_sched4.attr,
	&dev_attr_sched5.attr,
	&dev_attr_sched6.attr,
	&dev_attr_sched7.attr,
	NULL
};

static struct attribute_group offload_attr_group = { .attrs = offload_attrs };

/*
 * Sends an sk_buff to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
	int ret;

	local_bh_disable();
	ret = t3_offload_tx(tdev, skb);
	local_bh_enable();
	return ret;
}

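/*
 * Program the SMT entry for port idx with the port's LAN and iSCSI MAC
 * addresses by sending a CPL_SMT_WRITE_REQ through offload_tx().
 */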
static int write_smt_entry(struct adapter *adapter, int idx)
{
	struct cpl_smt_write_req *req;
	struct port_info *pi = netdev_priv(adapter->port[idx]);
	struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

	if (!skb)
		return -ENOMEM;

	req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
	req->mtu_idx = NMTUS - 1;	/* should be 0 but there's a T3 bug */
	req->iff = idx;
	memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
	memcpy(req->src_mac1, pi->iscsic.mac_addr, ETH_ALEN);
	skb->priority = 1;
	offload_tx(&adapter->tdev, skb);
	return 0;
}

static int init_smt(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i)
		write_smt_entry(adapter, i);
	return 0;
}

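/*
 * Write the per-port MTUs into the TP MTU port table: port 0 occupies the
 * low 16 bits and port 1, if present, the high 16 bits.
 */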
static void init_port_mtus(struct adapter *adapter)
{
	unsigned int mtus = adapter->port[0]->mtu;

	if (adapter->port[1])
		mtus |= adapter->port[1]->mtu << 16;
	t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}

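/*
 * Send a firmware management work request that configures one of the HW
 * packet schedulers (scheduler index, queue index, min/max parameters and
 * port binding).  Used by bind_qsets() below to associate each queue set
 * with its port.
 */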
static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
			     int hi, int port)
{
	struct sk_buff *skb;
	struct mngt_pktsched_wr *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		skb = adap->nofail_skb;
	if (!skb)
		return -ENOMEM;

	req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
	req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
	req->sched = sched;
	req->idx = qidx;
	req->min = lo;
	req->max = hi;
	req->binding = port;
	ret = t3_mgmt_tx(adap, skb);
	if (skb == adap->nofail_skb) {
		adap->nofail_skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
					     GFP_KERNEL);
		if (!adap->nofail_skb)
			ret = -ENOMEM;
	}

	return ret;
}

static int bind_qsets(struct adapter *adap)
{
	int i, j, err = 0;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		for (j = 0; j < pi->nqsets; ++j) {
			int ret = send_pktsched_cmd(adap, 1,
						    pi->first_qset + j, -1,
						    -1, i);
			if (ret)
				err = ret;
		}
	}

	return err;
}

#define FW_VERSION __stringify(FW_VERSION_MAJOR) "."			\
	__stringify(FW_VERSION_MINOR) "." __stringify(FW_VERSION_MICRO)
#define FW_FNAME "cxgb3/t3fw-" FW_VERSION ".bin"
#define TPSRAM_VERSION __stringify(TP_VERSION_MAJOR) "."		\
	__stringify(TP_VERSION_MINOR) "." __stringify(TP_VERSION_MICRO)
#define TPSRAM_NAME "cxgb3/t3%c_psram-" TPSRAM_VERSION ".bin"
#define AEL2005_OPT_EDC_NAME "cxgb3/ael2005_opt_edc.bin"
#define AEL2005_TWX_EDC_NAME "cxgb3/ael2005_twx_edc.bin"
#define AEL2020_TWX_EDC_NAME "cxgb3/ael2020_twx_edc.bin"
MODULE_FIRMWARE(FW_FNAME);
MODULE_FIRMWARE("cxgb3/t3b_psram-" TPSRAM_VERSION ".bin");
MODULE_FIRMWARE("cxgb3/t3c_psram-" TPSRAM_VERSION ".bin");
MODULE_FIRMWARE(AEL2005_OPT_EDC_NAME);
MODULE_FIRMWARE(AEL2005_TWX_EDC_NAME);
MODULE_FIRMWARE(AEL2020_TWX_EDC_NAME);

static inline const char *get_edc_fw_name(int edc_idx)
{
	const char *fw_name = NULL;

	switch (edc_idx) {
	case EDC_OPT_AEL2005:
		fw_name = AEL2005_OPT_EDC_NAME;
		break;
	case EDC_TWX_AEL2005:
		fw_name = AEL2005_TWX_EDC_NAME;
		break;
	case EDC_TWX_AEL2020:
		fw_name = AEL2020_TWX_EDC_NAME;
		break;
	}
	return fw_name;
}

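/*
 * t3_get_edc_fw - load an EDC firmware image for a PHY
 *
 * Fetches the EDC image via request_firmware(), checks its size and its
 * additive checksum (the words of a valid image sum to 0xffffffff), and
 * unpacks each 32-bit word into two 16-bit values in phy->phy_cache for the
 * PHY driver to program.
 */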
int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size)
{
	struct adapter *adapter = phy->adapter;
	const struct firmware *fw;
	char buf[64];
	u32 csum;
	const __be32 *p;
	u16 *cache = phy->phy_cache;
	int i, ret;

	snprintf(buf, sizeof(buf), "%s", get_edc_fw_name(edc_idx));

	ret = request_firmware(&fw, buf, &adapter->pdev->dev);
	if (ret < 0) {
		dev_err(&adapter->pdev->dev,
			"could not upgrade firmware: unable to load %s\n",
			buf);
		return ret;
	}

	/* check size, taking the trailing checksum into account */
	if (fw->size > size + 4) {
		CH_ERR(adapter, "firmware image too large %u, expected %d\n",
		       (unsigned int)fw->size, size + 4);
		ret = -EINVAL;
	}

	/* compute checksum */
	p = (const __be32 *)fw->data;
	for (csum = 0, i = 0; i < fw->size / sizeof(csum); i++)
		csum += ntohl(p[i]);

	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
		       csum);
		ret = -EINVAL;
	}

	for (i = 0; i < size / 4 ; i++) {
		*cache++ = (be32_to_cpu(p[i]) & 0xffff0000) >> 16;
		*cache++ = be32_to_cpu(p[i]) & 0xffff;
	}

	release_firmware(fw);

	return ret;
}

static int upgrade_fw(struct adapter *adap)
{
	int ret;
	const struct firmware *fw;
	struct device *dev = &adap->pdev->dev;

	ret = request_firmware(&fw, FW_FNAME, dev);
	if (ret < 0) {
		dev_err(dev, "could not upgrade firmware: unable to load %s\n",
			FW_FNAME);
		return ret;
	}
	ret = t3_load_fw(adap, fw->data, fw->size);
	release_firmware(fw);

	if (ret == 0)
		dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
			 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
	else
		dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
			FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);

	return ret;
}

static inline char t3rev2char(struct adapter *adapter)
{
	char rev = 0;

	switch (adapter->params.rev) {
	case T3_REV_B:
	case T3_REV_B2:
		rev = 'b';
		break;
	case T3_REV_C:
		rev = 'c';
		break;
	}
	return rev;
}

1109f7917c00SJeff Kirsher static int update_tpsram(struct adapter *adap)
1110f7917c00SJeff Kirsher {
1111f7917c00SJeff Kirsher 	const struct firmware *tpsram;
1112f7917c00SJeff Kirsher 	char buf[64];
1113f7917c00SJeff Kirsher 	struct device *dev = &adap->pdev->dev;
1114f7917c00SJeff Kirsher 	int ret;
1115f7917c00SJeff Kirsher 	char rev;
1116f7917c00SJeff Kirsher 
1117f7917c00SJeff Kirsher 	rev = t3rev2char(adap);
1118f7917c00SJeff Kirsher 	if (!rev)
1119f7917c00SJeff Kirsher 		return 0;
1120f7917c00SJeff Kirsher 
1121f7917c00SJeff Kirsher 	snprintf(buf, sizeof(buf), TPSRAM_NAME, rev);
1122f7917c00SJeff Kirsher 
1123f7917c00SJeff Kirsher 	ret = request_firmware(&tpsram, buf, dev);
1124f7917c00SJeff Kirsher 	if (ret < 0) {
1125f7917c00SJeff Kirsher 		dev_err(dev, "could not load TP SRAM: unable to load %s\n",
1126f7917c00SJeff Kirsher 			buf);
1127f7917c00SJeff Kirsher 		return ret;
1128f7917c00SJeff Kirsher 	}
1129f7917c00SJeff Kirsher 
1130f7917c00SJeff Kirsher 	ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
1131f7917c00SJeff Kirsher 	if (ret)
1132f7917c00SJeff Kirsher 		goto release_tpsram;
1133f7917c00SJeff Kirsher 
1134f7917c00SJeff Kirsher 	ret = t3_set_proto_sram(adap, tpsram->data);
1135f7917c00SJeff Kirsher 	if (ret == 0)
1136f7917c00SJeff Kirsher 		dev_info(dev,
1137f7917c00SJeff Kirsher 			 "successful update of protocol engine "
1138f7917c00SJeff Kirsher 			 "to %d.%d.%d\n",
1139f7917c00SJeff Kirsher 			 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
1140f7917c00SJeff Kirsher 	else
1141f7917c00SJeff Kirsher 		dev_err(dev, "failed to update protocol engine to %d.%d.%d\n",
1142f7917c00SJeff Kirsher 			TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
1143f7917c00SJeff Kirsher 	if (ret)
1144f7917c00SJeff Kirsher 		dev_err(dev, "loading protocol SRAM failed\n");
1145f7917c00SJeff Kirsher 
1146f7917c00SJeff Kirsher release_tpsram:
1147f7917c00SJeff Kirsher 	release_firmware(tpsram);
1148f7917c00SJeff Kirsher 
1149f7917c00SJeff Kirsher 	return ret;
1150f7917c00SJeff Kirsher }
1151f7917c00SJeff Kirsher 
1152f7917c00SJeff Kirsher /**
1153f7917c00SJeff Kirsher  *	cxgb_up - enable the adapter
1154f7917c00SJeff Kirsher  *	@adapter: adapter being enabled
1155f7917c00SJeff Kirsher  *
1156f7917c00SJeff Kirsher  *	Called when the first port is enabled, this function performs the
1157f7917c00SJeff Kirsher  *	actions necessary to make an adapter operational, such as completing
1158f7917c00SJeff Kirsher  *	the initialization of HW modules and enabling interrupts.
1159f7917c00SJeff Kirsher  *
1160f7917c00SJeff Kirsher  *	Must be called with the rtnl lock held.
1161f7917c00SJeff Kirsher  */
1162f7917c00SJeff Kirsher static int cxgb_up(struct adapter *adap)
1163f7917c00SJeff Kirsher {
1164f7917c00SJeff Kirsher 	int err;
1165f7917c00SJeff Kirsher 
1166f7917c00SJeff Kirsher 	if (!(adap->flags & FULL_INIT_DONE)) {
1167f7917c00SJeff Kirsher 		err = t3_check_fw_version(adap);
1168f7917c00SJeff Kirsher 		if (err == -EINVAL) {
1169f7917c00SJeff Kirsher 			err = upgrade_fw(adap);
1170f7917c00SJeff Kirsher 			CH_WARN(adap, "FW upgrade to %d.%d.%d %s\n",
1171f7917c00SJeff Kirsher 				FW_VERSION_MAJOR, FW_VERSION_MINOR,
1172f7917c00SJeff Kirsher 				FW_VERSION_MICRO, err ? "failed" : "succeeded");
1173f7917c00SJeff Kirsher 		}
1174f7917c00SJeff Kirsher 
1175f7917c00SJeff Kirsher 		err = t3_check_tpsram_version(adap);
1176f7917c00SJeff Kirsher 		if (err == -EINVAL) {
1177f7917c00SJeff Kirsher 			err = update_tpsram(adap);
1178f7917c00SJeff Kirsher 			CH_WARN(adap, "TP upgrade to %d.%d.%d %s\n",
1179f7917c00SJeff Kirsher 				TP_VERSION_MAJOR, TP_VERSION_MINOR,
1180f7917c00SJeff Kirsher 				TP_VERSION_MICRO, err ? "failed" : "succeeded");
1181f7917c00SJeff Kirsher 		}
1182f7917c00SJeff Kirsher 
1183f7917c00SJeff Kirsher 		/*
1184f7917c00SJeff Kirsher 		 * Clear interrupts now to catch errors if t3_init_hw fails.
1185f7917c00SJeff Kirsher 		 * We clear them again later as initialization may trigger
1186f7917c00SJeff Kirsher 		 * conditions that can interrupt.
1187f7917c00SJeff Kirsher 		 */
1188f7917c00SJeff Kirsher 		t3_intr_clear(adap);
1189f7917c00SJeff Kirsher 
1190f7917c00SJeff Kirsher 		err = t3_init_hw(adap, 0);
1191f7917c00SJeff Kirsher 		if (err)
1192f7917c00SJeff Kirsher 			goto out;
1193f7917c00SJeff Kirsher 
1194f7917c00SJeff Kirsher 		t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
1195f7917c00SJeff Kirsher 		t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
1196f7917c00SJeff Kirsher 
1197f7917c00SJeff Kirsher 		err = setup_sge_qsets(adap);
1198f7917c00SJeff Kirsher 		if (err)
1199f7917c00SJeff Kirsher 			goto out;
1200f7917c00SJeff Kirsher 
1201f7917c00SJeff Kirsher 		setup_rss(adap);
1202f7917c00SJeff Kirsher 		if (!(adap->flags & NAPI_INIT))
1203f7917c00SJeff Kirsher 			init_napi(adap);
1204f7917c00SJeff Kirsher 
1205f7917c00SJeff Kirsher 		t3_start_sge_timers(adap);
1206f7917c00SJeff Kirsher 		adap->flags |= FULL_INIT_DONE;
1207f7917c00SJeff Kirsher 	}
1208f7917c00SJeff Kirsher 
1209f7917c00SJeff Kirsher 	t3_intr_clear(adap);
1210f7917c00SJeff Kirsher 
1211f7917c00SJeff Kirsher 	if (adap->flags & USING_MSIX) {
1212f7917c00SJeff Kirsher 		name_msix_vecs(adap);
1213f7917c00SJeff Kirsher 		err = request_irq(adap->msix_info[0].vec,
1214f7917c00SJeff Kirsher 				  t3_async_intr_handler, 0,
1215f7917c00SJeff Kirsher 				  adap->msix_info[0].desc, adap);
1216f7917c00SJeff Kirsher 		if (err)
1217f7917c00SJeff Kirsher 			goto irq_err;
1218f7917c00SJeff Kirsher 
1219f7917c00SJeff Kirsher 		err = request_msix_data_irqs(adap);
1220f7917c00SJeff Kirsher 		if (err) {
1221f7917c00SJeff Kirsher 			free_irq(adap->msix_info[0].vec, adap);
1222f7917c00SJeff Kirsher 			goto irq_err;
1223f7917c00SJeff Kirsher 		}
1224f7917c00SJeff Kirsher 	} else if ((err = request_irq(adap->pdev->irq,
1225f7917c00SJeff Kirsher 				      t3_intr_handler(adap,
1226f7917c00SJeff Kirsher 						      adap->sge.qs[0].rspq.
1227f7917c00SJeff Kirsher 						      polling),
1228f7917c00SJeff Kirsher 				      (adap->flags & USING_MSI) ?
1229f7917c00SJeff Kirsher 				       0 : IRQF_SHARED,
1230f7917c00SJeff Kirsher 				      adap->name, adap)))
1231f7917c00SJeff Kirsher 		goto irq_err;
1232f7917c00SJeff Kirsher 
1233f7917c00SJeff Kirsher 	enable_all_napi(adap);
1234f7917c00SJeff Kirsher 	t3_sge_start(adap);
1235f7917c00SJeff Kirsher 	t3_intr_enable(adap);
1236f7917c00SJeff Kirsher 
1237f7917c00SJeff Kirsher 	if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
1238f7917c00SJeff Kirsher 	    is_offload(adap) && init_tp_parity(adap) == 0)
1239f7917c00SJeff Kirsher 		adap->flags |= TP_PARITY_INIT;
1240f7917c00SJeff Kirsher 
1241f7917c00SJeff Kirsher 	if (adap->flags & TP_PARITY_INIT) {
1242f7917c00SJeff Kirsher 		t3_write_reg(adap, A_TP_INT_CAUSE,
1243f7917c00SJeff Kirsher 			     F_CMCACHEPERR | F_ARPLUTPERR);
1244f7917c00SJeff Kirsher 		t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
1245f7917c00SJeff Kirsher 	}
1246f7917c00SJeff Kirsher 
1247f7917c00SJeff Kirsher 	if (!(adap->flags & QUEUES_BOUND)) {
1248f7917c00SJeff Kirsher 		int ret = bind_qsets(adap);
1249f7917c00SJeff Kirsher 
1250f7917c00SJeff Kirsher 		if (ret < 0) {
1251f7917c00SJeff Kirsher 			CH_ERR(adap, "failed to bind qsets, err %d\n", ret);
1252f7917c00SJeff Kirsher 			t3_intr_disable(adap);
1253f7917c00SJeff Kirsher 			free_irq_resources(adap);
1254f7917c00SJeff Kirsher 			err = ret;
1255f7917c00SJeff Kirsher 			goto out;
1256f7917c00SJeff Kirsher 		}
1257f7917c00SJeff Kirsher 		adap->flags |= QUEUES_BOUND;
1258f7917c00SJeff Kirsher 	}
1259f7917c00SJeff Kirsher 
1260f7917c00SJeff Kirsher out:
1261f7917c00SJeff Kirsher 	return err;
1262f7917c00SJeff Kirsher irq_err:
1263f7917c00SJeff Kirsher 	CH_ERR(adap, "request_irq failed, err %d\n", err);
1264f7917c00SJeff Kirsher 	goto out;
1265f7917c00SJeff Kirsher }
1266f7917c00SJeff Kirsher 
1267f7917c00SJeff Kirsher /*
1268f7917c00SJeff Kirsher  * Release resources when all the ports and offloading have been stopped.
1269f7917c00SJeff Kirsher  */
1270f7917c00SJeff Kirsher static void cxgb_down(struct adapter *adapter, int on_wq)
1271f7917c00SJeff Kirsher {
1272f7917c00SJeff Kirsher 	t3_sge_stop(adapter);
1273f7917c00SJeff Kirsher 	spin_lock_irq(&adapter->work_lock);	/* sync with PHY intr task */
1274f7917c00SJeff Kirsher 	t3_intr_disable(adapter);
1275f7917c00SJeff Kirsher 	spin_unlock_irq(&adapter->work_lock);
1276f7917c00SJeff Kirsher 
1277f7917c00SJeff Kirsher 	free_irq_resources(adapter);
1278f7917c00SJeff Kirsher 	quiesce_rx(adapter);
1279f7917c00SJeff Kirsher 	t3_sge_stop(adapter);
1280f7917c00SJeff Kirsher 	if (!on_wq)
1281f7917c00SJeff Kirsher 		flush_workqueue(cxgb3_wq);	/* wait for external IRQ handler */
1282f7917c00SJeff Kirsher }
1283f7917c00SJeff Kirsher 
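/*
 * Schedule the periodic adapter check task.  The interval is derived from
 * the link-poll period when one is configured, otherwise from the stats
 * update period; a zero interval leaves the task unscheduled.
 */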
1284f7917c00SJeff Kirsher static void schedule_chk_task(struct adapter *adap)
1285f7917c00SJeff Kirsher {
1286f7917c00SJeff Kirsher 	unsigned int timeo;
1287f7917c00SJeff Kirsher 
1288f7917c00SJeff Kirsher 	timeo = adap->params.linkpoll_period ?
1289f7917c00SJeff Kirsher 	    (HZ * adap->params.linkpoll_period) / 10 :
1290f7917c00SJeff Kirsher 	    adap->params.stats_update_period * HZ;
1291f7917c00SJeff Kirsher 	if (timeo)
1292f7917c00SJeff Kirsher 		queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
1293f7917c00SJeff Kirsher }
1294f7917c00SJeff Kirsher 
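/*
 * Enable offload operation the first time an offload-capable port needs it:
 * bring the adapter up if no ports are open yet, switch the TP into offload
 * mode, activate the offload layer, program the MTU/congestion tables and
 * the SMT, and notify all registered ULP clients.
 */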
1295f7917c00SJeff Kirsher static int offload_open(struct net_device *dev)
1296f7917c00SJeff Kirsher {
1297f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(dev);
1298f7917c00SJeff Kirsher 	struct adapter *adapter = pi->adapter;
1299f7917c00SJeff Kirsher 	struct t3cdev *tdev = dev2t3cdev(dev);
1300f7917c00SJeff Kirsher 	int adap_up = adapter->open_device_map & PORT_MASK;
1301f7917c00SJeff Kirsher 	int err;
1302f7917c00SJeff Kirsher 
1303f7917c00SJeff Kirsher 	if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
1304f7917c00SJeff Kirsher 		return 0;
1305f7917c00SJeff Kirsher 
1306f7917c00SJeff Kirsher 	if (!adap_up && (err = cxgb_up(adapter)) < 0)
1307f7917c00SJeff Kirsher 		goto out;
1308f7917c00SJeff Kirsher 
1309f7917c00SJeff Kirsher 	t3_tp_set_offload_mode(adapter, 1);
1310f7917c00SJeff Kirsher 	tdev->lldev = adapter->port[0];
1311f7917c00SJeff Kirsher 	err = cxgb3_offload_activate(adapter);
1312f7917c00SJeff Kirsher 	if (err)
1313f7917c00SJeff Kirsher 		goto out;
1314f7917c00SJeff Kirsher 
1315f7917c00SJeff Kirsher 	init_port_mtus(adapter);
1316f7917c00SJeff Kirsher 	t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
1317f7917c00SJeff Kirsher 		     adapter->params.b_wnd,
1318f7917c00SJeff Kirsher 		     adapter->params.rev == 0 ?
1319f7917c00SJeff Kirsher 		     adapter->port[0]->mtu : 0xffff);
1320f7917c00SJeff Kirsher 	init_smt(adapter);
1321f7917c00SJeff Kirsher 
1322f7917c00SJeff Kirsher 	if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
1323f7917c00SJeff Kirsher 		dev_dbg(&dev->dev, "cannot create sysfs group\n");
1324f7917c00SJeff Kirsher 
1325f7917c00SJeff Kirsher 	/* Call back all registered clients */
1326f7917c00SJeff Kirsher 	cxgb3_add_clients(tdev);
1327f7917c00SJeff Kirsher 
1328f7917c00SJeff Kirsher out:
1329f7917c00SJeff Kirsher 	/* on error, undo the offload setup done above */
1330f7917c00SJeff Kirsher 	if (err) {
1331f7917c00SJeff Kirsher 		t3_tp_set_offload_mode(adapter, 0);
1332f7917c00SJeff Kirsher 		clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
1333f7917c00SJeff Kirsher 		cxgb3_set_dummy_ops(tdev);
1334f7917c00SJeff Kirsher 	}
1335f7917c00SJeff Kirsher 	return err;
1336f7917c00SJeff Kirsher }
1337f7917c00SJeff Kirsher 
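/*
 * Disable offload operation: notify ULP clients, flush any pending TID
 * release work, return the TP to NIC-only mode and, if no ports remain
 * open, bring the whole adapter down.
 */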
1338f7917c00SJeff Kirsher static int offload_close(struct t3cdev *tdev)
1339f7917c00SJeff Kirsher {
1340f7917c00SJeff Kirsher 	struct adapter *adapter = tdev2adap(tdev);
1341f7917c00SJeff Kirsher 	struct t3c_data *td = T3C_DATA(tdev);
1342f7917c00SJeff Kirsher 
1343f7917c00SJeff Kirsher 	if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
1344f7917c00SJeff Kirsher 		return 0;
1345f7917c00SJeff Kirsher 
1346f7917c00SJeff Kirsher 	/* Call back all registered clients */
1347f7917c00SJeff Kirsher 	cxgb3_remove_clients(tdev);
1348f7917c00SJeff Kirsher 
1349f7917c00SJeff Kirsher 	sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);
1350f7917c00SJeff Kirsher 
1351f7917c00SJeff Kirsher 	/* Flush work scheduled while releasing TIDs */
1352f7917c00SJeff Kirsher 	flush_work_sync(&td->tid_release_task);
1353f7917c00SJeff Kirsher 
1354f7917c00SJeff Kirsher 	tdev->lldev = NULL;
1355f7917c00SJeff Kirsher 	cxgb3_set_dummy_ops(tdev);
1356f7917c00SJeff Kirsher 	t3_tp_set_offload_mode(adapter, 0);
1357f7917c00SJeff Kirsher 	clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
1358f7917c00SJeff Kirsher 
1359f7917c00SJeff Kirsher 	if (!adapter->open_device_map)
1360f7917c00SJeff Kirsher 		cxgb_down(adapter, 0);
1361f7917c00SJeff Kirsher 
1362f7917c00SJeff Kirsher 	cxgb3_offload_deactivate(adapter);
1363f7917c00SJeff Kirsher 	return 0;
1364f7917c00SJeff Kirsher }
1365f7917c00SJeff Kirsher 
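/*
 * ndo_open handler: brings the adapter up on the first open, enables
 * offload support where available, sizes the port's TX/RX queues, starts
 * the link and port interrupts, and schedules the periodic check task
 * when this is the first active port.
 */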
1366f7917c00SJeff Kirsher static int cxgb_open(struct net_device *dev)
1367f7917c00SJeff Kirsher {
1368f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(dev);
1369f7917c00SJeff Kirsher 	struct adapter *adapter = pi->adapter;
1370f7917c00SJeff Kirsher 	int other_ports = adapter->open_device_map & PORT_MASK;
1371f7917c00SJeff Kirsher 	int err;
1372f7917c00SJeff Kirsher 
1373f7917c00SJeff Kirsher 	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
1374f7917c00SJeff Kirsher 		return err;
1375f7917c00SJeff Kirsher 
1376f7917c00SJeff Kirsher 	set_bit(pi->port_id, &adapter->open_device_map);
1377f7917c00SJeff Kirsher 	if (is_offload(adapter) && !ofld_disable) {
1378f7917c00SJeff Kirsher 		err = offload_open(dev);
1379f7917c00SJeff Kirsher 		if (err)
1380f7917c00SJeff Kirsher 			printk(KERN_WARNING
1381f7917c00SJeff Kirsher 			       "Could not initialize offload capabilities\n");
1382f7917c00SJeff Kirsher 	}
1383f7917c00SJeff Kirsher 
1384f7917c00SJeff Kirsher 	netif_set_real_num_tx_queues(dev, pi->nqsets);
1385f7917c00SJeff Kirsher 	err = netif_set_real_num_rx_queues(dev, pi->nqsets);
1386f7917c00SJeff Kirsher 	if (err)
1387f7917c00SJeff Kirsher 		return err;
1388f7917c00SJeff Kirsher 	link_start(dev);
1389f7917c00SJeff Kirsher 	t3_port_intr_enable(adapter, pi->port_id);
1390f7917c00SJeff Kirsher 	netif_tx_start_all_queues(dev);
1391f7917c00SJeff Kirsher 	if (!other_ports)
1392f7917c00SJeff Kirsher 		schedule_chk_task(adapter);
1393f7917c00SJeff Kirsher 
1394f7917c00SJeff Kirsher 	cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_UP, pi->port_id);
1395f7917c00SJeff Kirsher 	return 0;
1396f7917c00SJeff Kirsher }
1397f7917c00SJeff Kirsher 
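/*
 * Common close path, also reachable from workqueue context (on_wq):
 * quiesce the port (interrupts, queues, PHY, MAC), drop it from the
 * open-device map, and bring the adapter down once the last user is gone.
 */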
1398f7917c00SJeff Kirsher static int __cxgb_close(struct net_device *dev, int on_wq)
1399f7917c00SJeff Kirsher {
1400f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(dev);
1401f7917c00SJeff Kirsher 	struct adapter *adapter = pi->adapter;
1402f7917c00SJeff Kirsher 
1404f7917c00SJeff Kirsher 	if (!adapter->open_device_map)
1405f7917c00SJeff Kirsher 		return 0;
1406f7917c00SJeff Kirsher 
1407f7917c00SJeff Kirsher 	/* Stop link fault interrupts */
1408f7917c00SJeff Kirsher 	t3_xgm_intr_disable(adapter, pi->port_id);
1409f7917c00SJeff Kirsher 	t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
1410f7917c00SJeff Kirsher 
1411f7917c00SJeff Kirsher 	t3_port_intr_disable(adapter, pi->port_id);
1412f7917c00SJeff Kirsher 	netif_tx_stop_all_queues(dev);
1413f7917c00SJeff Kirsher 	pi->phy.ops->power_down(&pi->phy, 1);
1414f7917c00SJeff Kirsher 	netif_carrier_off(dev);
1415f7917c00SJeff Kirsher 	t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
1416f7917c00SJeff Kirsher 
1417f7917c00SJeff Kirsher 	spin_lock_irq(&adapter->work_lock);	/* sync with update task */
1418f7917c00SJeff Kirsher 	clear_bit(pi->port_id, &adapter->open_device_map);
1419f7917c00SJeff Kirsher 	spin_unlock_irq(&adapter->work_lock);
1420f7917c00SJeff Kirsher 
1421f7917c00SJeff Kirsher 	if (!(adapter->open_device_map & PORT_MASK))
1422f7917c00SJeff Kirsher 		cancel_delayed_work_sync(&adapter->adap_check_task);
1423f7917c00SJeff Kirsher 
1424f7917c00SJeff Kirsher 	if (!adapter->open_device_map)
1425f7917c00SJeff Kirsher 		cxgb_down(adapter, on_wq);
1426f7917c00SJeff Kirsher 
1427f7917c00SJeff Kirsher 	cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_DOWN, pi->port_id);
1428f7917c00SJeff Kirsher 	return 0;
1429f7917c00SJeff Kirsher }
1430f7917c00SJeff Kirsher 
1431f7917c00SJeff Kirsher static int cxgb_close(struct net_device *dev)
1432f7917c00SJeff Kirsher {
1433f7917c00SJeff Kirsher 	return __cxgb_close(dev, 0);
1434f7917c00SJeff Kirsher }
1435f7917c00SJeff Kirsher 
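/*
 * ndo_get_stats handler: translate the accumulated MAC statistics into
 * struct net_device_stats; stats_lock serializes this with the periodic
 * MAC statistics update.
 */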
1436f7917c00SJeff Kirsher static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
1437f7917c00SJeff Kirsher {
1438f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(dev);
1439f7917c00SJeff Kirsher 	struct adapter *adapter = pi->adapter;
1440f7917c00SJeff Kirsher 	struct net_device_stats *ns = &pi->netstats;
1441f7917c00SJeff Kirsher 	const struct mac_stats *pstats;
1442f7917c00SJeff Kirsher 
1443f7917c00SJeff Kirsher 	spin_lock(&adapter->stats_lock);
1444f7917c00SJeff Kirsher 	pstats = t3_mac_update_stats(&pi->mac);
1445f7917c00SJeff Kirsher 	spin_unlock(&adapter->stats_lock);
1446f7917c00SJeff Kirsher 
1447f7917c00SJeff Kirsher 	ns->tx_bytes = pstats->tx_octets;
1448f7917c00SJeff Kirsher 	ns->tx_packets = pstats->tx_frames;
1449f7917c00SJeff Kirsher 	ns->rx_bytes = pstats->rx_octets;
1450f7917c00SJeff Kirsher 	ns->rx_packets = pstats->rx_frames;
1451f7917c00SJeff Kirsher 	ns->multicast = pstats->rx_mcast_frames;
1452f7917c00SJeff Kirsher 
1453f7917c00SJeff Kirsher 	ns->tx_errors = pstats->tx_underrun;
1454f7917c00SJeff Kirsher 	ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
1455f7917c00SJeff Kirsher 	    pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
1456f7917c00SJeff Kirsher 	    pstats->rx_fifo_ovfl;
1457f7917c00SJeff Kirsher 
1458f7917c00SJeff Kirsher 	/* detailed rx_errors */
1459f7917c00SJeff Kirsher 	ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
1460f7917c00SJeff Kirsher 	ns->rx_over_errors = 0;
1461f7917c00SJeff Kirsher 	ns->rx_crc_errors = pstats->rx_fcs_errs;
1462f7917c00SJeff Kirsher 	ns->rx_frame_errors = pstats->rx_symbol_errs;
1463f7917c00SJeff Kirsher 	ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
1464f7917c00SJeff Kirsher 	ns->rx_missed_errors = pstats->rx_cong_drops;
1465f7917c00SJeff Kirsher 
1466f7917c00SJeff Kirsher 	/* detailed tx_errors */
1467f7917c00SJeff Kirsher 	ns->tx_aborted_errors = 0;
1468f7917c00SJeff Kirsher 	ns->tx_carrier_errors = 0;
1469f7917c00SJeff Kirsher 	ns->tx_fifo_errors = pstats->tx_underrun;
1470f7917c00SJeff Kirsher 	ns->tx_heartbeat_errors = 0;
1471f7917c00SJeff Kirsher 	ns->tx_window_errors = 0;
1472f7917c00SJeff Kirsher 	return ns;
1473f7917c00SJeff Kirsher }
1474f7917c00SJeff Kirsher 
1475f7917c00SJeff Kirsher static u32 get_msglevel(struct net_device *dev)
1476f7917c00SJeff Kirsher {
1477f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(dev);
1478f7917c00SJeff Kirsher 	struct adapter *adapter = pi->adapter;
1479f7917c00SJeff Kirsher 
1480f7917c00SJeff Kirsher 	return adapter->msg_enable;
1481f7917c00SJeff Kirsher }
1482f7917c00SJeff Kirsher 
1483f7917c00SJeff Kirsher static void set_msglevel(struct net_device *dev, u32 val)
1484f7917c00SJeff Kirsher {
1485f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(dev);
1486f7917c00SJeff Kirsher 	struct adapter *adapter = pi->adapter;
1487f7917c00SJeff Kirsher 
1488f7917c00SJeff Kirsher 	adapter->msg_enable = val;
1489f7917c00SJeff Kirsher }
1490f7917c00SJeff Kirsher 
1491f7917c00SJeff Kirsher static char stats_strings[][ETH_GSTRING_LEN] = {
1492f7917c00SJeff Kirsher 	"TxOctetsOK         ",
1493f7917c00SJeff Kirsher 	"TxFramesOK         ",
1494f7917c00SJeff Kirsher 	"TxMulticastFramesOK",
1495f7917c00SJeff Kirsher 	"TxBroadcastFramesOK",
1496f7917c00SJeff Kirsher 	"TxPauseFrames      ",
1497f7917c00SJeff Kirsher 	"TxUnderrun         ",
1498f7917c00SJeff Kirsher 	"TxExtUnderrun      ",
1499f7917c00SJeff Kirsher 
1500f7917c00SJeff Kirsher 	"TxFrames64         ",
1501f7917c00SJeff Kirsher 	"TxFrames65To127    ",
1502f7917c00SJeff Kirsher 	"TxFrames128To255   ",
1503f7917c00SJeff Kirsher 	"TxFrames256To511   ",
1504f7917c00SJeff Kirsher 	"TxFrames512To1023  ",
1505f7917c00SJeff Kirsher 	"TxFrames1024To1518 ",
1506f7917c00SJeff Kirsher 	"TxFrames1519ToMax  ",
1507f7917c00SJeff Kirsher 
1508f7917c00SJeff Kirsher 	"RxOctetsOK         ",
1509f7917c00SJeff Kirsher 	"RxFramesOK         ",
1510f7917c00SJeff Kirsher 	"RxMulticastFramesOK",
1511f7917c00SJeff Kirsher 	"RxBroadcastFramesOK",
1512f7917c00SJeff Kirsher 	"RxPauseFrames      ",
1513f7917c00SJeff Kirsher 	"RxFCSErrors        ",
1514f7917c00SJeff Kirsher 	"RxSymbolErrors     ",
1515f7917c00SJeff Kirsher 	"RxShortErrors      ",
1516f7917c00SJeff Kirsher 	"RxJabberErrors     ",
1517f7917c00SJeff Kirsher 	"RxLengthErrors     ",
1518f7917c00SJeff Kirsher 	"RxFIFOoverflow     ",
1519f7917c00SJeff Kirsher 
1520f7917c00SJeff Kirsher 	"RxFrames64         ",
1521f7917c00SJeff Kirsher 	"RxFrames65To127    ",
1522f7917c00SJeff Kirsher 	"RxFrames128To255   ",
1523f7917c00SJeff Kirsher 	"RxFrames256To511   ",
1524f7917c00SJeff Kirsher 	"RxFrames512To1023  ",
1525f7917c00SJeff Kirsher 	"RxFrames1024To1518 ",
1526f7917c00SJeff Kirsher 	"RxFrames1519ToMax  ",
1527f7917c00SJeff Kirsher 
1528f7917c00SJeff Kirsher 	"PhyFIFOErrors      ",
1529f7917c00SJeff Kirsher 	"TSO                ",
1530f7917c00SJeff Kirsher 	"VLANextractions    ",
1531f7917c00SJeff Kirsher 	"VLANinsertions     ",
1532f7917c00SJeff Kirsher 	"TxCsumOffload      ",
1533f7917c00SJeff Kirsher 	"RxCsumGood         ",
1534f7917c00SJeff Kirsher 	"LroAggregated      ",
1535f7917c00SJeff Kirsher 	"LroFlushed         ",
1536f7917c00SJeff Kirsher 	"LroNoDesc          ",
1537f7917c00SJeff Kirsher 	"RxDrops            ",
1538f7917c00SJeff Kirsher 
1539f7917c00SJeff Kirsher 	"CheckTXEnToggled   ",
1540f7917c00SJeff Kirsher 	"CheckResets        ",
1541f7917c00SJeff Kirsher 
1542f7917c00SJeff Kirsher 	"LinkFaults         ",
1543f7917c00SJeff Kirsher };
1544f7917c00SJeff Kirsher 
1545f7917c00SJeff Kirsher static int get_sset_count(struct net_device *dev, int sset)
1546f7917c00SJeff Kirsher {
1547f7917c00SJeff Kirsher 	switch (sset) {
1548f7917c00SJeff Kirsher 	case ETH_SS_STATS:
1549f7917c00SJeff Kirsher 		return ARRAY_SIZE(stats_strings);
1550f7917c00SJeff Kirsher 	default:
1551f7917c00SJeff Kirsher 		return -EOPNOTSUPP;
1552f7917c00SJeff Kirsher 	}
1553f7917c00SJeff Kirsher }
1554f7917c00SJeff Kirsher 
1555f7917c00SJeff Kirsher #define T3_REGMAP_SIZE (3 * 1024)
1556f7917c00SJeff Kirsher 
1557f7917c00SJeff Kirsher static int get_regs_len(struct net_device *dev)
1558f7917c00SJeff Kirsher {
1559f7917c00SJeff Kirsher 	return T3_REGMAP_SIZE;
1560f7917c00SJeff Kirsher }
1561f7917c00SJeff Kirsher 
1562f7917c00SJeff Kirsher static int get_eeprom_len(struct net_device *dev)
1563f7917c00SJeff Kirsher {
1564f7917c00SJeff Kirsher 	return EEPROMSIZE;
1565f7917c00SJeff Kirsher }
1566f7917c00SJeff Kirsher 
1567f7917c00SJeff Kirsher static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1568f7917c00SJeff Kirsher {
1569f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(dev);
1570f7917c00SJeff Kirsher 	struct adapter *adapter = pi->adapter;
1571f7917c00SJeff Kirsher 	u32 fw_vers = 0;
1572f7917c00SJeff Kirsher 	u32 tp_vers = 0;
1573f7917c00SJeff Kirsher 
1574f7917c00SJeff Kirsher 	spin_lock(&adapter->stats_lock);
1575f7917c00SJeff Kirsher 	t3_get_fw_version(adapter, &fw_vers);
1576f7917c00SJeff Kirsher 	t3_get_tp_version(adapter, &tp_vers);
1577f7917c00SJeff Kirsher 	spin_unlock(&adapter->stats_lock);
1578f7917c00SJeff Kirsher 
1579f7917c00SJeff Kirsher 	strcpy(info->driver, DRV_NAME);
1580f7917c00SJeff Kirsher 	strcpy(info->version, DRV_VERSION);
1581f7917c00SJeff Kirsher 	strcpy(info->bus_info, pci_name(adapter->pdev));
1582f7917c00SJeff Kirsher 	if (!fw_vers)
1583f7917c00SJeff Kirsher 		strcpy(info->fw_version, "N/A");
1584f7917c00SJeff Kirsher 	else {
1585f7917c00SJeff Kirsher 		snprintf(info->fw_version, sizeof(info->fw_version),
1586f7917c00SJeff Kirsher 			 "%s %u.%u.%u TP %u.%u.%u",
1587f7917c00SJeff Kirsher 			 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
1588f7917c00SJeff Kirsher 			 G_FW_VERSION_MAJOR(fw_vers),
1589f7917c00SJeff Kirsher 			 G_FW_VERSION_MINOR(fw_vers),
1590f7917c00SJeff Kirsher 			 G_FW_VERSION_MICRO(fw_vers),
1591f7917c00SJeff Kirsher 			 G_TP_VERSION_MAJOR(tp_vers),
1592f7917c00SJeff Kirsher 			 G_TP_VERSION_MINOR(tp_vers),
1593f7917c00SJeff Kirsher 			 G_TP_VERSION_MICRO(tp_vers));
1594f7917c00SJeff Kirsher 	}
1595f7917c00SJeff Kirsher }
1596f7917c00SJeff Kirsher 
1597f7917c00SJeff Kirsher static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
1598f7917c00SJeff Kirsher {
1599f7917c00SJeff Kirsher 	if (stringset == ETH_SS_STATS)
1600f7917c00SJeff Kirsher 		memcpy(data, stats_strings, sizeof(stats_strings));
1601f7917c00SJeff Kirsher }
1602f7917c00SJeff Kirsher 
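/*
 * Sum one per-queue-set SGE statistic (selected by idx) over all the
 * queue sets owned by a port.
 */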
1603f7917c00SJeff Kirsher static unsigned long collect_sge_port_stats(struct adapter *adapter,
1604f7917c00SJeff Kirsher 					    struct port_info *p, int idx)
1605f7917c00SJeff Kirsher {
1606f7917c00SJeff Kirsher 	int i;
1607f7917c00SJeff Kirsher 	unsigned long tot = 0;
1608f7917c00SJeff Kirsher 
1609f7917c00SJeff Kirsher 	for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
1610f7917c00SJeff Kirsher 		tot += adapter->sge.qs[i].port_stats[idx];
1611f7917c00SJeff Kirsher 	return tot;
1612f7917c00SJeff Kirsher }
1613f7917c00SJeff Kirsher 
1614f7917c00SJeff Kirsher static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1615f7917c00SJeff Kirsher 		      u64 *data)
1616f7917c00SJeff Kirsher {
1617f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(dev);
1618f7917c00SJeff Kirsher 	struct adapter *adapter = pi->adapter;
1619f7917c00SJeff Kirsher 	const struct mac_stats *s;
1620f7917c00SJeff Kirsher 
1621f7917c00SJeff Kirsher 	spin_lock(&adapter->stats_lock);
1622f7917c00SJeff Kirsher 	s = t3_mac_update_stats(&pi->mac);
1623f7917c00SJeff Kirsher 	spin_unlock(&adapter->stats_lock);
1624f7917c00SJeff Kirsher 
1625f7917c00SJeff Kirsher 	*data++ = s->tx_octets;
1626f7917c00SJeff Kirsher 	*data++ = s->tx_frames;
1627f7917c00SJeff Kirsher 	*data++ = s->tx_mcast_frames;
1628f7917c00SJeff Kirsher 	*data++ = s->tx_bcast_frames;
1629f7917c00SJeff Kirsher 	*data++ = s->tx_pause;
1630f7917c00SJeff Kirsher 	*data++ = s->tx_underrun;
1631f7917c00SJeff Kirsher 	*data++ = s->tx_fifo_urun;
1632f7917c00SJeff Kirsher 
1633f7917c00SJeff Kirsher 	*data++ = s->tx_frames_64;
1634f7917c00SJeff Kirsher 	*data++ = s->tx_frames_65_127;
1635f7917c00SJeff Kirsher 	*data++ = s->tx_frames_128_255;
1636f7917c00SJeff Kirsher 	*data++ = s->tx_frames_256_511;
1637f7917c00SJeff Kirsher 	*data++ = s->tx_frames_512_1023;
1638f7917c00SJeff Kirsher 	*data++ = s->tx_frames_1024_1518;
1639f7917c00SJeff Kirsher 	*data++ = s->tx_frames_1519_max;
1640f7917c00SJeff Kirsher 
1641f7917c00SJeff Kirsher 	*data++ = s->rx_octets;
1642f7917c00SJeff Kirsher 	*data++ = s->rx_frames;
1643f7917c00SJeff Kirsher 	*data++ = s->rx_mcast_frames;
1644f7917c00SJeff Kirsher 	*data++ = s->rx_bcast_frames;
1645f7917c00SJeff Kirsher 	*data++ = s->rx_pause;
1646f7917c00SJeff Kirsher 	*data++ = s->rx_fcs_errs;
1647f7917c00SJeff Kirsher 	*data++ = s->rx_symbol_errs;
1648f7917c00SJeff Kirsher 	*data++ = s->rx_short;
1649f7917c00SJeff Kirsher 	*data++ = s->rx_jabber;
1650f7917c00SJeff Kirsher 	*data++ = s->rx_too_long;
1651f7917c00SJeff Kirsher 	*data++ = s->rx_fifo_ovfl;
1652f7917c00SJeff Kirsher 
1653f7917c00SJeff Kirsher 	*data++ = s->rx_frames_64;
1654f7917c00SJeff Kirsher 	*data++ = s->rx_frames_65_127;
1655f7917c00SJeff Kirsher 	*data++ = s->rx_frames_128_255;
1656f7917c00SJeff Kirsher 	*data++ = s->rx_frames_256_511;
1657f7917c00SJeff Kirsher 	*data++ = s->rx_frames_512_1023;
1658f7917c00SJeff Kirsher 	*data++ = s->rx_frames_1024_1518;
1659f7917c00SJeff Kirsher 	*data++ = s->rx_frames_1519_max;
1660f7917c00SJeff Kirsher 
1661f7917c00SJeff Kirsher 	*data++ = pi->phy.fifo_errors;
1662f7917c00SJeff Kirsher 
1663f7917c00SJeff Kirsher 	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
1664f7917c00SJeff Kirsher 	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
1665f7917c00SJeff Kirsher 	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
1666f7917c00SJeff Kirsher 	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
1667f7917c00SJeff Kirsher 	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
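	/*
	 * The three Lro* slots in stats_strings are retained, but LRO
	 * aggregation is handled through GRO, so they report zero.
	 */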
1668f7917c00SJeff Kirsher 	*data++ = 0;
1669f7917c00SJeff Kirsher 	*data++ = 0;
1670f7917c00SJeff Kirsher 	*data++ = 0;
1671f7917c00SJeff Kirsher 	*data++ = s->rx_cong_drops;
1672f7917c00SJeff Kirsher 
1673f7917c00SJeff Kirsher 	*data++ = s->num_toggled;
1674f7917c00SJeff Kirsher 	*data++ = s->num_resets;
1675f7917c00SJeff Kirsher 
1676f7917c00SJeff Kirsher 	*data++ = s->link_faults;
1677f7917c00SJeff Kirsher }
1678f7917c00SJeff Kirsher 
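/*
 * Read the contiguous register range [start, end] into the register dump
 * buffer at the offset corresponding to the register addresses.
 */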
1679f7917c00SJeff Kirsher static inline void reg_block_dump(struct adapter *ap, void *buf,
1680f7917c00SJeff Kirsher 				  unsigned int start, unsigned int end)
1681f7917c00SJeff Kirsher {
1682f7917c00SJeff Kirsher 	u32 *p = buf + start;
1683f7917c00SJeff Kirsher 
1684f7917c00SJeff Kirsher 	for (; start <= end; start += sizeof(u32))
1685f7917c00SJeff Kirsher 		*p++ = t3_read_reg(ap, start);
1686f7917c00SJeff Kirsher }
1687f7917c00SJeff Kirsher 
1688f7917c00SJeff Kirsher static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1689f7917c00SJeff Kirsher 		     void *buf)
1690f7917c00SJeff Kirsher {
1691f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(dev);
1692f7917c00SJeff Kirsher 	struct adapter *ap = pi->adapter;
1693f7917c00SJeff Kirsher 
1694f7917c00SJeff Kirsher 	/*
1695f7917c00SJeff Kirsher 	 * Version scheme:
1696f7917c00SJeff Kirsher 	 * bits 0..9: chip version
1697f7917c00SJeff Kirsher 	 * bits 10..15: chip revision
1698f7917c00SJeff Kirsher 	 * bit 31: set for PCIe cards
1699f7917c00SJeff Kirsher 	 */
1700f7917c00SJeff Kirsher 	regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
1701f7917c00SJeff Kirsher 
1702f7917c00SJeff Kirsher 	/*
1703f7917c00SJeff Kirsher 	 * We skip the MAC statistics registers because they are clear-on-read.
1704f7917c00SJeff Kirsher 	 * Also reading multi-register stats would need to synchronize with the
1705f7917c00SJeff Kirsher 	 * periodic mac stats accumulation.  Hard to justify the complexity.
1706f7917c00SJeff Kirsher 	 */
1707f7917c00SJeff Kirsher 	memset(buf, 0, T3_REGMAP_SIZE);
1708f7917c00SJeff Kirsher 	reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
1709f7917c00SJeff Kirsher 	reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
1710f7917c00SJeff Kirsher 	reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
1711f7917c00SJeff Kirsher 	reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
1712f7917c00SJeff Kirsher 	reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
1713f7917c00SJeff Kirsher 	reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
1714f7917c00SJeff Kirsher 		       XGM_REG(A_XGM_SERDES_STAT3, 1));
1715f7917c00SJeff Kirsher 	reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
1716f7917c00SJeff Kirsher 		       XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
1717f7917c00SJeff Kirsher }
1718f7917c00SJeff Kirsher 
1719f7917c00SJeff Kirsher static int restart_autoneg(struct net_device *dev)
1720f7917c00SJeff Kirsher {
1721f7917c00SJeff Kirsher 	struct port_info *p = netdev_priv(dev);
1722f7917c00SJeff Kirsher 
1723f7917c00SJeff Kirsher 	if (!netif_running(dev))
1724f7917c00SJeff Kirsher 		return -EAGAIN;
1725f7917c00SJeff Kirsher 	if (p->link_config.autoneg != AUTONEG_ENABLE)
1726f7917c00SJeff Kirsher 		return -EINVAL;
1727f7917c00SJeff Kirsher 	p->phy.ops->autoneg_restart(&p->phy);
1728f7917c00SJeff Kirsher 	return 0;
1729f7917c00SJeff Kirsher }
1730f7917c00SJeff Kirsher 
1731f7917c00SJeff Kirsher static int set_phys_id(struct net_device *dev,
1732f7917c00SJeff Kirsher 		       enum ethtool_phys_id_state state)
1733f7917c00SJeff Kirsher {
1734f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(dev);
1735f7917c00SJeff Kirsher 	struct adapter *adapter = pi->adapter;
1736f7917c00SJeff Kirsher 
1737f7917c00SJeff Kirsher 	switch (state) {
1738f7917c00SJeff Kirsher 	case ETHTOOL_ID_ACTIVE:
1739f7917c00SJeff Kirsher 		return 1;	/* cycle on/off once per second */
1740f7917c00SJeff Kirsher 
1741f7917c00SJeff Kirsher 	case ETHTOOL_ID_OFF:
1742f7917c00SJeff Kirsher 		t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL, 0);
1743f7917c00SJeff Kirsher 		break;
1744f7917c00SJeff Kirsher 
1745f7917c00SJeff Kirsher 	case ETHTOOL_ID_ON:
1746f7917c00SJeff Kirsher 	case ETHTOOL_ID_INACTIVE:
1747f7917c00SJeff Kirsher 		t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1748f7917c00SJeff Kirsher 			 F_GPIO0_OUT_VAL);
1749f7917c00SJeff Kirsher 	}
1750f7917c00SJeff Kirsher 
1751f7917c00SJeff Kirsher 	return 0;
1752f7917c00SJeff Kirsher }
1753f7917c00SJeff Kirsher 
1754f7917c00SJeff Kirsher static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1755f7917c00SJeff Kirsher {
1756f7917c00SJeff Kirsher 	struct port_info *p = netdev_priv(dev);
1757f7917c00SJeff Kirsher 
1758f7917c00SJeff Kirsher 	cmd->supported = p->link_config.supported;
1759f7917c00SJeff Kirsher 	cmd->advertising = p->link_config.advertising;
1760f7917c00SJeff Kirsher 
1761f7917c00SJeff Kirsher 	if (netif_carrier_ok(dev)) {
1762f7917c00SJeff Kirsher 		ethtool_cmd_speed_set(cmd, p->link_config.speed);
1763f7917c00SJeff Kirsher 		cmd->duplex = p->link_config.duplex;
1764f7917c00SJeff Kirsher 	} else {
1765f7917c00SJeff Kirsher 		ethtool_cmd_speed_set(cmd, -1);
1766f7917c00SJeff Kirsher 		cmd->duplex = -1;
1767f7917c00SJeff Kirsher 	}
1768f7917c00SJeff Kirsher 
1769f7917c00SJeff Kirsher 	cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1770f7917c00SJeff Kirsher 	cmd->phy_address = p->phy.mdio.prtad;
1771f7917c00SJeff Kirsher 	cmd->transceiver = XCVR_EXTERNAL;
1772f7917c00SJeff Kirsher 	cmd->autoneg = p->link_config.autoneg;
1773f7917c00SJeff Kirsher 	cmd->maxtxpkt = 0;
1774f7917c00SJeff Kirsher 	cmd->maxrxpkt = 0;
1775f7917c00SJeff Kirsher 	return 0;
1776f7917c00SJeff Kirsher }
1777f7917c00SJeff Kirsher 
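/*
 * Translate an ethtool speed/duplex pair into the matching SUPPORTED_*
 * capability bit, or 0 if the combination is not representable.
 */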
1778f7917c00SJeff Kirsher static int speed_duplex_to_caps(int speed, int duplex)
1779f7917c00SJeff Kirsher {
1780f7917c00SJeff Kirsher 	int cap = 0;
1781f7917c00SJeff Kirsher 
1782f7917c00SJeff Kirsher 	switch (speed) {
1783f7917c00SJeff Kirsher 	case SPEED_10:
1784f7917c00SJeff Kirsher 		if (duplex == DUPLEX_FULL)
1785f7917c00SJeff Kirsher 			cap = SUPPORTED_10baseT_Full;
1786f7917c00SJeff Kirsher 		else
1787f7917c00SJeff Kirsher 			cap = SUPPORTED_10baseT_Half;
1788f7917c00SJeff Kirsher 		break;
1789f7917c00SJeff Kirsher 	case SPEED_100:
1790f7917c00SJeff Kirsher 		if (duplex == DUPLEX_FULL)
1791f7917c00SJeff Kirsher 			cap = SUPPORTED_100baseT_Full;
1792f7917c00SJeff Kirsher 		else
1793f7917c00SJeff Kirsher 			cap = SUPPORTED_100baseT_Half;
1794f7917c00SJeff Kirsher 		break;
1795f7917c00SJeff Kirsher 	case SPEED_1000:
1796f7917c00SJeff Kirsher 		if (duplex == DUPLEX_FULL)
1797f7917c00SJeff Kirsher 			cap = SUPPORTED_1000baseT_Full;
1798f7917c00SJeff Kirsher 		else
1799f7917c00SJeff Kirsher 			cap = SUPPORTED_1000baseT_Half;
1800f7917c00SJeff Kirsher 		break;
1801f7917c00SJeff Kirsher 	case SPEED_10000:
1802f7917c00SJeff Kirsher 		if (duplex == DUPLEX_FULL)
1803f7917c00SJeff Kirsher 			cap = SUPPORTED_10000baseT_Full;
1804f7917c00SJeff Kirsher 	}
1805f7917c00SJeff Kirsher 	return cap;
1806f7917c00SJeff Kirsher }
1807f7917c00SJeff Kirsher 
1808f7917c00SJeff Kirsher #define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1809f7917c00SJeff Kirsher 		      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1810f7917c00SJeff Kirsher 		      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1811f7917c00SJeff Kirsher 		      ADVERTISED_10000baseT_Full)
1812f7917c00SJeff Kirsher 
1813f7917c00SJeff Kirsher static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1814f7917c00SJeff Kirsher {
1815f7917c00SJeff Kirsher 	struct port_info *p = netdev_priv(dev);
1816f7917c00SJeff Kirsher 	struct link_config *lc = &p->link_config;
1817f7917c00SJeff Kirsher 
1818f7917c00SJeff Kirsher 	if (!(lc->supported & SUPPORTED_Autoneg)) {
1819f7917c00SJeff Kirsher 		/*
1820f7917c00SJeff Kirsher 		 * PHY offers a single speed/duplex.  See if that's what's
1821f7917c00SJeff Kirsher 		 * being requested.
1822f7917c00SJeff Kirsher 		 */
1823f7917c00SJeff Kirsher 		if (cmd->autoneg == AUTONEG_DISABLE) {
1824f7917c00SJeff Kirsher 			u32 speed = ethtool_cmd_speed(cmd);
1825f7917c00SJeff Kirsher 			int cap = speed_duplex_to_caps(speed, cmd->duplex);
1826f7917c00SJeff Kirsher 			if (lc->supported & cap)
1827f7917c00SJeff Kirsher 				return 0;
1828f7917c00SJeff Kirsher 		}
1829f7917c00SJeff Kirsher 		return -EINVAL;
1830f7917c00SJeff Kirsher 	}
1831f7917c00SJeff Kirsher 
1832f7917c00SJeff Kirsher 	if (cmd->autoneg == AUTONEG_DISABLE) {
1833f7917c00SJeff Kirsher 		u32 speed = ethtool_cmd_speed(cmd);
1834f7917c00SJeff Kirsher 		int cap = speed_duplex_to_caps(speed, cmd->duplex);
1835f7917c00SJeff Kirsher 
1836f7917c00SJeff Kirsher 		if (!(lc->supported & cap) || (speed == SPEED_1000))
1837f7917c00SJeff Kirsher 			return -EINVAL;
1838f7917c00SJeff Kirsher 		lc->requested_speed = speed;
1839f7917c00SJeff Kirsher 		lc->requested_duplex = cmd->duplex;
1840f7917c00SJeff Kirsher 		lc->advertising = 0;
1841f7917c00SJeff Kirsher 	} else {
1842f7917c00SJeff Kirsher 		cmd->advertising &= ADVERTISED_MASK;
1843f7917c00SJeff Kirsher 		cmd->advertising &= lc->supported;
1844f7917c00SJeff Kirsher 		if (!cmd->advertising)
1845f7917c00SJeff Kirsher 			return -EINVAL;
1846f7917c00SJeff Kirsher 		lc->requested_speed = SPEED_INVALID;
1847f7917c00SJeff Kirsher 		lc->requested_duplex = DUPLEX_INVALID;
1848f7917c00SJeff Kirsher 		lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
1849f7917c00SJeff Kirsher 	}
1850f7917c00SJeff Kirsher 	lc->autoneg = cmd->autoneg;
1851f7917c00SJeff Kirsher 	if (netif_running(dev))
1852f7917c00SJeff Kirsher 		t3_link_start(&p->phy, &p->mac, lc);
1853f7917c00SJeff Kirsher 	return 0;
1854f7917c00SJeff Kirsher }
1855f7917c00SJeff Kirsher 
1856f7917c00SJeff Kirsher static void get_pauseparam(struct net_device *dev,
1857f7917c00SJeff Kirsher 			   struct ethtool_pauseparam *epause)
1858f7917c00SJeff Kirsher {
1859f7917c00SJeff Kirsher 	struct port_info *p = netdev_priv(dev);
1860f7917c00SJeff Kirsher 
1861f7917c00SJeff Kirsher 	epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1862f7917c00SJeff Kirsher 	epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1863f7917c00SJeff Kirsher 	epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
1864f7917c00SJeff Kirsher }
1865f7917c00SJeff Kirsher 
1866f7917c00SJeff Kirsher static int set_pauseparam(struct net_device *dev,
1867f7917c00SJeff Kirsher 			  struct ethtool_pauseparam *epause)
1868f7917c00SJeff Kirsher {
1869f7917c00SJeff Kirsher 	struct port_info *p = netdev_priv(dev);
1870f7917c00SJeff Kirsher 	struct link_config *lc = &p->link_config;
1871f7917c00SJeff Kirsher 
1872f7917c00SJeff Kirsher 	if (epause->autoneg == AUTONEG_DISABLE)
1873f7917c00SJeff Kirsher 		lc->requested_fc = 0;
1874f7917c00SJeff Kirsher 	else if (lc->supported & SUPPORTED_Autoneg)
1875f7917c00SJeff Kirsher 		lc->requested_fc = PAUSE_AUTONEG;
1876f7917c00SJeff Kirsher 	else
1877f7917c00SJeff Kirsher 		return -EINVAL;
1878f7917c00SJeff Kirsher 
1879f7917c00SJeff Kirsher 	if (epause->rx_pause)
1880f7917c00SJeff Kirsher 		lc->requested_fc |= PAUSE_RX;
1881f7917c00SJeff Kirsher 	if (epause->tx_pause)
1882f7917c00SJeff Kirsher 		lc->requested_fc |= PAUSE_TX;
1883f7917c00SJeff Kirsher 	if (lc->autoneg == AUTONEG_ENABLE) {
1884f7917c00SJeff Kirsher 		if (netif_running(dev))
1885f7917c00SJeff Kirsher 			t3_link_start(&p->phy, &p->mac, lc);
1886f7917c00SJeff Kirsher 	} else {
1887f7917c00SJeff Kirsher 		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1888f7917c00SJeff Kirsher 		if (netif_running(dev))
1889f7917c00SJeff Kirsher 			t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
1890f7917c00SJeff Kirsher 	}
1891f7917c00SJeff Kirsher 	return 0;
1892f7917c00SJeff Kirsher }
1893f7917c00SJeff Kirsher 
1894f7917c00SJeff Kirsher static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1895f7917c00SJeff Kirsher {
1896f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(dev);
1897f7917c00SJeff Kirsher 	struct adapter *adapter = pi->adapter;
1898f7917c00SJeff Kirsher 	const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
1899f7917c00SJeff Kirsher 
1900f7917c00SJeff Kirsher 	e->rx_max_pending = MAX_RX_BUFFERS;
1901f7917c00SJeff Kirsher 	e->rx_mini_max_pending = 0;
1902f7917c00SJeff Kirsher 	e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1903f7917c00SJeff Kirsher 	e->tx_max_pending = MAX_TXQ_ENTRIES;
1904f7917c00SJeff Kirsher 
1905f7917c00SJeff Kirsher 	e->rx_pending = q->fl_size;
1906f7917c00SJeff Kirsher 	e->rx_mini_pending = q->rspq_size;
1907f7917c00SJeff Kirsher 	e->rx_jumbo_pending = q->jumbo_size;
1908f7917c00SJeff Kirsher 	e->tx_pending = q->txq_size[0];
1909f7917c00SJeff Kirsher }
1910f7917c00SJeff Kirsher 
1911f7917c00SJeff Kirsher static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1912f7917c00SJeff Kirsher {
1913f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(dev);
1914f7917c00SJeff Kirsher 	struct adapter *adapter = pi->adapter;
1915f7917c00SJeff Kirsher 	struct qset_params *q;
1916f7917c00SJeff Kirsher 	int i;
1917f7917c00SJeff Kirsher 
1918f7917c00SJeff Kirsher 	if (e->rx_pending > MAX_RX_BUFFERS ||
1919f7917c00SJeff Kirsher 	    e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
1920f7917c00SJeff Kirsher 	    e->tx_pending > MAX_TXQ_ENTRIES ||
1921f7917c00SJeff Kirsher 	    e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1922f7917c00SJeff Kirsher 	    e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1923f7917c00SJeff Kirsher 	    e->rx_pending < MIN_FL_ENTRIES ||
1924f7917c00SJeff Kirsher 	    e->rx_jumbo_pending < MIN_FL_ENTRIES ||
1925f7917c00SJeff Kirsher 	    e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
1926f7917c00SJeff Kirsher 		return -EINVAL;
1927f7917c00SJeff Kirsher 
1928f7917c00SJeff Kirsher 	if (adapter->flags & FULL_INIT_DONE)
1929f7917c00SJeff Kirsher 		return -EBUSY;
1930f7917c00SJeff Kirsher 
1931f7917c00SJeff Kirsher 	q = &adapter->params.sge.qset[pi->first_qset];
1932f7917c00SJeff Kirsher 	for (i = 0; i < pi->nqsets; ++i, ++q) {
1933f7917c00SJeff Kirsher 		q->rspq_size = e->rx_mini_pending;
1934f7917c00SJeff Kirsher 		q->fl_size = e->rx_pending;
1935f7917c00SJeff Kirsher 		q->jumbo_size = e->rx_jumbo_pending;
1936f7917c00SJeff Kirsher 		q->txq_size[0] = e->tx_pending;
1937f7917c00SJeff Kirsher 		q->txq_size[1] = e->tx_pending;
1938f7917c00SJeff Kirsher 		q->txq_size[2] = e->tx_pending;
1939f7917c00SJeff Kirsher 	}
1940f7917c00SJeff Kirsher 	return 0;
1941f7917c00SJeff Kirsher }
1942f7917c00SJeff Kirsher 
1943f7917c00SJeff Kirsher static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1944f7917c00SJeff Kirsher {
1945f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(dev);
1946f7917c00SJeff Kirsher 	struct adapter *adapter = pi->adapter;
1947f7917c00SJeff Kirsher 	struct qset_params *qsp;
1948f7917c00SJeff Kirsher 	struct sge_qset *qs;
1949f7917c00SJeff Kirsher 	int i;
1950f7917c00SJeff Kirsher 
1951f7917c00SJeff Kirsher 	if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
1952f7917c00SJeff Kirsher 		return -EINVAL;
1953f7917c00SJeff Kirsher 
1954f7917c00SJeff Kirsher 	for (i = 0; i < pi->nqsets; i++) {
1955f7917c00SJeff Kirsher 		qsp = &adapter->params.sge.qset[i];
1956f7917c00SJeff Kirsher 		qs = &adapter->sge.qs[i];
1957f7917c00SJeff Kirsher 		qsp->coalesce_usecs = c->rx_coalesce_usecs;
1958f7917c00SJeff Kirsher 		t3_update_qset_coalesce(qs, qsp);
1959f7917c00SJeff Kirsher 	}
1960f7917c00SJeff Kirsher 
1961f7917c00SJeff Kirsher 	return 0;
1962f7917c00SJeff Kirsher }
1963f7917c00SJeff Kirsher 
1964f7917c00SJeff Kirsher static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1965f7917c00SJeff Kirsher {
1966f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(dev);
1967f7917c00SJeff Kirsher 	struct adapter *adapter = pi->adapter;
1968f7917c00SJeff Kirsher 	struct qset_params *q = adapter->params.sge.qset;
1969f7917c00SJeff Kirsher 
1970f7917c00SJeff Kirsher 	c->rx_coalesce_usecs = q->coalesce_usecs;
1971f7917c00SJeff Kirsher 	return 0;
1972f7917c00SJeff Kirsher }
1973f7917c00SJeff Kirsher 
1974f7917c00SJeff Kirsher static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
1975f7917c00SJeff Kirsher 		      u8 *data)
1976f7917c00SJeff Kirsher {
1977f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(dev);
1978f7917c00SJeff Kirsher 	struct adapter *adapter = pi->adapter;
1979f7917c00SJeff Kirsher 	int i, err = 0;
1980f7917c00SJeff Kirsher 
1981f7917c00SJeff Kirsher 	u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
1982f7917c00SJeff Kirsher 	if (!buf)
1983f7917c00SJeff Kirsher 		return -ENOMEM;
1984f7917c00SJeff Kirsher 
1985f7917c00SJeff Kirsher 	e->magic = EEPROM_MAGIC;
1986f7917c00SJeff Kirsher 	for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
1987f7917c00SJeff Kirsher 		err = t3_seeprom_read(adapter, i, (__le32 *)&buf[i]);
1988f7917c00SJeff Kirsher 
1989f7917c00SJeff Kirsher 	if (!err)
1990f7917c00SJeff Kirsher 		memcpy(data, buf + e->offset, e->len);
1991f7917c00SJeff Kirsher 	kfree(buf);
1992f7917c00SJeff Kirsher 	return err;
1993f7917c00SJeff Kirsher }
1994f7917c00SJeff Kirsher 
1995f7917c00SJeff Kirsher static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
1996f7917c00SJeff Kirsher 		      u8 *data)
1997f7917c00SJeff Kirsher {
1998f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(dev);
1999f7917c00SJeff Kirsher 	struct adapter *adapter = pi->adapter;
2000f7917c00SJeff Kirsher 	u32 aligned_offset, aligned_len;
2001f7917c00SJeff Kirsher 	__le32 *p;
2002f7917c00SJeff Kirsher 	u8 *buf;
2003f7917c00SJeff Kirsher 	int err;
2004f7917c00SJeff Kirsher 
2005f7917c00SJeff Kirsher 	if (eeprom->magic != EEPROM_MAGIC)
2006f7917c00SJeff Kirsher 		return -EINVAL;
2007f7917c00SJeff Kirsher 
2008f7917c00SJeff Kirsher 	aligned_offset = eeprom->offset & ~3;
2009f7917c00SJeff Kirsher 	aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
2010f7917c00SJeff Kirsher 
2011f7917c00SJeff Kirsher 	if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
2012f7917c00SJeff Kirsher 		buf = kmalloc(aligned_len, GFP_KERNEL);
2013f7917c00SJeff Kirsher 		if (!buf)
2014f7917c00SJeff Kirsher 			return -ENOMEM;
2015f7917c00SJeff Kirsher 		err = t3_seeprom_read(adapter, aligned_offset, (__le32 *)buf);
2016f7917c00SJeff Kirsher 		if (!err && aligned_len > 4)
2017f7917c00SJeff Kirsher 			err = t3_seeprom_read(adapter,
2018f7917c00SJeff Kirsher 					      aligned_offset + aligned_len - 4,
2019f7917c00SJeff Kirsher 					      (__le32 *)&buf[aligned_len - 4]);
2020f7917c00SJeff Kirsher 		if (err)
2021f7917c00SJeff Kirsher 			goto out;
2022f7917c00SJeff Kirsher 		memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
2023f7917c00SJeff Kirsher 	} else
2024f7917c00SJeff Kirsher 		buf = data;
2025f7917c00SJeff Kirsher 
2026f7917c00SJeff Kirsher 	err = t3_seeprom_wp(adapter, 0);
2027f7917c00SJeff Kirsher 	if (err)
2028f7917c00SJeff Kirsher 		goto out;
2029f7917c00SJeff Kirsher 
2030f7917c00SJeff Kirsher 	for (p = (__le32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
2031f7917c00SJeff Kirsher 		err = t3_seeprom_write(adapter, aligned_offset, *p);
2032f7917c00SJeff Kirsher 		aligned_offset += 4;
2033f7917c00SJeff Kirsher 	}
2034f7917c00SJeff Kirsher 
2035f7917c00SJeff Kirsher 	if (!err)
2036f7917c00SJeff Kirsher 		err = t3_seeprom_wp(adapter, 1);
2037f7917c00SJeff Kirsher out:
2038f7917c00SJeff Kirsher 	if (buf != data)
2039f7917c00SJeff Kirsher 		kfree(buf);
2040f7917c00SJeff Kirsher 	return err;
2041f7917c00SJeff Kirsher }
2042f7917c00SJeff Kirsher 
2043f7917c00SJeff Kirsher static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2044f7917c00SJeff Kirsher {
2045f7917c00SJeff Kirsher 	wol->supported = 0;
2046f7917c00SJeff Kirsher 	wol->wolopts = 0;
2047f7917c00SJeff Kirsher 	memset(&wol->sopass, 0, sizeof(wol->sopass));
2048f7917c00SJeff Kirsher }
2049f7917c00SJeff Kirsher 
2050f7917c00SJeff Kirsher static const struct ethtool_ops cxgb_ethtool_ops = {
2051f7917c00SJeff Kirsher 	.get_settings = get_settings,
2052f7917c00SJeff Kirsher 	.set_settings = set_settings,
2053f7917c00SJeff Kirsher 	.get_drvinfo = get_drvinfo,
2054f7917c00SJeff Kirsher 	.get_msglevel = get_msglevel,
2055f7917c00SJeff Kirsher 	.set_msglevel = set_msglevel,
2056f7917c00SJeff Kirsher 	.get_ringparam = get_sge_param,
2057f7917c00SJeff Kirsher 	.set_ringparam = set_sge_param,
2058f7917c00SJeff Kirsher 	.get_coalesce = get_coalesce,
2059f7917c00SJeff Kirsher 	.set_coalesce = set_coalesce,
2060f7917c00SJeff Kirsher 	.get_eeprom_len = get_eeprom_len,
2061f7917c00SJeff Kirsher 	.get_eeprom = get_eeprom,
2062f7917c00SJeff Kirsher 	.set_eeprom = set_eeprom,
2063f7917c00SJeff Kirsher 	.get_pauseparam = get_pauseparam,
2064f7917c00SJeff Kirsher 	.set_pauseparam = set_pauseparam,
2065f7917c00SJeff Kirsher 	.get_link = ethtool_op_get_link,
2066f7917c00SJeff Kirsher 	.get_strings = get_strings,
2067f7917c00SJeff Kirsher 	.set_phys_id = set_phys_id,
2068f7917c00SJeff Kirsher 	.nway_reset = restart_autoneg,
2069f7917c00SJeff Kirsher 	.get_sset_count = get_sset_count,
2070f7917c00SJeff Kirsher 	.get_ethtool_stats = get_stats,
2071f7917c00SJeff Kirsher 	.get_regs_len = get_regs_len,
2072f7917c00SJeff Kirsher 	.get_regs = get_regs,
2073f7917c00SJeff Kirsher 	.get_wol = get_wol,
2074f7917c00SJeff Kirsher };
2075f7917c00SJeff Kirsher 
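/*
 * Accept a value if it is negative (meaning "leave unchanged") or lies
 * within [lo, hi].
 */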
2076f7917c00SJeff Kirsher static int in_range(int val, int lo, int hi)
2077f7917c00SJeff Kirsher {
2078f7917c00SJeff Kirsher 	return val < 0 || (val <= hi && val >= lo);
2079f7917c00SJeff Kirsher }
2080f7917c00SJeff Kirsher 
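/*
 * Handler for the driver's private extension ioctls: copy in the command
 * word from user space, then dispatch on it below.
 */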
2081f7917c00SJeff Kirsher static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
2082f7917c00SJeff Kirsher {
2083f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(dev);
2084f7917c00SJeff Kirsher 	struct adapter *adapter = pi->adapter;
2085f7917c00SJeff Kirsher 	u32 cmd;
2086f7917c00SJeff Kirsher 	int ret;
2087f7917c00SJeff Kirsher 
2088f7917c00SJeff Kirsher 	if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
2089f7917c00SJeff Kirsher 		return -EFAULT;
2090f7917c00SJeff Kirsher 
2091f7917c00SJeff Kirsher 	switch (cmd) {
2092f7917c00SJeff Kirsher 	case CHELSIO_SET_QSET_PARAMS:{
2093f7917c00SJeff Kirsher 		int i;
2094f7917c00SJeff Kirsher 		struct qset_params *q;
2095f7917c00SJeff Kirsher 		struct ch_qset_params t;
2096f7917c00SJeff Kirsher 		int q1 = pi->first_qset;
2097f7917c00SJeff Kirsher 		int nqsets = pi->nqsets;
2098f7917c00SJeff Kirsher 
2099f7917c00SJeff Kirsher 		if (!capable(CAP_NET_ADMIN))
2100f7917c00SJeff Kirsher 			return -EPERM;
2101f7917c00SJeff Kirsher 		if (copy_from_user(&t, useraddr, sizeof(t)))
2102f7917c00SJeff Kirsher 			return -EFAULT;
2103f7917c00SJeff Kirsher 		if (t.qset_idx >= SGE_QSETS)
2104f7917c00SJeff Kirsher 			return -EINVAL;
2105f7917c00SJeff Kirsher 		if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
2106f7917c00SJeff Kirsher 		    !in_range(t.cong_thres, 0, 255) ||
2107f7917c00SJeff Kirsher 		    !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
2108f7917c00SJeff Kirsher 			      MAX_TXQ_ENTRIES) ||
2109f7917c00SJeff Kirsher 		    !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
2110f7917c00SJeff Kirsher 			      MAX_TXQ_ENTRIES) ||
2111f7917c00SJeff Kirsher 		    !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
2112f7917c00SJeff Kirsher 			      MAX_CTRL_TXQ_ENTRIES) ||
2113f7917c00SJeff Kirsher 		    !in_range(t.fl_size[0], MIN_FL_ENTRIES,
2114f7917c00SJeff Kirsher 			      MAX_RX_BUFFERS) ||
2115f7917c00SJeff Kirsher 		    !in_range(t.fl_size[1], MIN_FL_ENTRIES,
2116f7917c00SJeff Kirsher 			      MAX_RX_JUMBO_BUFFERS) ||
2117f7917c00SJeff Kirsher 		    !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
2118f7917c00SJeff Kirsher 			      MAX_RSPQ_ENTRIES))
2119f7917c00SJeff Kirsher 			return -EINVAL;
2120f7917c00SJeff Kirsher 
2121f7917c00SJeff Kirsher 		if ((adapter->flags & FULL_INIT_DONE) &&
2122f7917c00SJeff Kirsher 			(t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
2123f7917c00SJeff Kirsher 			t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
2124f7917c00SJeff Kirsher 			t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
2125f7917c00SJeff Kirsher 			t.polling >= 0 || t.cong_thres >= 0))
2126f7917c00SJeff Kirsher 			return -EBUSY;
2127f7917c00SJeff Kirsher 
2128f7917c00SJeff Kirsher 		/* Allow setting of any available qset when offload enabled */
2129f7917c00SJeff Kirsher 		if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2130f7917c00SJeff Kirsher 			q1 = 0;
2131f7917c00SJeff Kirsher 			for_each_port(adapter, i) {
2132f7917c00SJeff Kirsher 				pi = adap2pinfo(adapter, i);
2133f7917c00SJeff Kirsher 				nqsets += pi->first_qset + pi->nqsets;
2134f7917c00SJeff Kirsher 			}
2135f7917c00SJeff Kirsher 		}
2136f7917c00SJeff Kirsher 
2137f7917c00SJeff Kirsher 		if (t.qset_idx < q1)
2138f7917c00SJeff Kirsher 			return -EINVAL;
2139f7917c00SJeff Kirsher 		if (t.qset_idx > q1 + nqsets - 1)
2140f7917c00SJeff Kirsher 			return -EINVAL;
2141f7917c00SJeff Kirsher 
2142f7917c00SJeff Kirsher 		q = &adapter->params.sge.qset[t.qset_idx];
2143f7917c00SJeff Kirsher 
2144f7917c00SJeff Kirsher 		if (t.rspq_size >= 0)
2145f7917c00SJeff Kirsher 			q->rspq_size = t.rspq_size;
2146f7917c00SJeff Kirsher 		if (t.fl_size[0] >= 0)
2147f7917c00SJeff Kirsher 			q->fl_size = t.fl_size[0];
2148f7917c00SJeff Kirsher 		if (t.fl_size[1] >= 0)
2149f7917c00SJeff Kirsher 			q->jumbo_size = t.fl_size[1];
2150f7917c00SJeff Kirsher 		if (t.txq_size[0] >= 0)
2151f7917c00SJeff Kirsher 			q->txq_size[0] = t.txq_size[0];
2152f7917c00SJeff Kirsher 		if (t.txq_size[1] >= 0)
2153f7917c00SJeff Kirsher 			q->txq_size[1] = t.txq_size[1];
2154f7917c00SJeff Kirsher 		if (t.txq_size[2] >= 0)
2155f7917c00SJeff Kirsher 			q->txq_size[2] = t.txq_size[2];
2156f7917c00SJeff Kirsher 		if (t.cong_thres >= 0)
2157f7917c00SJeff Kirsher 			q->cong_thres = t.cong_thres;
2158f7917c00SJeff Kirsher 		if (t.intr_lat >= 0) {
2159f7917c00SJeff Kirsher 			struct sge_qset *qs =
2160f7917c00SJeff Kirsher 				&adapter->sge.qs[t.qset_idx];
2161f7917c00SJeff Kirsher 
2162f7917c00SJeff Kirsher 			q->coalesce_usecs = t.intr_lat;
2163f7917c00SJeff Kirsher 			t3_update_qset_coalesce(qs, q);
2164f7917c00SJeff Kirsher 		}
2165f7917c00SJeff Kirsher 		if (t.polling >= 0) {
2166f7917c00SJeff Kirsher 			if (adapter->flags & USING_MSIX)
2167f7917c00SJeff Kirsher 				q->polling = t.polling;
2168f7917c00SJeff Kirsher 			else {
2169f7917c00SJeff Kirsher 				/* No polling with INTx for T3A */
2170f7917c00SJeff Kirsher 				if (adapter->params.rev == 0 &&
2171f7917c00SJeff Kirsher 					!(adapter->flags & USING_MSI))
2172f7917c00SJeff Kirsher 					t.polling = 0;
2173f7917c00SJeff Kirsher 
2174f7917c00SJeff Kirsher 				for (i = 0; i < SGE_QSETS; i++) {
2175f7917c00SJeff Kirsher 					q = &adapter->params.sge.qset[i];
2177f7917c00SJeff Kirsher 					q->polling = t.polling;
2178f7917c00SJeff Kirsher 				}
2179f7917c00SJeff Kirsher 			}
2180f7917c00SJeff Kirsher 		}
2181f7917c00SJeff Kirsher 
2182f7917c00SJeff Kirsher 		if (t.lro >= 0) {
2183f7917c00SJeff Kirsher 			if (t.lro)
2184f7917c00SJeff Kirsher 				dev->wanted_features |= NETIF_F_GRO;
2185f7917c00SJeff Kirsher 			else
2186f7917c00SJeff Kirsher 				dev->wanted_features &= ~NETIF_F_GRO;
2187f7917c00SJeff Kirsher 			netdev_update_features(dev);
2188f7917c00SJeff Kirsher 		}
2189f7917c00SJeff Kirsher 
2190f7917c00SJeff Kirsher 		break;
2191f7917c00SJeff Kirsher 	}
2192f7917c00SJeff Kirsher 	case CHELSIO_GET_QSET_PARAMS:{
2193f7917c00SJeff Kirsher 		struct qset_params *q;
2194f7917c00SJeff Kirsher 		struct ch_qset_params t;
2195f7917c00SJeff Kirsher 		int q1 = pi->first_qset;
2196f7917c00SJeff Kirsher 		int nqsets = pi->nqsets;
2197f7917c00SJeff Kirsher 		int i;
2198f7917c00SJeff Kirsher 
2199f7917c00SJeff Kirsher 		if (copy_from_user(&t, useraddr, sizeof(t)))
2200f7917c00SJeff Kirsher 			return -EFAULT;
2201f7917c00SJeff Kirsher 
2202f7917c00SJeff Kirsher 		/* Display qsets for all ports when offload enabled */
2203f7917c00SJeff Kirsher 		if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2204f7917c00SJeff Kirsher 			q1 = 0;
2205f7917c00SJeff Kirsher 			for_each_port(adapter, i) {
2206f7917c00SJeff Kirsher 				pi = adap2pinfo(adapter, i);
2207f7917c00SJeff Kirsher 				nqsets = pi->first_qset + pi->nqsets;
2208f7917c00SJeff Kirsher 			}
2209f7917c00SJeff Kirsher 		}
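		/* q1/nqsets now span the queue sets of every port */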
2210f7917c00SJeff Kirsher 
2211f7917c00SJeff Kirsher 		if (t.qset_idx >= nqsets)
2212f7917c00SJeff Kirsher 			return -EINVAL;
2213f7917c00SJeff Kirsher 
2214f7917c00SJeff Kirsher 		q = &adapter->params.sge.qset[q1 + t.qset_idx];
2215f7917c00SJeff Kirsher 		t.rspq_size = q->rspq_size;
2216f7917c00SJeff Kirsher 		t.txq_size[0] = q->txq_size[0];
2217f7917c00SJeff Kirsher 		t.txq_size[1] = q->txq_size[1];
2218f7917c00SJeff Kirsher 		t.txq_size[2] = q->txq_size[2];
2219f7917c00SJeff Kirsher 		t.fl_size[0] = q->fl_size;
2220f7917c00SJeff Kirsher 		t.fl_size[1] = q->jumbo_size;
2221f7917c00SJeff Kirsher 		t.polling = q->polling;
2222f7917c00SJeff Kirsher 		t.lro = !!(dev->features & NETIF_F_GRO);
2223f7917c00SJeff Kirsher 		t.intr_lat = q->coalesce_usecs;
2224f7917c00SJeff Kirsher 		t.cong_thres = q->cong_thres;
2225f7917c00SJeff Kirsher 		t.qnum = q1;
2226f7917c00SJeff Kirsher 
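		/* MSI-X vector 0 serves non-data interrupts; queue set
		 * vectors start at index 1. */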
2227f7917c00SJeff Kirsher 		if (adapter->flags & USING_MSIX)
2228f7917c00SJeff Kirsher 			t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
2229f7917c00SJeff Kirsher 		else
2230f7917c00SJeff Kirsher 			t.vector = adapter->pdev->irq;
2231f7917c00SJeff Kirsher 
2232f7917c00SJeff Kirsher 		if (copy_to_user(useraddr, &t, sizeof(t)))
2233f7917c00SJeff Kirsher 			return -EFAULT;
2234f7917c00SJeff Kirsher 		break;
2235f7917c00SJeff Kirsher 	}
2236f7917c00SJeff Kirsher 	case CHELSIO_SET_QSET_NUM:{
2237f7917c00SJeff Kirsher 		struct ch_reg edata;
2238f7917c00SJeff Kirsher 		unsigned int i, first_qset = 0, other_qsets = 0;
2239f7917c00SJeff Kirsher 
2240f7917c00SJeff Kirsher 		if (!capable(CAP_NET_ADMIN))
2241f7917c00SJeff Kirsher 			return -EPERM;
2242f7917c00SJeff Kirsher 		if (adapter->flags & FULL_INIT_DONE)
2243f7917c00SJeff Kirsher 			return -EBUSY;
2244f7917c00SJeff Kirsher 		if (copy_from_user(&edata, useraddr, sizeof(edata)))
2245f7917c00SJeff Kirsher 			return -EFAULT;
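		/* More than one queue set per port requires MSI-X */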
2246f7917c00SJeff Kirsher 		if (edata.val < 1 ||
2247f7917c00SJeff Kirsher 			(edata.val > 1 && !(adapter->flags & USING_MSIX)))
2248f7917c00SJeff Kirsher 			return -EINVAL;
2249f7917c00SJeff Kirsher 
2250f7917c00SJeff Kirsher 		for_each_port(adapter, i)
2251f7917c00SJeff Kirsher 			if (adapter->port[i] && adapter->port[i] != dev)
2252f7917c00SJeff Kirsher 				other_qsets += adap2pinfo(adapter, i)->nqsets;
2253f7917c00SJeff Kirsher 
2254f7917c00SJeff Kirsher 		if (edata.val + other_qsets > SGE_QSETS)
2255f7917c00SJeff Kirsher 			return -EINVAL;
2256f7917c00SJeff Kirsher 
2257f7917c00SJeff Kirsher 		pi->nqsets = edata.val;
2258f7917c00SJeff Kirsher 
2259f7917c00SJeff Kirsher 		for_each_port(adapter, i)
2260f7917c00SJeff Kirsher 			if (adapter->port[i]) {
2261f7917c00SJeff Kirsher 				pi = adap2pinfo(adapter, i);
2262f7917c00SJeff Kirsher 				pi->first_qset = first_qset;
2263f7917c00SJeff Kirsher 				first_qset += pi->nqsets;
2264f7917c00SJeff Kirsher 			}
2265f7917c00SJeff Kirsher 		break;
2266f7917c00SJeff Kirsher 	}
2267f7917c00SJeff Kirsher 	case CHELSIO_GET_QSET_NUM:{
2268f7917c00SJeff Kirsher 		struct ch_reg edata;
2269f7917c00SJeff Kirsher 
2270f7917c00SJeff Kirsher 		memset(&edata, 0, sizeof(struct ch_reg));
2271f7917c00SJeff Kirsher 
2272f7917c00SJeff Kirsher 		edata.cmd = CHELSIO_GET_QSET_NUM;
2273f7917c00SJeff Kirsher 		edata.val = pi->nqsets;
2274f7917c00SJeff Kirsher 		if (copy_to_user(useraddr, &edata, sizeof(edata)))
2275f7917c00SJeff Kirsher 			return -EFAULT;
2276f7917c00SJeff Kirsher 		break;
2277f7917c00SJeff Kirsher 	}
2278f7917c00SJeff Kirsher 	case CHELSIO_LOAD_FW:{
2279f7917c00SJeff Kirsher 		u8 *fw_data;
2280f7917c00SJeff Kirsher 		struct ch_mem_range t;
2281f7917c00SJeff Kirsher 
2282f7917c00SJeff Kirsher 		if (!capable(CAP_SYS_RAWIO))
2283f7917c00SJeff Kirsher 			return -EPERM;
2284f7917c00SJeff Kirsher 		if (copy_from_user(&t, useraddr, sizeof(t)))
2285f7917c00SJeff Kirsher 			return -EFAULT;
2286f7917c00SJeff Kirsher 		/* Should t.len be sanity-checked here? */
2287f7917c00SJeff Kirsher 		fw_data = memdup_user(useraddr + sizeof(t), t.len);
2288f7917c00SJeff Kirsher 		if (IS_ERR(fw_data))
2289f7917c00SJeff Kirsher 			return PTR_ERR(fw_data);
2290f7917c00SJeff Kirsher 
2291f7917c00SJeff Kirsher 		ret = t3_load_fw(adapter, fw_data, t.len);
2292f7917c00SJeff Kirsher 		kfree(fw_data);
2293f7917c00SJeff Kirsher 		if (ret)
2294f7917c00SJeff Kirsher 			return ret;
2295f7917c00SJeff Kirsher 		break;
2296f7917c00SJeff Kirsher 	}
2297f7917c00SJeff Kirsher 	case CHELSIO_SETMTUTAB:{
2298f7917c00SJeff Kirsher 		struct ch_mtus m;
2299f7917c00SJeff Kirsher 		int i;
2300f7917c00SJeff Kirsher 
2301f7917c00SJeff Kirsher 		if (!is_offload(adapter))
2302f7917c00SJeff Kirsher 			return -EOPNOTSUPP;
2303f7917c00SJeff Kirsher 		if (!capable(CAP_NET_ADMIN))
2304f7917c00SJeff Kirsher 			return -EPERM;
2305f7917c00SJeff Kirsher 		if (offload_running(adapter))
2306f7917c00SJeff Kirsher 			return -EBUSY;
2307f7917c00SJeff Kirsher 		if (copy_from_user(&m, useraddr, sizeof(m)))
2308f7917c00SJeff Kirsher 			return -EFAULT;
2309f7917c00SJeff Kirsher 		if (m.nmtus != NMTUS)
2310f7917c00SJeff Kirsher 			return -EINVAL;
2311f7917c00SJeff Kirsher 		if (m.mtus[0] < 81)	/* accommodate SACK */
2312f7917c00SJeff Kirsher 			return -EINVAL;
2313f7917c00SJeff Kirsher 
2314f7917c00SJeff Kirsher 		/* MTUs must be in ascending order */
2315f7917c00SJeff Kirsher 		for (i = 1; i < NMTUS; ++i)
2316f7917c00SJeff Kirsher 			if (m.mtus[i] < m.mtus[i - 1])
2317f7917c00SJeff Kirsher 				return -EINVAL;
2318f7917c00SJeff Kirsher 
2319f7917c00SJeff Kirsher 		memcpy(adapter->params.mtus, m.mtus,
2320f7917c00SJeff Kirsher 			sizeof(adapter->params.mtus));
2321f7917c00SJeff Kirsher 		break;
2322f7917c00SJeff Kirsher 	}
2323f7917c00SJeff Kirsher 	case CHELSIO_GET_PM:{
2324f7917c00SJeff Kirsher 		struct tp_params *p = &adapter->params.tp;
2325f7917c00SJeff Kirsher 		struct ch_pm m = {.cmd = CHELSIO_GET_PM };
2326f7917c00SJeff Kirsher 
2327f7917c00SJeff Kirsher 		if (!is_offload(adapter))
2328f7917c00SJeff Kirsher 			return -EOPNOTSUPP;
2329f7917c00SJeff Kirsher 		m.tx_pg_sz = p->tx_pg_size;
2330f7917c00SJeff Kirsher 		m.tx_num_pg = p->tx_num_pgs;
2331f7917c00SJeff Kirsher 		m.rx_pg_sz = p->rx_pg_size;
2332f7917c00SJeff Kirsher 		m.rx_num_pg = p->rx_num_pgs;
2333f7917c00SJeff Kirsher 		m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
2334f7917c00SJeff Kirsher 		if (copy_to_user(useraddr, &m, sizeof(m)))
2335f7917c00SJeff Kirsher 			return -EFAULT;
2336f7917c00SJeff Kirsher 		break;
2337f7917c00SJeff Kirsher 	}
2338f7917c00SJeff Kirsher 	case CHELSIO_SET_PM:{
2339f7917c00SJeff Kirsher 		struct ch_pm m;
2340f7917c00SJeff Kirsher 		struct tp_params *p = &adapter->params.tp;
2341f7917c00SJeff Kirsher 
2342f7917c00SJeff Kirsher 		if (!is_offload(adapter))
2343f7917c00SJeff Kirsher 			return -EOPNOTSUPP;
2344f7917c00SJeff Kirsher 		if (!capable(CAP_NET_ADMIN))
2345f7917c00SJeff Kirsher 			return -EPERM;
2346f7917c00SJeff Kirsher 		if (adapter->flags & FULL_INIT_DONE)
2347f7917c00SJeff Kirsher 			return -EBUSY;
2348f7917c00SJeff Kirsher 		if (copy_from_user(&m, useraddr, sizeof(m)))
2349f7917c00SJeff Kirsher 			return -EFAULT;
2350f7917c00SJeff Kirsher 		if (!is_power_of_2(m.rx_pg_sz) ||
2351f7917c00SJeff Kirsher 			!is_power_of_2(m.tx_pg_sz))
2352f7917c00SJeff Kirsher 			return -EINVAL;	/* not a power of 2 */
2353f7917c00SJeff Kirsher 		if (!(m.rx_pg_sz & 0x14000))
2354f7917c00SJeff Kirsher 			return -EINVAL;	/* not 16KB or 64KB */
2355f7917c00SJeff Kirsher 		if (!(m.tx_pg_sz & 0x1554000))
2356f7917c00SJeff Kirsher 			return -EINVAL;	/* not a power of 4 in 16KB..16MB */
2357f7917c00SJeff Kirsher 		if (m.tx_num_pg == -1)
2358f7917c00SJeff Kirsher 			m.tx_num_pg = p->tx_num_pgs;
2359f7917c00SJeff Kirsher 		if (m.rx_num_pg == -1)
2360f7917c00SJeff Kirsher 			m.rx_num_pg = p->rx_num_pgs;
2361f7917c00SJeff Kirsher 		if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
2362f7917c00SJeff Kirsher 			return -EINVAL;
2363f7917c00SJeff Kirsher 		if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
2364f7917c00SJeff Kirsher 			m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
2365f7917c00SJeff Kirsher 			return -EINVAL;
2366f7917c00SJeff Kirsher 		p->rx_pg_size = m.rx_pg_sz;
2367f7917c00SJeff Kirsher 		p->tx_pg_size = m.tx_pg_sz;
2368f7917c00SJeff Kirsher 		p->rx_num_pgs = m.rx_num_pg;
2369f7917c00SJeff Kirsher 		p->tx_num_pgs = m.tx_num_pg;
2370f7917c00SJeff Kirsher 		break;
2371f7917c00SJeff Kirsher 	}
2372f7917c00SJeff Kirsher 	case CHELSIO_GET_MEM:{
2373f7917c00SJeff Kirsher 		struct ch_mem_range t;
2374f7917c00SJeff Kirsher 		struct mc7 *mem;
2375f7917c00SJeff Kirsher 		u64 buf[32];
2376f7917c00SJeff Kirsher 
2377f7917c00SJeff Kirsher 		if (!is_offload(adapter))
2378f7917c00SJeff Kirsher 			return -EOPNOTSUPP;
2379f7917c00SJeff Kirsher 		if (!(adapter->flags & FULL_INIT_DONE))
2380f7917c00SJeff Kirsher 			return -EIO;	/* need the memory controllers */
2381f7917c00SJeff Kirsher 		if (copy_from_user(&t, useraddr, sizeof(t)))
2382f7917c00SJeff Kirsher 			return -EFAULT;
2383f7917c00SJeff Kirsher 		if ((t.addr & 7) || (t.len & 7))
2384f7917c00SJeff Kirsher 			return -EINVAL;
2385f7917c00SJeff Kirsher 		if (t.mem_id == MEM_CM)
2386f7917c00SJeff Kirsher 			mem = &adapter->cm;
2387f7917c00SJeff Kirsher 		else if (t.mem_id == MEM_PMRX)
2388f7917c00SJeff Kirsher 			mem = &adapter->pmrx;
2389f7917c00SJeff Kirsher 		else if (t.mem_id == MEM_PMTX)
2390f7917c00SJeff Kirsher 			mem = &adapter->pmtx;
2391f7917c00SJeff Kirsher 		else
2392f7917c00SJeff Kirsher 			return -EINVAL;
2393f7917c00SJeff Kirsher 
2394f7917c00SJeff Kirsher 		/*
2395f7917c00SJeff Kirsher 		 * Version scheme:
2396f7917c00SJeff Kirsher 		 * bits 0..9: chip version
2397f7917c00SJeff Kirsher 		 * bits 10..15: chip revision
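		 * e.g. chip version 3, rev 2 is encoded as 3 | (2 << 10) = 0x803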
2398f7917c00SJeff Kirsher 		 */
2399f7917c00SJeff Kirsher 		t.version = 3 | (adapter->params.rev << 10);
2400f7917c00SJeff Kirsher 		if (copy_to_user(useraddr, &t, sizeof(t)))
2401f7917c00SJeff Kirsher 			return -EFAULT;
2402f7917c00SJeff Kirsher 
2403f7917c00SJeff Kirsher 		/*
2404f7917c00SJeff Kirsher 		 * Read 256 bytes at a time as len can be large and we don't
2405f7917c00SJeff Kirsher 		 * want to use huge intermediate buffers.
2406f7917c00SJeff Kirsher 		 */
2407f7917c00SJeff Kirsher 		useraddr += sizeof(t);	/* advance to start of buffer */
2408f7917c00SJeff Kirsher 		while (t.len) {
2409f7917c00SJeff Kirsher 			unsigned int chunk =
2410f7917c00SJeff Kirsher 				min_t(unsigned int, t.len, sizeof(buf));
2411f7917c00SJeff Kirsher 
2412f7917c00SJeff Kirsher 			ret =
2413f7917c00SJeff Kirsher 				t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
2414f7917c00SJeff Kirsher 						buf);
2415f7917c00SJeff Kirsher 			if (ret)
2416f7917c00SJeff Kirsher 				return ret;
2417f7917c00SJeff Kirsher 			if (copy_to_user(useraddr, buf, chunk))
2418f7917c00SJeff Kirsher 				return -EFAULT;
2419f7917c00SJeff Kirsher 			useraddr += chunk;
2420f7917c00SJeff Kirsher 			t.addr += chunk;
2421f7917c00SJeff Kirsher 			t.len -= chunk;
2422f7917c00SJeff Kirsher 		}
2423f7917c00SJeff Kirsher 		break;
2424f7917c00SJeff Kirsher 	}
2425f7917c00SJeff Kirsher 	case CHELSIO_SET_TRACE_FILTER:{
2426f7917c00SJeff Kirsher 		struct ch_trace t;
2427f7917c00SJeff Kirsher 		const struct trace_params *tp;
2428f7917c00SJeff Kirsher 
2429f7917c00SJeff Kirsher 		if (!capable(CAP_NET_ADMIN))
2430f7917c00SJeff Kirsher 			return -EPERM;
2431f7917c00SJeff Kirsher 		if (!offload_running(adapter))
2432f7917c00SJeff Kirsher 			return -EAGAIN;
2433f7917c00SJeff Kirsher 		if (copy_from_user(&t, useraddr, sizeof(t)))
2434f7917c00SJeff Kirsher 			return -EFAULT;
2435f7917c00SJeff Kirsher 
2436f7917c00SJeff Kirsher 		tp = (const struct trace_params *)&t.sip;
2437f7917c00SJeff Kirsher 		if (t.config_tx)
2438f7917c00SJeff Kirsher 			t3_config_trace_filter(adapter, tp, 0,
2439f7917c00SJeff Kirsher 						t.invert_match,
2440f7917c00SJeff Kirsher 						t.trace_tx);
2441f7917c00SJeff Kirsher 		if (t.config_rx)
2442f7917c00SJeff Kirsher 			t3_config_trace_filter(adapter, tp, 1,
2443f7917c00SJeff Kirsher 						t.invert_match,
2444f7917c00SJeff Kirsher 						t.trace_rx);
2445f7917c00SJeff Kirsher 		break;
2446f7917c00SJeff Kirsher 	}
2447f7917c00SJeff Kirsher 	default:
2448f7917c00SJeff Kirsher 		return -EOPNOTSUPP;
2449f7917c00SJeff Kirsher 	}
2450f7917c00SJeff Kirsher 	return 0;
2451f7917c00SJeff Kirsher }
2452f7917c00SJeff Kirsher 
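/*
 * Example (hypothetical userspace sketch, not part of the driver): the
 * private commands handled by cxgb_extension_ioctl() are reached through
 * SIOCCHIOCTL, with ifr_data pointing at a command structure whose first
 * word is the command code.  With fd an ordinary AF_INET socket:
 *
 *	struct ch_reg edata = { .cmd = CHELSIO_GET_QSET_NUM };
 *	struct ifreq ifr;
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ifr.ifr_data = (void *)&edata;
 *	if (ioctl(fd, SIOCCHIOCTL, &ifr) == 0)
 *		printf("%u queue sets\n", edata.val);
 */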
2453f7917c00SJeff Kirsher static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2454f7917c00SJeff Kirsher {
2455f7917c00SJeff Kirsher 	struct mii_ioctl_data *data = if_mii(req);
2456f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(dev);
2457f7917c00SJeff Kirsher 	struct adapter *adapter = pi->adapter;
2458f7917c00SJeff Kirsher 
2459f7917c00SJeff Kirsher 	switch (cmd) {
2460f7917c00SJeff Kirsher 	case SIOCGMIIREG:
2461f7917c00SJeff Kirsher 	case SIOCSMIIREG:
2462f7917c00SJeff Kirsher 		/* Convert phy_id from older PRTAD/DEVAD format */
2463f7917c00SJeff Kirsher 		if (is_10G(adapter) &&
2464f7917c00SJeff Kirsher 		    !mdio_phy_id_is_c45(data->phy_id) &&
2465f7917c00SJeff Kirsher 		    (data->phy_id & 0x1f00) &&
2466f7917c00SJeff Kirsher 		    !(data->phy_id & 0xe0e0))
2467f7917c00SJeff Kirsher 			data->phy_id = mdio_phy_id_c45(data->phy_id >> 8,
2468f7917c00SJeff Kirsher 						       data->phy_id & 0x1f);
2469f7917c00SJeff Kirsher 		/* FALLTHRU */
2470f7917c00SJeff Kirsher 	case SIOCGMIIPHY:
2471f7917c00SJeff Kirsher 		return mdio_mii_ioctl(&pi->phy.mdio, data, cmd);
2472f7917c00SJeff Kirsher 	case SIOCCHIOCTL:
2473f7917c00SJeff Kirsher 		return cxgb_extension_ioctl(dev, req->ifr_data);
2474f7917c00SJeff Kirsher 	default:
2475f7917c00SJeff Kirsher 		return -EOPNOTSUPP;
2476f7917c00SJeff Kirsher 	}
2477f7917c00SJeff Kirsher }
2478f7917c00SJeff Kirsher 
2479f7917c00SJeff Kirsher static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2480f7917c00SJeff Kirsher {
2481f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(dev);
2482f7917c00SJeff Kirsher 	struct adapter *adapter = pi->adapter;
2483f7917c00SJeff Kirsher 	int ret;
2484f7917c00SJeff Kirsher 
2485f7917c00SJeff Kirsher 	if (new_mtu < 81)	/* accommodate SACK */
2486f7917c00SJeff Kirsher 		return -EINVAL;
2487f7917c00SJeff Kirsher 	if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
2488f7917c00SJeff Kirsher 		return ret;
2489f7917c00SJeff Kirsher 	dev->mtu = new_mtu;
2490f7917c00SJeff Kirsher 	init_port_mtus(adapter);
2491f7917c00SJeff Kirsher 	if (adapter->params.rev == 0 && offload_running(adapter))
2492f7917c00SJeff Kirsher 		t3_load_mtus(adapter, adapter->params.mtus,
2493f7917c00SJeff Kirsher 			     adapter->params.a_wnd, adapter->params.b_wnd,
2494f7917c00SJeff Kirsher 			     adapter->port[0]->mtu);
2495f7917c00SJeff Kirsher 	return 0;
2496f7917c00SJeff Kirsher }
2497f7917c00SJeff Kirsher 
2498f7917c00SJeff Kirsher static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2499f7917c00SJeff Kirsher {
2500f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(dev);
2501f7917c00SJeff Kirsher 	struct adapter *adapter = pi->adapter;
2502f7917c00SJeff Kirsher 	struct sockaddr *addr = p;
2503f7917c00SJeff Kirsher 
2504f7917c00SJeff Kirsher 	if (!is_valid_ether_addr(addr->sa_data))
2505f7917c00SJeff Kirsher 		return -EINVAL;
2506f7917c00SJeff Kirsher 
2507f7917c00SJeff Kirsher 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2508f7917c00SJeff Kirsher 	t3_mac_set_address(&pi->mac, LAN_MAC_IDX, dev->dev_addr);
2509f7917c00SJeff Kirsher 	if (offload_running(adapter))
2510f7917c00SJeff Kirsher 		write_smt_entry(adapter, pi->port_id);
2511f7917c00SJeff Kirsher 	return 0;
2512f7917c00SJeff Kirsher }
2513f7917c00SJeff Kirsher 
2514f7917c00SJeff Kirsher /**
2515f7917c00SJeff Kirsher  * t3_synchronize_rx - wait for current Rx processing on a port to complete
2516f7917c00SJeff Kirsher  * @adap: the adapter
2517f7917c00SJeff Kirsher  * @p: the port
2518f7917c00SJeff Kirsher  *
2519f7917c00SJeff Kirsher  * Ensures that current Rx processing on any of the queues associated with
2520f7917c00SJeff Kirsher  * the given port completes before returning.  We do this by acquiring and
2521f7917c00SJeff Kirsher  * releasing the locks of the response queues associated with the port.
2522f7917c00SJeff Kirsher  */
2523f7917c00SJeff Kirsher static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
2524f7917c00SJeff Kirsher {
2525f7917c00SJeff Kirsher 	int i;
2526f7917c00SJeff Kirsher 
2527f7917c00SJeff Kirsher 	for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
2528f7917c00SJeff Kirsher 		struct sge_rspq *q = &adap->sge.qs[i].rspq;
2529f7917c00SJeff Kirsher 
2530f7917c00SJeff Kirsher 		spin_lock_irq(&q->lock);
2531f7917c00SJeff Kirsher 		spin_unlock_irq(&q->lock);
2532f7917c00SJeff Kirsher 	}
2533f7917c00SJeff Kirsher }
2534f7917c00SJeff Kirsher 
2535f7917c00SJeff Kirsher static void cxgb_vlan_mode(struct net_device *dev, u32 features)
2536f7917c00SJeff Kirsher {
2537f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(dev);
2538f7917c00SJeff Kirsher 	struct adapter *adapter = pi->adapter;
2539f7917c00SJeff Kirsher 
2540f7917c00SJeff Kirsher 	if (adapter->params.rev > 0) {
2541f7917c00SJeff Kirsher 		t3_set_vlan_accel(adapter, 1 << pi->port_id,
2542f7917c00SJeff Kirsher 				  features & NETIF_F_HW_VLAN_RX);
2543f7917c00SJeff Kirsher 	} else {
2544f7917c00SJeff Kirsher 		/* single control for all ports */
2545f7917c00SJeff Kirsher 		unsigned int i, have_vlans = features & NETIF_F_HW_VLAN_RX;
2546f7917c00SJeff Kirsher 
2547f7917c00SJeff Kirsher 		for_each_port(adapter, i)
2548f7917c00SJeff Kirsher 			have_vlans |=
2549f7917c00SJeff Kirsher 				adapter->port[i]->features & NETIF_F_HW_VLAN_RX;
2550f7917c00SJeff Kirsher 
2551f7917c00SJeff Kirsher 		t3_set_vlan_accel(adapter, 1, have_vlans);
2552f7917c00SJeff Kirsher 	}
2553f7917c00SJeff Kirsher 	t3_synchronize_rx(adapter, pi);
2554f7917c00SJeff Kirsher }
2555f7917c00SJeff Kirsher 
2556f7917c00SJeff Kirsher static u32 cxgb_fix_features(struct net_device *dev, u32 features)
2557f7917c00SJeff Kirsher {
2558f7917c00SJeff Kirsher 	/*
2559f7917c00SJeff Kirsher 	 * Since there is no support for separate RX/TX VLAN acceleration
2560f7917c00SJeff Kirsher 	 * enable/disable, keep the TX flag in the same state as the RX flag.
2561f7917c00SJeff Kirsher 	 */
2562f7917c00SJeff Kirsher 	if (features & NETIF_F_HW_VLAN_RX)
2563f7917c00SJeff Kirsher 		features |= NETIF_F_HW_VLAN_TX;
2564f7917c00SJeff Kirsher 	else
2565f7917c00SJeff Kirsher 		features &= ~NETIF_F_HW_VLAN_TX;
2566f7917c00SJeff Kirsher 
2567f7917c00SJeff Kirsher 	return features;
2568f7917c00SJeff Kirsher }
2569f7917c00SJeff Kirsher 
2570f7917c00SJeff Kirsher static int cxgb_set_features(struct net_device *dev, u32 features)
2571f7917c00SJeff Kirsher {
2572f7917c00SJeff Kirsher 	u32 changed = dev->features ^ features;
2573f7917c00SJeff Kirsher 
2574f7917c00SJeff Kirsher 	if (changed & NETIF_F_HW_VLAN_RX)
2575f7917c00SJeff Kirsher 		cxgb_vlan_mode(dev, features);
2576f7917c00SJeff Kirsher 
2577f7917c00SJeff Kirsher 	return 0;
2578f7917c00SJeff Kirsher }
2579f7917c00SJeff Kirsher 
2580f7917c00SJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER
2581f7917c00SJeff Kirsher static void cxgb_netpoll(struct net_device *dev)
2582f7917c00SJeff Kirsher {
2583f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(dev);
2584f7917c00SJeff Kirsher 	struct adapter *adapter = pi->adapter;
2585f7917c00SJeff Kirsher 	int qidx;
2586f7917c00SJeff Kirsher 
2587f7917c00SJeff Kirsher 	for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
2588f7917c00SJeff Kirsher 		struct sge_qset *qs = &adapter->sge.qs[qidx];
2589f7917c00SJeff Kirsher 		void *source;
2590f7917c00SJeff Kirsher 
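		/* MSI-X handlers take the queue set as their cookie; the
		 * shared INTx/MSI handler takes the adapter. */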
2591f7917c00SJeff Kirsher 		if (adapter->flags & USING_MSIX)
2592f7917c00SJeff Kirsher 			source = qs;
2593f7917c00SJeff Kirsher 		else
2594f7917c00SJeff Kirsher 			source = adapter;
2595f7917c00SJeff Kirsher 
2596f7917c00SJeff Kirsher 		t3_intr_handler(adapter, qs->rspq.polling) (0, source);
2597f7917c00SJeff Kirsher 	}
2598f7917c00SJeff Kirsher }
2599f7917c00SJeff Kirsher #endif
2600f7917c00SJeff Kirsher 
2601f7917c00SJeff Kirsher /*
2602f7917c00SJeff Kirsher  * Periodic accumulation of MAC statistics.
2603f7917c00SJeff Kirsher  */
2604f7917c00SJeff Kirsher static void mac_stats_update(struct adapter *adapter)
2605f7917c00SJeff Kirsher {
2606f7917c00SJeff Kirsher 	int i;
2607f7917c00SJeff Kirsher 
2608f7917c00SJeff Kirsher 	for_each_port(adapter, i) {
2609f7917c00SJeff Kirsher 		struct net_device *dev = adapter->port[i];
2610f7917c00SJeff Kirsher 		struct port_info *p = netdev_priv(dev);
2611f7917c00SJeff Kirsher 
2612f7917c00SJeff Kirsher 		if (netif_running(dev)) {
2613f7917c00SJeff Kirsher 			spin_lock(&adapter->stats_lock);
2614f7917c00SJeff Kirsher 			t3_mac_update_stats(&p->mac);
2615f7917c00SJeff Kirsher 			spin_unlock(&adapter->stats_lock);
2616f7917c00SJeff Kirsher 		}
2617f7917c00SJeff Kirsher 	}
2618f7917c00SJeff Kirsher }
2619f7917c00SJeff Kirsher 
2620f7917c00SJeff Kirsher static void check_link_status(struct adapter *adapter)
2621f7917c00SJeff Kirsher {
2622f7917c00SJeff Kirsher 	int i;
2623f7917c00SJeff Kirsher 
2624f7917c00SJeff Kirsher 	for_each_port(adapter, i) {
2625f7917c00SJeff Kirsher 		struct net_device *dev = adapter->port[i];
2626f7917c00SJeff Kirsher 		struct port_info *p = netdev_priv(dev);
2627f7917c00SJeff Kirsher 		int link_fault;
2628f7917c00SJeff Kirsher 
2629f7917c00SJeff Kirsher 		spin_lock_irq(&adapter->work_lock);
2630f7917c00SJeff Kirsher 		link_fault = p->link_fault;
2631f7917c00SJeff Kirsher 		spin_unlock_irq(&adapter->work_lock);
2632f7917c00SJeff Kirsher 
2633f7917c00SJeff Kirsher 		if (link_fault) {
2634f7917c00SJeff Kirsher 			t3_link_fault(adapter, i);
2635f7917c00SJeff Kirsher 			continue;
2636f7917c00SJeff Kirsher 		}
2637f7917c00SJeff Kirsher 
2638f7917c00SJeff Kirsher 		if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev)) {
2639f7917c00SJeff Kirsher 			t3_xgm_intr_disable(adapter, i);
2640f7917c00SJeff Kirsher 			t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2641f7917c00SJeff Kirsher 
2642f7917c00SJeff Kirsher 			t3_link_changed(adapter, i);
2643f7917c00SJeff Kirsher 			t3_xgm_intr_enable(adapter, i);
2644f7917c00SJeff Kirsher 		}
2645f7917c00SJeff Kirsher 	}
2646f7917c00SJeff Kirsher }
2647f7917c00SJeff Kirsher 
2648f7917c00SJeff Kirsher static void check_t3b2_mac(struct adapter *adapter)
2649f7917c00SJeff Kirsher {
2650f7917c00SJeff Kirsher 	int i;
2651f7917c00SJeff Kirsher 
2652f7917c00SJeff Kirsher 	if (!rtnl_trylock())	/* synchronize with ifdown */
2653f7917c00SJeff Kirsher 		return;
2654f7917c00SJeff Kirsher 
2655f7917c00SJeff Kirsher 	for_each_port(adapter, i) {
2656f7917c00SJeff Kirsher 		struct net_device *dev = adapter->port[i];
2657f7917c00SJeff Kirsher 		struct port_info *p = netdev_priv(dev);
2658f7917c00SJeff Kirsher 		int status;
2659f7917c00SJeff Kirsher 
2660f7917c00SJeff Kirsher 		if (!netif_running(dev))
2661f7917c00SJeff Kirsher 			continue;
2662f7917c00SJeff Kirsher 
2663f7917c00SJeff Kirsher 		status = 0;
2664f7917c00SJeff Kirsher 		if (netif_running(dev) && netif_carrier_ok(dev))
2665f7917c00SJeff Kirsher 			status = t3b2_mac_watchdog_task(&p->mac);
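		/* 1: the watchdog toggled the MAC; 2: the MAC needs a full reinit */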
2666f7917c00SJeff Kirsher 		if (status == 1)
2667f7917c00SJeff Kirsher 			p->mac.stats.num_toggled++;
2668f7917c00SJeff Kirsher 		else if (status == 2) {
2669f7917c00SJeff Kirsher 			struct cmac *mac = &p->mac;
2670f7917c00SJeff Kirsher 
2671f7917c00SJeff Kirsher 			t3_mac_set_mtu(mac, dev->mtu);
2672f7917c00SJeff Kirsher 			t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
2673f7917c00SJeff Kirsher 			cxgb_set_rxmode(dev);
2674f7917c00SJeff Kirsher 			t3_link_start(&p->phy, mac, &p->link_config);
2675f7917c00SJeff Kirsher 			t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2676f7917c00SJeff Kirsher 			t3_port_intr_enable(adapter, p->port_id);
2677f7917c00SJeff Kirsher 			p->mac.stats.num_resets++;
2678f7917c00SJeff Kirsher 		}
2679f7917c00SJeff Kirsher 	}
2680f7917c00SJeff Kirsher 	rtnl_unlock();
2681f7917c00SJeff Kirsher }
2682f7917c00SJeff Kirsher 
2683f7917c00SJeff Kirsher 
2684f7917c00SJeff Kirsher static void t3_adap_check_task(struct work_struct *work)
2685f7917c00SJeff Kirsher {
2686f7917c00SJeff Kirsher 	struct adapter *adapter = container_of(work, struct adapter,
2687f7917c00SJeff Kirsher 					       adap_check_task.work);
2688f7917c00SJeff Kirsher 	const struct adapter_params *p = &adapter->params;
2689f7917c00SJeff Kirsher 	int port;
2690f7917c00SJeff Kirsher 	unsigned int v, status, reset;
2691f7917c00SJeff Kirsher 
2692f7917c00SJeff Kirsher 	adapter->check_task_cnt++;
2693f7917c00SJeff Kirsher 
2694f7917c00SJeff Kirsher 	check_link_status(adapter);
2695f7917c00SJeff Kirsher 
2696f7917c00SJeff Kirsher 	/* Accumulate MAC stats if needed */
2697f7917c00SJeff Kirsher 	if (!p->linkpoll_period ||
2698f7917c00SJeff Kirsher 	    (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2699f7917c00SJeff Kirsher 	    p->stats_update_period) {
2700f7917c00SJeff Kirsher 		mac_stats_update(adapter);
2701f7917c00SJeff Kirsher 		adapter->check_task_cnt = 0;
2702f7917c00SJeff Kirsher 	}
2703f7917c00SJeff Kirsher 
2704f7917c00SJeff Kirsher 	if (p->rev == T3_REV_B2)
2705f7917c00SJeff Kirsher 		check_t3b2_mac(adapter);
2706f7917c00SJeff Kirsher 
2707f7917c00SJeff Kirsher 	/*
2708f7917c00SJeff Kirsher 	 * Scan the XGMACs to check for various conditions which we want to
2709f7917c00SJeff Kirsher 	 * monitor in a periodic polling manner rather than via an interrupt
2710f7917c00SJeff Kirsher 	 * condition.  This is used for conditions which would otherwise flood
2711f7917c00SJeff Kirsher 	 * the system with interrupts when all we really need to know is that
2712f7917c00SJeff Kirsher 	 * they are occurring.  For each condition we count its detection and
2713f7917c00SJeff Kirsher 	 * reset it for the next polling loop.
2714f7917c00SJeff Kirsher 	 */
2715f7917c00SJeff Kirsher 	for_each_port(adapter, port) {
2716f7917c00SJeff Kirsher 		struct cmac *mac =  &adap2pinfo(adapter, port)->mac;
2717f7917c00SJeff Kirsher 		u32 cause;
2718f7917c00SJeff Kirsher 
2719f7917c00SJeff Kirsher 		cause = t3_read_reg(adapter, A_XGM_INT_CAUSE + mac->offset);
2720f7917c00SJeff Kirsher 		reset = 0;
2721f7917c00SJeff Kirsher 		if (cause & F_RXFIFO_OVERFLOW) {
2722f7917c00SJeff Kirsher 			mac->stats.rx_fifo_ovfl++;
2723f7917c00SJeff Kirsher 			reset |= F_RXFIFO_OVERFLOW;
2724f7917c00SJeff Kirsher 		}
2725f7917c00SJeff Kirsher 
2726f7917c00SJeff Kirsher 		t3_write_reg(adapter, A_XGM_INT_CAUSE + mac->offset, reset);
2727f7917c00SJeff Kirsher 	}
2728f7917c00SJeff Kirsher 
2729f7917c00SJeff Kirsher 	/*
2730f7917c00SJeff Kirsher 	 * We do the same as above for FL_EMPTY interrupts.
2731f7917c00SJeff Kirsher 	 */
2732f7917c00SJeff Kirsher 	status = t3_read_reg(adapter, A_SG_INT_CAUSE);
2733f7917c00SJeff Kirsher 	reset = 0;
2734f7917c00SJeff Kirsher 
2735f7917c00SJeff Kirsher 	if (status & F_FLEMPTY) {
2736f7917c00SJeff Kirsher 		struct sge_qset *qs = &adapter->sge.qs[0];
2737f7917c00SJeff Kirsher 		int i = 0;
2738f7917c00SJeff Kirsher 
2739f7917c00SJeff Kirsher 		reset |= F_FLEMPTY;
2740f7917c00SJeff Kirsher 
2741f7917c00SJeff Kirsher 		v = (t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS) >> S_FL0EMPTY) &
2742f7917c00SJeff Kirsher 		    0xffff;
2743f7917c00SJeff Kirsher 
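		/* One status bit per free list, two free lists per queue set */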
2744f7917c00SJeff Kirsher 		while (v) {
2745f7917c00SJeff Kirsher 			qs->fl[i].empty += (v & 1);
2746f7917c00SJeff Kirsher 			if (i)
2747f7917c00SJeff Kirsher 				qs++;
2748f7917c00SJeff Kirsher 			i ^= 1;
2749f7917c00SJeff Kirsher 			v >>= 1;
2750f7917c00SJeff Kirsher 		}
2751f7917c00SJeff Kirsher 	}
2752f7917c00SJeff Kirsher 
2753f7917c00SJeff Kirsher 	t3_write_reg(adapter, A_SG_INT_CAUSE, reset);
2754f7917c00SJeff Kirsher 
2755f7917c00SJeff Kirsher 	/* Schedule the next check update if any port is active. */
2756f7917c00SJeff Kirsher 	spin_lock_irq(&adapter->work_lock);
2757f7917c00SJeff Kirsher 	if (adapter->open_device_map & PORT_MASK)
2758f7917c00SJeff Kirsher 		schedule_chk_task(adapter);
2759f7917c00SJeff Kirsher 	spin_unlock_irq(&adapter->work_lock);
2760f7917c00SJeff Kirsher }
2761f7917c00SJeff Kirsher 
2762f7917c00SJeff Kirsher static void db_full_task(struct work_struct *work)
2763f7917c00SJeff Kirsher {
2764f7917c00SJeff Kirsher 	struct adapter *adapter = container_of(work, struct adapter,
2765f7917c00SJeff Kirsher 					       db_full_task);
2766f7917c00SJeff Kirsher 
2767f7917c00SJeff Kirsher 	cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_FULL, 0);
2768f7917c00SJeff Kirsher }
2769f7917c00SJeff Kirsher 
2770f7917c00SJeff Kirsher static void db_empty_task(struct work_struct *work)
2771f7917c00SJeff Kirsher {
2772f7917c00SJeff Kirsher 	struct adapter *adapter = container_of(work, struct adapter,
2773f7917c00SJeff Kirsher 					       db_empty_task);
2774f7917c00SJeff Kirsher 
2775f7917c00SJeff Kirsher 	cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_EMPTY, 0);
2776f7917c00SJeff Kirsher }
2777f7917c00SJeff Kirsher 
2778f7917c00SJeff Kirsher static void db_drop_task(struct work_struct *work)
2779f7917c00SJeff Kirsher {
2780f7917c00SJeff Kirsher 	struct adapter *adapter = container_of(work, struct adapter,
2781f7917c00SJeff Kirsher 					       db_drop_task);
2782f7917c00SJeff Kirsher 	unsigned long delay = 1000;
2783f7917c00SJeff Kirsher 	unsigned short r;
2784f7917c00SJeff Kirsher 
2785f7917c00SJeff Kirsher 	cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_DROP, 0);
2786f7917c00SJeff Kirsher 
2787f7917c00SJeff Kirsher 	/*
2788f7917c00SJeff Kirsher 	 * Sleep a while before ringing the driver qset dbs.
2789f7917c00SJeff Kirsher 	 * The delay is between 1000 and 2023 usecs.
2790f7917c00SJeff Kirsher 	 */
2791f7917c00SJeff Kirsher 	get_random_bytes(&r, 2);
2792f7917c00SJeff Kirsher 	delay += r & 1023;
2793f7917c00SJeff Kirsher 	set_current_state(TASK_UNINTERRUPTIBLE);
2794f7917c00SJeff Kirsher 	schedule_timeout(usecs_to_jiffies(delay));
2795f7917c00SJeff Kirsher 	ring_dbs(adapter);
2796f7917c00SJeff Kirsher }
2797f7917c00SJeff Kirsher 
2798f7917c00SJeff Kirsher /*
2799f7917c00SJeff Kirsher  * Processes external (PHY) interrupts in process context.
2800f7917c00SJeff Kirsher  */
2801f7917c00SJeff Kirsher static void ext_intr_task(struct work_struct *work)
2802f7917c00SJeff Kirsher {
2803f7917c00SJeff Kirsher 	struct adapter *adapter = container_of(work, struct adapter,
2804f7917c00SJeff Kirsher 					       ext_intr_handler_task);
2805f7917c00SJeff Kirsher 	int i;
2806f7917c00SJeff Kirsher 
2807f7917c00SJeff Kirsher 	/* Disable link fault interrupts */
2808f7917c00SJeff Kirsher 	for_each_port(adapter, i) {
2809f7917c00SJeff Kirsher 		struct net_device *dev = adapter->port[i];
2810f7917c00SJeff Kirsher 		struct port_info *p = netdev_priv(dev);
2811f7917c00SJeff Kirsher 
2812f7917c00SJeff Kirsher 		t3_xgm_intr_disable(adapter, i);
2813f7917c00SJeff Kirsher 		t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2814f7917c00SJeff Kirsher 	}
2815f7917c00SJeff Kirsher 
2816f7917c00SJeff Kirsher 	/* Re-enable link fault interrupts */
2817f7917c00SJeff Kirsher 	t3_phy_intr_handler(adapter);
2818f7917c00SJeff Kirsher 
2819f7917c00SJeff Kirsher 	for_each_port(adapter, i)
2820f7917c00SJeff Kirsher 		t3_xgm_intr_enable(adapter, i);
2821f7917c00SJeff Kirsher 
2822f7917c00SJeff Kirsher 	/* Now reenable external interrupts */
2823f7917c00SJeff Kirsher 	spin_lock_irq(&adapter->work_lock);
2824f7917c00SJeff Kirsher 	if (adapter->slow_intr_mask) {
2825f7917c00SJeff Kirsher 		adapter->slow_intr_mask |= F_T3DBG;
2826f7917c00SJeff Kirsher 		t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2827f7917c00SJeff Kirsher 		t3_write_reg(adapter, A_PL_INT_ENABLE0,
2828f7917c00SJeff Kirsher 			     adapter->slow_intr_mask);
2829f7917c00SJeff Kirsher 	}
2830f7917c00SJeff Kirsher 	spin_unlock_irq(&adapter->work_lock);
2831f7917c00SJeff Kirsher }
2832f7917c00SJeff Kirsher 
2833f7917c00SJeff Kirsher /*
2834f7917c00SJeff Kirsher  * Interrupt-context handler for external (PHY) interrupts.
2835f7917c00SJeff Kirsher  */
2836f7917c00SJeff Kirsher void t3_os_ext_intr_handler(struct adapter *adapter)
2837f7917c00SJeff Kirsher {
2838f7917c00SJeff Kirsher 	/*
2839f7917c00SJeff Kirsher 	 * Schedule a task to handle external interrupts as they may be slow,
2840f7917c00SJeff Kirsher 	 * and we use a mutex to protect the MDIO registers.  We disable PHY
2841f7917c00SJeff Kirsher 	 * interrupts in the meantime and let the task reenable them when
2842f7917c00SJeff Kirsher 	 * it's done.
2843f7917c00SJeff Kirsher 	 */
2844f7917c00SJeff Kirsher 	spin_lock(&adapter->work_lock);
2845f7917c00SJeff Kirsher 	if (adapter->slow_intr_mask) {
2846f7917c00SJeff Kirsher 		adapter->slow_intr_mask &= ~F_T3DBG;
2847f7917c00SJeff Kirsher 		t3_write_reg(adapter, A_PL_INT_ENABLE0,
2848f7917c00SJeff Kirsher 			     adapter->slow_intr_mask);
2849f7917c00SJeff Kirsher 		queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2850f7917c00SJeff Kirsher 	}
2851f7917c00SJeff Kirsher 	spin_unlock(&adapter->work_lock);
2852f7917c00SJeff Kirsher }
2853f7917c00SJeff Kirsher 
2854f7917c00SJeff Kirsher void t3_os_link_fault_handler(struct adapter *adapter, int port_id)
2855f7917c00SJeff Kirsher {
2856f7917c00SJeff Kirsher 	struct net_device *netdev = adapter->port[port_id];
2857f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(netdev);
2858f7917c00SJeff Kirsher 
2859f7917c00SJeff Kirsher 	spin_lock(&adapter->work_lock);
2860f7917c00SJeff Kirsher 	pi->link_fault = 1;
2861f7917c00SJeff Kirsher 	spin_unlock(&adapter->work_lock);
2862f7917c00SJeff Kirsher }
2863f7917c00SJeff Kirsher 
2864f7917c00SJeff Kirsher static int t3_adapter_error(struct adapter *adapter, int reset, int on_wq)
2865f7917c00SJeff Kirsher {
2866f7917c00SJeff Kirsher 	int i, ret = 0;
2867f7917c00SJeff Kirsher 
2868f7917c00SJeff Kirsher 	if (is_offload(adapter) &&
2869f7917c00SJeff Kirsher 	    test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2870f7917c00SJeff Kirsher 		cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0);
2871f7917c00SJeff Kirsher 		offload_close(&adapter->tdev);
2872f7917c00SJeff Kirsher 	}
2873f7917c00SJeff Kirsher 
2874f7917c00SJeff Kirsher 	/* Stop all ports */
2875f7917c00SJeff Kirsher 	for_each_port(adapter, i) {
2876f7917c00SJeff Kirsher 		struct net_device *netdev = adapter->port[i];
2877f7917c00SJeff Kirsher 
2878f7917c00SJeff Kirsher 		if (netif_running(netdev))
2879f7917c00SJeff Kirsher 			__cxgb_close(netdev, on_wq);
2880f7917c00SJeff Kirsher 	}
2881f7917c00SJeff Kirsher 
2882f7917c00SJeff Kirsher 	/* Stop SGE timers */
2883f7917c00SJeff Kirsher 	t3_stop_sge_timers(adapter);
2884f7917c00SJeff Kirsher 
2885f7917c00SJeff Kirsher 	adapter->flags &= ~FULL_INIT_DONE;
2886f7917c00SJeff Kirsher 
2887f7917c00SJeff Kirsher 	if (reset)
2888f7917c00SJeff Kirsher 		ret = t3_reset_adapter(adapter);
2889f7917c00SJeff Kirsher 
2890f7917c00SJeff Kirsher 	pci_disable_device(adapter->pdev);
2891f7917c00SJeff Kirsher 
2892f7917c00SJeff Kirsher 	return ret;
2893f7917c00SJeff Kirsher }
2894f7917c00SJeff Kirsher 
2895f7917c00SJeff Kirsher static int t3_reenable_adapter(struct adapter *adapter)
2896f7917c00SJeff Kirsher {
2897f7917c00SJeff Kirsher 	if (pci_enable_device(adapter->pdev)) {
2898f7917c00SJeff Kirsher 		dev_err(&adapter->pdev->dev,
2899f7917c00SJeff Kirsher 			"Cannot re-enable PCI device after reset.\n");
2900f7917c00SJeff Kirsher 		goto err;
2901f7917c00SJeff Kirsher 	}
2902f7917c00SJeff Kirsher 	pci_set_master(adapter->pdev);
2903f7917c00SJeff Kirsher 	pci_restore_state(adapter->pdev);
2904f7917c00SJeff Kirsher 	pci_save_state(adapter->pdev);
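	/* Re-save state so that a later recovery can restore it again */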
2905f7917c00SJeff Kirsher 
2906f7917c00SJeff Kirsher 	/* Free sge resources */
2907f7917c00SJeff Kirsher 	t3_free_sge_resources(adapter);
2908f7917c00SJeff Kirsher 
2909f7917c00SJeff Kirsher 	if (t3_replay_prep_adapter(adapter))
2910f7917c00SJeff Kirsher 		goto err;
2911f7917c00SJeff Kirsher 
2912f7917c00SJeff Kirsher 	return 0;
2913f7917c00SJeff Kirsher err:
2914f7917c00SJeff Kirsher 	return -1;
2915f7917c00SJeff Kirsher }
2916f7917c00SJeff Kirsher 
2917f7917c00SJeff Kirsher static void t3_resume_ports(struct adapter *adapter)
2918f7917c00SJeff Kirsher {
2919f7917c00SJeff Kirsher 	int i;
2920f7917c00SJeff Kirsher 
2921f7917c00SJeff Kirsher 	/* Restart the ports */
2922f7917c00SJeff Kirsher 	for_each_port(adapter, i) {
2923f7917c00SJeff Kirsher 		struct net_device *netdev = adapter->port[i];
2924f7917c00SJeff Kirsher 
2925f7917c00SJeff Kirsher 		if (netif_running(netdev)) {
2926f7917c00SJeff Kirsher 			if (cxgb_open(netdev)) {
2927f7917c00SJeff Kirsher 				dev_err(&adapter->pdev->dev,
2928f7917c00SJeff Kirsher 					"can't bring device back up"
2929f7917c00SJeff Kirsher 					" after reset\n");
2930f7917c00SJeff Kirsher 				continue;
2931f7917c00SJeff Kirsher 			}
2932f7917c00SJeff Kirsher 		}
2933f7917c00SJeff Kirsher 	}
2934f7917c00SJeff Kirsher 
2935f7917c00SJeff Kirsher 	if (is_offload(adapter) && !ofld_disable)
2936f7917c00SJeff Kirsher 		cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0);
2937f7917c00SJeff Kirsher }
2938f7917c00SJeff Kirsher 
2939f7917c00SJeff Kirsher /*
2940f7917c00SJeff Kirsher  * Processes a fatal error.
2941f7917c00SJeff Kirsher  * Bring the ports down, reset the chip, bring the ports back up.
2942f7917c00SJeff Kirsher  */
2943f7917c00SJeff Kirsher static void fatal_error_task(struct work_struct *work)
2944f7917c00SJeff Kirsher {
2945f7917c00SJeff Kirsher 	struct adapter *adapter = container_of(work, struct adapter,
2946f7917c00SJeff Kirsher 					       fatal_error_handler_task);
2947f7917c00SJeff Kirsher 	int err = 0;
2948f7917c00SJeff Kirsher 
2949f7917c00SJeff Kirsher 	rtnl_lock();
2950f7917c00SJeff Kirsher 	err = t3_adapter_error(adapter, 1, 1);
2951f7917c00SJeff Kirsher 	if (!err)
2952f7917c00SJeff Kirsher 		err = t3_reenable_adapter(adapter);
2953f7917c00SJeff Kirsher 	if (!err)
2954f7917c00SJeff Kirsher 		t3_resume_ports(adapter);
2955f7917c00SJeff Kirsher 
2956f7917c00SJeff Kirsher 	CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
2957f7917c00SJeff Kirsher 	rtnl_unlock();
2958f7917c00SJeff Kirsher }
2959f7917c00SJeff Kirsher 
2960f7917c00SJeff Kirsher void t3_fatal_err(struct adapter *adapter)
2961f7917c00SJeff Kirsher {
2962f7917c00SJeff Kirsher 	unsigned int fw_status[4];
2963f7917c00SJeff Kirsher 
2964f7917c00SJeff Kirsher 	if (adapter->flags & FULL_INIT_DONE) {
2965f7917c00SJeff Kirsher 		t3_sge_stop(adapter);
2966f7917c00SJeff Kirsher 		t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
2967f7917c00SJeff Kirsher 		t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
2968f7917c00SJeff Kirsher 		t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
2969f7917c00SJeff Kirsher 		t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);
2970f7917c00SJeff Kirsher 
2971f7917c00SJeff Kirsher 		spin_lock(&adapter->work_lock);
2972f7917c00SJeff Kirsher 		t3_intr_disable(adapter);
2973f7917c00SJeff Kirsher 		queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
2974f7917c00SJeff Kirsher 		spin_unlock(&adapter->work_lock);
2975f7917c00SJeff Kirsher 	}
2976f7917c00SJeff Kirsher 	CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
2977f7917c00SJeff Kirsher 	if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
2978f7917c00SJeff Kirsher 		CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
2979f7917c00SJeff Kirsher 			 fw_status[0], fw_status[1],
2980f7917c00SJeff Kirsher 			 fw_status[2], fw_status[3]);
2981f7917c00SJeff Kirsher }
2982f7917c00SJeff Kirsher 
2983f7917c00SJeff Kirsher /**
2984f7917c00SJeff Kirsher  * t3_io_error_detected - called when PCI error is detected
2985f7917c00SJeff Kirsher  * @pdev: Pointer to PCI device
2986f7917c00SJeff Kirsher  * @state: The current pci connection state
2987f7917c00SJeff Kirsher  *
2988f7917c00SJeff Kirsher  * This function is called after a PCI bus error affecting
2989f7917c00SJeff Kirsher  * this device has been detected.
2990f7917c00SJeff Kirsher  */
2991f7917c00SJeff Kirsher static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
2992f7917c00SJeff Kirsher 					     pci_channel_state_t state)
2993f7917c00SJeff Kirsher {
2994f7917c00SJeff Kirsher 	struct adapter *adapter = pci_get_drvdata(pdev);
2995f7917c00SJeff Kirsher 
2996f7917c00SJeff Kirsher 	if (state == pci_channel_io_perm_failure)
2997f7917c00SJeff Kirsher 		return PCI_ERS_RESULT_DISCONNECT;
2998f7917c00SJeff Kirsher 
2999f7917c00SJeff Kirsher 	t3_adapter_error(adapter, 0, 0);
3000f7917c00SJeff Kirsher 
3001f7917c00SJeff Kirsher 	/* Request a slot reset. */
3002f7917c00SJeff Kirsher 	return PCI_ERS_RESULT_NEED_RESET;
3003f7917c00SJeff Kirsher }
3004f7917c00SJeff Kirsher 
3005f7917c00SJeff Kirsher /**
3006f7917c00SJeff Kirsher  * t3_io_slot_reset - called after the pci bus has been reset.
3007f7917c00SJeff Kirsher  * @pdev: Pointer to PCI device
3008f7917c00SJeff Kirsher  *
3009f7917c00SJeff Kirsher  * Restart the card from scratch, as if from a cold boot.
3010f7917c00SJeff Kirsher  */
3011f7917c00SJeff Kirsher static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
3012f7917c00SJeff Kirsher {
3013f7917c00SJeff Kirsher 	struct adapter *adapter = pci_get_drvdata(pdev);
3014f7917c00SJeff Kirsher 
3015f7917c00SJeff Kirsher 	if (!t3_reenable_adapter(adapter))
3016f7917c00SJeff Kirsher 		return PCI_ERS_RESULT_RECOVERED;
3017f7917c00SJeff Kirsher 
3018f7917c00SJeff Kirsher 	return PCI_ERS_RESULT_DISCONNECT;
3019f7917c00SJeff Kirsher }
3020f7917c00SJeff Kirsher 
3021f7917c00SJeff Kirsher /**
3022f7917c00SJeff Kirsher  * t3_io_resume - called when traffic can start flowing again.
3023f7917c00SJeff Kirsher  * @pdev: Pointer to PCI device
3024f7917c00SJeff Kirsher  *
3025f7917c00SJeff Kirsher  * This callback is called when the error recovery driver tells us that
3026f7917c00SJeff Kirsher  * it's OK to resume normal operation.
3027f7917c00SJeff Kirsher  */
3028f7917c00SJeff Kirsher static void t3_io_resume(struct pci_dev *pdev)
3029f7917c00SJeff Kirsher {
3030f7917c00SJeff Kirsher 	struct adapter *adapter = pci_get_drvdata(pdev);
3031f7917c00SJeff Kirsher 
3032f7917c00SJeff Kirsher 	CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n",
3033f7917c00SJeff Kirsher 		 t3_read_reg(adapter, A_PCIE_PEX_ERR));
3034f7917c00SJeff Kirsher 
3035f7917c00SJeff Kirsher 	t3_resume_ports(adapter);
3036f7917c00SJeff Kirsher }
3037f7917c00SJeff Kirsher 
3038f7917c00SJeff Kirsher static struct pci_error_handlers t3_err_handler = {
3039f7917c00SJeff Kirsher 	.error_detected = t3_io_error_detected,
3040f7917c00SJeff Kirsher 	.slot_reset = t3_io_slot_reset,
3041f7917c00SJeff Kirsher 	.resume = t3_io_resume,
3042f7917c00SJeff Kirsher };
3043f7917c00SJeff Kirsher 
3044f7917c00SJeff Kirsher /*
3045f7917c00SJeff Kirsher  * Set the number of qsets based on the number of CPUs and the number of ports,
3046f7917c00SJeff Kirsher  * not to exceed the number of available qsets, assuming there are enough qsets
3047f7917c00SJeff Kirsher  * per port in HW.
3048f7917c00SJeff Kirsher  */
3049f7917c00SJeff Kirsher static void set_nqsets(struct adapter *adap)
3050f7917c00SJeff Kirsher {
3051f7917c00SJeff Kirsher 	int i, j = 0;
3052f7917c00SJeff Kirsher 	int num_cpus = num_online_cpus();
3053f7917c00SJeff Kirsher 	int hwports = adap->params.nports;
3054f7917c00SJeff Kirsher 	int nqsets = adap->msix_nvectors - 1;
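	/* One MSI-X vector is reserved for non-data (slow path) interrupts */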
3055f7917c00SJeff Kirsher 
3056f7917c00SJeff Kirsher 	if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
3057f7917c00SJeff Kirsher 		if (hwports == 2 &&
3058f7917c00SJeff Kirsher 		    (hwports * nqsets > SGE_QSETS ||
3059f7917c00SJeff Kirsher 		     num_cpus >= nqsets / hwports))
3060f7917c00SJeff Kirsher 			nqsets /= hwports;
3061f7917c00SJeff Kirsher 		if (nqsets > num_cpus)
3062f7917c00SJeff Kirsher 			nqsets = num_cpus;
3063f7917c00SJeff Kirsher 		if (nqsets < 1 || hwports == 4)
3064f7917c00SJeff Kirsher 			nqsets = 1;
3065f7917c00SJeff Kirsher 	} else
3066f7917c00SJeff Kirsher 		nqsets = 1;
3067f7917c00SJeff Kirsher 
3068f7917c00SJeff Kirsher 	for_each_port(adap, i) {
3069f7917c00SJeff Kirsher 		struct port_info *pi = adap2pinfo(adap, i);
3070f7917c00SJeff Kirsher 
3071f7917c00SJeff Kirsher 		pi->first_qset = j;
3072f7917c00SJeff Kirsher 		pi->nqsets = nqsets;
3073f7917c00SJeff Kirsher 		j = pi->first_qset + nqsets;
3074f7917c00SJeff Kirsher 
3075f7917c00SJeff Kirsher 		dev_info(&adap->pdev->dev,
3076f7917c00SJeff Kirsher 			 "Port %d using %d queue sets.\n", i, nqsets);
3077f7917c00SJeff Kirsher 	}
3078f7917c00SJeff Kirsher }
3079f7917c00SJeff Kirsher 
3080f7917c00SJeff Kirsher static int __devinit cxgb_enable_msix(struct adapter *adap)
3081f7917c00SJeff Kirsher {
3082f7917c00SJeff Kirsher 	struct msix_entry entries[SGE_QSETS + 1];
3083f7917c00SJeff Kirsher 	int vectors;
3084f7917c00SJeff Kirsher 	int i, err;
3085f7917c00SJeff Kirsher 
3086f7917c00SJeff Kirsher 	vectors = ARRAY_SIZE(entries);
3087f7917c00SJeff Kirsher 	for (i = 0; i < vectors; ++i)
3088f7917c00SJeff Kirsher 		entries[i].entry = i;
3089f7917c00SJeff Kirsher 
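	/* A positive return value reports how many vectors are
	 * available; retry the allocation with that many. */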
3090f7917c00SJeff Kirsher 	while ((err = pci_enable_msix(adap->pdev, entries, vectors)) > 0)
3091f7917c00SJeff Kirsher 		vectors = err;
3092f7917c00SJeff Kirsher 
3093f7917c00SJeff Kirsher 	if (err < 0)
3094f7917c00SJeff Kirsher 		pci_disable_msix(adap->pdev);
3095f7917c00SJeff Kirsher 
3096f7917c00SJeff Kirsher 	if (!err && vectors < (adap->params.nports + 1)) {
3097f7917c00SJeff Kirsher 		pci_disable_msix(adap->pdev);
3098f7917c00SJeff Kirsher 		err = -1;
3099f7917c00SJeff Kirsher 	}
3100f7917c00SJeff Kirsher 
3101f7917c00SJeff Kirsher 	if (!err) {
3102f7917c00SJeff Kirsher 		for (i = 0; i < vectors; ++i)
3103f7917c00SJeff Kirsher 			adap->msix_info[i].vec = entries[i].vector;
3104f7917c00SJeff Kirsher 		adap->msix_nvectors = vectors;
3105f7917c00SJeff Kirsher 	}
3106f7917c00SJeff Kirsher 
3107f7917c00SJeff Kirsher 	return err;
3108f7917c00SJeff Kirsher }
3109f7917c00SJeff Kirsher 
3110f7917c00SJeff Kirsher static void __devinit print_port_info(struct adapter *adap,
3111f7917c00SJeff Kirsher 				      const struct adapter_info *ai)
3112f7917c00SJeff Kirsher {
3113f7917c00SJeff Kirsher 	static const char *pci_variant[] = {
3114f7917c00SJeff Kirsher 		"PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
3115f7917c00SJeff Kirsher 	};
3116f7917c00SJeff Kirsher 
3117f7917c00SJeff Kirsher 	int i;
3118f7917c00SJeff Kirsher 	char buf[80];
3119f7917c00SJeff Kirsher 
3120f7917c00SJeff Kirsher 	if (is_pcie(adap))
3121f7917c00SJeff Kirsher 		snprintf(buf, sizeof(buf), "%s x%d",
3122f7917c00SJeff Kirsher 			 pci_variant[adap->params.pci.variant],
3123f7917c00SJeff Kirsher 			 adap->params.pci.width);
3124f7917c00SJeff Kirsher 	else
3125f7917c00SJeff Kirsher 		snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
3126f7917c00SJeff Kirsher 			 pci_variant[adap->params.pci.variant],
3127f7917c00SJeff Kirsher 			 adap->params.pci.speed, adap->params.pci.width);
3128f7917c00SJeff Kirsher 
3129f7917c00SJeff Kirsher 	for_each_port(adap, i) {
3130f7917c00SJeff Kirsher 		struct net_device *dev = adap->port[i];
3131f7917c00SJeff Kirsher 		const struct port_info *pi = netdev_priv(dev);
3132f7917c00SJeff Kirsher 
3133f7917c00SJeff Kirsher 		if (!test_bit(i, &adap->registered_device_map))
3134f7917c00SJeff Kirsher 			continue;
3135f7917c00SJeff Kirsher 		printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
3136f7917c00SJeff Kirsher 		       dev->name, ai->desc, pi->phy.desc,
3137f7917c00SJeff Kirsher 		       is_offload(adap) ? "R" : "", adap->params.rev, buf,
3138f7917c00SJeff Kirsher 		       (adap->flags & USING_MSIX) ? " MSI-X" :
3139f7917c00SJeff Kirsher 		       (adap->flags & USING_MSI) ? " MSI" : "");
3140f7917c00SJeff Kirsher 		if (adap->name == dev->name && adap->params.vpd.mclk)
3141f7917c00SJeff Kirsher 			printk(KERN_INFO
3142f7917c00SJeff Kirsher 			       "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
3143f7917c00SJeff Kirsher 			       adap->name, t3_mc7_size(&adap->cm) >> 20,
3144f7917c00SJeff Kirsher 			       t3_mc7_size(&adap->pmtx) >> 20,
3145f7917c00SJeff Kirsher 			       t3_mc7_size(&adap->pmrx) >> 20,
3146f7917c00SJeff Kirsher 			       adap->params.vpd.sn);
3147f7917c00SJeff Kirsher 	}
3148f7917c00SJeff Kirsher }
3149f7917c00SJeff Kirsher 
3150f7917c00SJeff Kirsher static const struct net_device_ops cxgb_netdev_ops = {
3151f7917c00SJeff Kirsher 	.ndo_open		= cxgb_open,
3152f7917c00SJeff Kirsher 	.ndo_stop		= cxgb_close,
3153f7917c00SJeff Kirsher 	.ndo_start_xmit		= t3_eth_xmit,
3154f7917c00SJeff Kirsher 	.ndo_get_stats		= cxgb_get_stats,
3155f7917c00SJeff Kirsher 	.ndo_validate_addr	= eth_validate_addr,
3156afc4b13dSJiri Pirko 	.ndo_set_rx_mode	= cxgb_set_rxmode,
3157f7917c00SJeff Kirsher 	.ndo_do_ioctl		= cxgb_ioctl,
3158f7917c00SJeff Kirsher 	.ndo_change_mtu		= cxgb_change_mtu,
3159f7917c00SJeff Kirsher 	.ndo_set_mac_address	= cxgb_set_mac_addr,
3160f7917c00SJeff Kirsher 	.ndo_fix_features	= cxgb_fix_features,
3161f7917c00SJeff Kirsher 	.ndo_set_features	= cxgb_set_features,
3162f7917c00SJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER
3163f7917c00SJeff Kirsher 	.ndo_poll_controller	= cxgb_netpoll,
3164f7917c00SJeff Kirsher #endif
3165f7917c00SJeff Kirsher };
3166f7917c00SJeff Kirsher 
3167f7917c00SJeff Kirsher static void __devinit cxgb3_init_iscsi_mac(struct net_device *dev)
3168f7917c00SJeff Kirsher {
3169f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(dev);
3170f7917c00SJeff Kirsher 
3171f7917c00SJeff Kirsher 	memcpy(pi->iscsic.mac_addr, dev->dev_addr, ETH_ALEN);
3172f7917c00SJeff Kirsher 	pi->iscsic.mac_addr[3] |= 0x80;
3173f7917c00SJeff Kirsher }
3174f7917c00SJeff Kirsher 
3175f7917c00SJeff Kirsher static int __devinit init_one(struct pci_dev *pdev,
3176f7917c00SJeff Kirsher 			      const struct pci_device_id *ent)
3177f7917c00SJeff Kirsher {
3178f7917c00SJeff Kirsher 	static int version_printed;
3179f7917c00SJeff Kirsher 
3180f7917c00SJeff Kirsher 	int i, err, pci_using_dac = 0;
3181f7917c00SJeff Kirsher 	resource_size_t mmio_start, mmio_len;
3182f7917c00SJeff Kirsher 	const struct adapter_info *ai;
3183f7917c00SJeff Kirsher 	struct adapter *adapter = NULL;
3184f7917c00SJeff Kirsher 	struct port_info *pi;
3185f7917c00SJeff Kirsher 
3186f7917c00SJeff Kirsher 	if (!version_printed) {
3187f7917c00SJeff Kirsher 		printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
3188f7917c00SJeff Kirsher 		++version_printed;
3189f7917c00SJeff Kirsher 	}
3190f7917c00SJeff Kirsher 
3191f7917c00SJeff Kirsher 	if (!cxgb3_wq) {
3192f7917c00SJeff Kirsher 		cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
3193f7917c00SJeff Kirsher 		if (!cxgb3_wq) {
3194f7917c00SJeff Kirsher 			printk(KERN_ERR DRV_NAME
3195f7917c00SJeff Kirsher 			       ": cannot initialize work queue\n");
3196f7917c00SJeff Kirsher 			return -ENOMEM;
3197f7917c00SJeff Kirsher 		}
3198f7917c00SJeff Kirsher 	}
3199f7917c00SJeff Kirsher 
3200f7917c00SJeff Kirsher 	err = pci_enable_device(pdev);
3201f7917c00SJeff Kirsher 	if (err) {
3202f7917c00SJeff Kirsher 		dev_err(&pdev->dev, "cannot enable PCI device\n");
3203f7917c00SJeff Kirsher 		goto out;
3204f7917c00SJeff Kirsher 	}
3205f7917c00SJeff Kirsher 
3206f7917c00SJeff Kirsher 	err = pci_request_regions(pdev, DRV_NAME);
3207f7917c00SJeff Kirsher 	if (err) {
3208f7917c00SJeff Kirsher 		/* Just info, some other driver may have claimed the device. */
3209f7917c00SJeff Kirsher 		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
3210f7917c00SJeff Kirsher 		goto out_disable_device;
3211f7917c00SJeff Kirsher 	}
3212f7917c00SJeff Kirsher 
3213f7917c00SJeff Kirsher 	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
3214f7917c00SJeff Kirsher 		pci_using_dac = 1;
3215f7917c00SJeff Kirsher 		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
3216f7917c00SJeff Kirsher 		if (err) {
3217f7917c00SJeff Kirsher 			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
3218f7917c00SJeff Kirsher 			       "coherent allocations\n");
3219f7917c00SJeff Kirsher 			goto out_release_regions;
3220f7917c00SJeff Kirsher 		}
3221f7917c00SJeff Kirsher 	} else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
3222f7917c00SJeff Kirsher 		dev_err(&pdev->dev, "no usable DMA configuration\n");
3223f7917c00SJeff Kirsher 		goto out_release_regions;
3224f7917c00SJeff Kirsher 	}
3225f7917c00SJeff Kirsher 
3226f7917c00SJeff Kirsher 	pci_set_master(pdev);
3227f7917c00SJeff Kirsher 	pci_save_state(pdev);
3228f7917c00SJeff Kirsher 
3229f7917c00SJeff Kirsher 	mmio_start = pci_resource_start(pdev, 0);
3230f7917c00SJeff Kirsher 	mmio_len = pci_resource_len(pdev, 0);
3231f7917c00SJeff Kirsher 	ai = t3_get_adapter_info(ent->driver_data);
3232f7917c00SJeff Kirsher 
3233f7917c00SJeff Kirsher 	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
3234f7917c00SJeff Kirsher 	if (!adapter) {
3235f7917c00SJeff Kirsher 		err = -ENOMEM;
3236f7917c00SJeff Kirsher 		goto out_release_regions;
3237f7917c00SJeff Kirsher 	}
3238f7917c00SJeff Kirsher 
3239f7917c00SJeff Kirsher 	adapter->nofail_skb =
3240f7917c00SJeff Kirsher 		alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_KERNEL);
3241f7917c00SJeff Kirsher 	if (!adapter->nofail_skb) {
3242f7917c00SJeff Kirsher 		dev_err(&pdev->dev, "cannot allocate nofail buffer\n");
3243f7917c00SJeff Kirsher 		err = -ENOMEM;
3244f7917c00SJeff Kirsher 		goto out_free_adapter;
3245f7917c00SJeff Kirsher 	}
3246f7917c00SJeff Kirsher 
3247f7917c00SJeff Kirsher 	adapter->regs = ioremap_nocache(mmio_start, mmio_len);
3248f7917c00SJeff Kirsher 	if (!adapter->regs) {
3249f7917c00SJeff Kirsher 		dev_err(&pdev->dev, "cannot map device registers\n");
3250f7917c00SJeff Kirsher 		err = -ENOMEM;
3251f7917c00SJeff Kirsher 		goto out_free_adapter;
3252f7917c00SJeff Kirsher 	}
3253f7917c00SJeff Kirsher 
3254f7917c00SJeff Kirsher 	adapter->pdev = pdev;
3255f7917c00SJeff Kirsher 	adapter->name = pci_name(pdev);
3256f7917c00SJeff Kirsher 	adapter->msg_enable = dflt_msg_enable;
3257f7917c00SJeff Kirsher 	adapter->mmio_len = mmio_len;
3258f7917c00SJeff Kirsher 
3259f7917c00SJeff Kirsher 	mutex_init(&adapter->mdio_lock);
3260f7917c00SJeff Kirsher 	spin_lock_init(&adapter->work_lock);
3261f7917c00SJeff Kirsher 	spin_lock_init(&adapter->stats_lock);
3262f7917c00SJeff Kirsher 
3263f7917c00SJeff Kirsher 	INIT_LIST_HEAD(&adapter->adapter_list);
3264f7917c00SJeff Kirsher 	INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
3265f7917c00SJeff Kirsher 	INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
3266f7917c00SJeff Kirsher 
3267f7917c00SJeff Kirsher 	INIT_WORK(&adapter->db_full_task, db_full_task);
3268f7917c00SJeff Kirsher 	INIT_WORK(&adapter->db_empty_task, db_empty_task);
3269f7917c00SJeff Kirsher 	INIT_WORK(&adapter->db_drop_task, db_drop_task);
3270f7917c00SJeff Kirsher 
3271f7917c00SJeff Kirsher 	INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
3272f7917c00SJeff Kirsher 
3273f7917c00SJeff Kirsher 	for (i = 0; i < ai->nports0 + ai->nports1; ++i) {
3274f7917c00SJeff Kirsher 		struct net_device *netdev;
3275f7917c00SJeff Kirsher 
3276f7917c00SJeff Kirsher 		netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS);
3277f7917c00SJeff Kirsher 		if (!netdev) {
3278f7917c00SJeff Kirsher 			err = -ENOMEM;
3279f7917c00SJeff Kirsher 			goto out_free_dev;
3280f7917c00SJeff Kirsher 		}
3281f7917c00SJeff Kirsher 
3282f7917c00SJeff Kirsher 		SET_NETDEV_DEV(netdev, &pdev->dev);
3283f7917c00SJeff Kirsher 
3284f7917c00SJeff Kirsher 		adapter->port[i] = netdev;
3285f7917c00SJeff Kirsher 		pi = netdev_priv(netdev);
3286f7917c00SJeff Kirsher 		pi->adapter = adapter;
3287f7917c00SJeff Kirsher 		pi->port_id = i;
3288f7917c00SJeff Kirsher 		netif_carrier_off(netdev);
3289f7917c00SJeff Kirsher 		netdev->irq = pdev->irq;
3290f7917c00SJeff Kirsher 		netdev->mem_start = mmio_start;
3291f7917c00SJeff Kirsher 		netdev->mem_end = mmio_start + mmio_len - 1;
3292f7917c00SJeff Kirsher 		netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
3293f7917c00SJeff Kirsher 			NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX;
3294f7917c00SJeff Kirsher 		netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_TX;
3295f7917c00SJeff Kirsher 		if (pci_using_dac)
3296f7917c00SJeff Kirsher 			netdev->features |= NETIF_F_HIGHDMA;
3297f7917c00SJeff Kirsher 
3298f7917c00SJeff Kirsher 		netdev->netdev_ops = &cxgb_netdev_ops;
3299f7917c00SJeff Kirsher 		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
3300f7917c00SJeff Kirsher 	}
3301f7917c00SJeff Kirsher 
3302f7917c00SJeff Kirsher 	pci_set_drvdata(pdev, adapter);
3303f7917c00SJeff Kirsher 	if (t3_prep_adapter(adapter, ai, 1) < 0) {
3304f7917c00SJeff Kirsher 		err = -ENODEV;
3305f7917c00SJeff Kirsher 		goto out_free_dev;
3306f7917c00SJeff Kirsher 	}
3307f7917c00SJeff Kirsher 
3308f7917c00SJeff Kirsher 	/*
3309f7917c00SJeff Kirsher 	 * The card is now ready to go.  If any errors occur during device
3310f7917c00SJeff Kirsher 	 * registration we do not fail the whole card but rather proceed only
3311f7917c00SJeff Kirsher 	 * with the ports we manage to register successfully.  However we must
3312f7917c00SJeff Kirsher 	 * register at least one net device.
3313f7917c00SJeff Kirsher 	 */
	for_each_port(adapter, i) {
		err = register_netdev(adapter->port[i]);
		if (err) {
			dev_warn(&pdev->dev,
				 "cannot register net device %s, skipping\n",
				 adapter->port[i]->name);
		} else {
			/*
			 * Change the name we use for messages to the name of
			 * the first successfully registered interface.
			 */
			if (!adapter->registered_device_map)
				adapter->name = adapter->port[i]->name;

			__set_bit(i, &adapter->registered_device_map);
		}
	}
	if (!adapter->registered_device_map) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}

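	/* Set up the MAC address used by each port's iSCSI function. */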
	for_each_port(adapter, i)
		cxgb3_init_iscsi_mac(adapter->port[i]);

	/* The driver is ready; reflect it on the LEDs. */
	t3_led_ready(adapter);

	if (is_offload(adapter)) {
		__set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
		cxgb3_adapter_ofld(adapter);
	}

	/*
	 * Pick the interrupt mechanism: try MSI-X first, then MSI, and fall
	 * back to legacy INTx if neither can be enabled.  The "msi"
	 * parameter caps the most advanced mechanism we may try.
	 */
	if (msi > 1 && cxgb_enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0)
		adapter->flags |= USING_MSI;

	set_nqsets(adapter);

	err = sysfs_create_group(&adapter->port[0]->dev.kobj,
				 &cxgb3_attr_group);
	if (err)
		dev_warn(&pdev->dev, "could not create sysfs group\n");

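	/* Apply the initial VLAN settings on each port. */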
	for_each_port(adapter, i)
		cxgb_vlan_mode(adapter->port[i], adapter->port[i]->features);

	print_port_info(adapter, ai);
	return 0;

out_free_dev:
	iounmap(adapter->regs);
	for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i)
		if (adapter->port[i])
			free_netdev(adapter->port[i]);

out_free_adapter:
	kfree(adapter);

out_release_regions:
	pci_release_regions(pdev);
out_disable_device:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
out:
	return err;
}

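/*
 * Undo init_one(): quiesce the SGE, detach the offload layer, unregister
 * and free the net devices, then release MMIO, PCI and memory resources.
 */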
static void __devexit remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	if (adapter) {
		int i;

		t3_sge_stop(adapter);
		sysfs_remove_group(&adapter->port[0]->dev.kobj,
				   &cxgb3_attr_group);

		if (is_offload(adapter)) {
			cxgb3_adapter_unofld(adapter);
			if (test_bit(OFFLOAD_DEVMAP_BIT,
				     &adapter->open_device_map))
				offload_close(&adapter->tdev);
		}

		for_each_port(adapter, i)
			if (test_bit(i, &adapter->registered_device_map))
				unregister_netdev(adapter->port[i]);

		t3_stop_sge_timers(adapter);
		t3_free_sge_resources(adapter);
		cxgb_disable_msi(adapter);

		for_each_port(adapter, i)
			if (adapter->port[i])
				free_netdev(adapter->port[i]);

		iounmap(adapter->regs);
		if (adapter->nofail_skb)
			kfree_skb(adapter->nofail_skb);
		kfree(adapter);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}

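/*
 * PCI driver glue: init_one() and remove_one() are the probe/remove entry
 * points invoked by the PCI core, and .err_handler supplies the driver's
 * PCI error recovery callbacks.
 */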
static struct pci_driver driver = {
	.name = DRV_NAME,
	.id_table = cxgb3_pci_tbl,
	.probe = init_one,
	.remove = __devexit_p(remove_one),
	.err_handler = &t3_err_handler,
};

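/*
 * Module load/unload: loading initialises the offload layer and registers
 * the PCI driver; unloading unregisters it and destroys the cxgb3 workqueue
 * if it was created.
 */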
static int __init cxgb3_init_module(void)
{
	int ret;

	cxgb3_offload_init();

	ret = pci_register_driver(&driver);
	return ret;
}

static void __exit cxgb3_cleanup_module(void)
{
	pci_unregister_driver(&driver);
	if (cxgb3_wq)
		destroy_workqueue(cxgb3_wq);
}

module_init(cxgb3_init_module);
module_exit(cxgb3_cleanup_module);
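
/*
 * For reference, the code above follows the standard registration pattern
 * for a PCI driver.  A minimal sketch of that pattern, using hypothetical
 * names and placeholder IDs that are not part of this driver, would look
 * roughly like this (real drivers do far more work in probe/remove):
 *
 *	static const struct pci_device_id example_pci_tbl[] = {
 *		{ PCI_DEVICE(EXAMPLE_VENDOR_ID, EXAMPLE_DEVICE_ID) },
 *		{ 0, }
 *	};
 *	MODULE_DEVICE_TABLE(pci, example_pci_tbl);
 *
 *	static int example_probe(struct pci_dev *pdev,
 *				 const struct pci_device_id *ent)
 *	{
 *		return pci_enable_device(pdev);
 *	}
 *
 *	static void example_remove(struct pci_dev *pdev)
 *	{
 *		pci_disable_device(pdev);
 *	}
 *
 *	static struct pci_driver example_driver = {
 *		.name	  = "example",
 *		.id_table = example_pci_tbl,
 *		.probe	  = example_probe,
 *		.remove	  = example_remove,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return pci_register_driver(&example_driver);
 *	}
 *	module_init(example_init);
 *
 *	static void __exit example_exit(void)
 *	{
 *		pci_unregister_driver(&example_driver);
 *	}
 *	module_exit(example_exit);
 */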