/*
 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mdio.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <linux/stringify.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/nospec.h>

#include "common.h"
#include "cxgb3_ioctl.h"
#include "regs.h"
#include "cxgb3_offload.h"
#include "version.h"

#include "cxgb3_ctl_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"

enum {
	MAX_TXQ_ENTRIES = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES = 16384,
	MAX_RX_BUFFERS = 16384,
	MAX_RX_JUMBO_BUFFERS = 16384,
	MIN_TXQ_ENTRIES = 4,
	MIN_CTRL_TXQ_ENTRIES = 4,
	MIN_RSPQ_ENTRIES = 32,
	MIN_FL_ENTRIES = 32
};

#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EEPROM_MAGIC 0x38E2F10C

#define CH_DEVICE(devid, idx) \
	{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }

static const struct pci_device_id cxgb3_pci_tbl[] = {
	CH_DEVICE(0x20, 0),	/* PE9000 */
	CH_DEVICE(0x21, 1),	/* T302E */
	CH_DEVICE(0x22, 2),	/* T310E */
	CH_DEVICE(0x23, 3),	/* T320X */
	CH_DEVICE(0x24, 1),	/* T302X */
	CH_DEVICE(0x25, 3),	/* T320E */
	CH_DEVICE(0x26, 2),	/* T310X */
	CH_DEVICE(0x30, 2),	/* T3B10 */
	CH_DEVICE(0x31, 3),	/* T3B20 */
	CH_DEVICE(0x32, 1),	/* T3B02 */
	CH_DEVICE(0x35, 6),	/* T3C20-derived T3C10 */
	CH_DEVICE(0x36, 3),	/* S320E-CR */
	CH_DEVICE(0x37, 7),	/* N320E-G2 */
	{0,}
};

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");

/*
 * The driver enables offload as a default.
 * To disable it, use ofld_disable = 1.
 */

static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");

/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete.  Get our own work queue to solve this.
 */
struct workqueue_struct *cxgb3_wq;

/**
 *	link_report - show link status and link speed/duplex
 *	@dev: the port whose settings are to be reported
 *
 *	Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		netdev_info(dev, "link down\n");
	else {
		const char *s = "10Mbps";
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_config.speed) {
		case SPEED_10000:
			s = "10Gbps";
			break;
		case SPEED_1000:
			s = "1000Mbps";
			break;
		case SPEED_100:
			s = "100Mbps";
			break;
		}

		netdev_info(dev, "link up, %s, %s-duplex\n",
			    s, p->link_config.duplex == DUPLEX_FULL
			    ? "full" : "half");
	}
}

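/*
 * Control draining of the MAC TX FIFO while a link fault is present:
 * setting F_ENDROPPKT makes the XGMAC drop the packets queued in its TX
 * FIFO instead of stalling, and clearing it restores normal transmission
 * once the link comes back up.
 */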
static void enable_tx_fifo_drain(struct adapter *adapter,
				 struct port_info *pi)
{
	t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset, 0,
			 F_ENDROPPKT);
	t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, 0);
	t3_write_reg(adapter, A_XGM_TX_CTRL + pi->mac.offset, F_TXEN);
	t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, F_RXEN);
}

static void disable_tx_fifo_drain(struct adapter *adapter,
				  struct port_info *pi)
{
	t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset,
			 F_ENDROPPKT, 0);
}

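/**
 *	t3_os_link_fault - handle a link fault state change
 *	@adap: the adapter
 *	@port_id: the port whose fault state has changed
 *	@state: the new link state (non-zero once the fault has cleared)
 *
 *	Updates the carrier state of the port, re-enables the MAC TX path or
 *	drains its FIFO as appropriate, and reports the new link status.
 */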
void t3_os_link_fault(struct adapter *adap, int port_id, int state)
{
	struct net_device *dev = adap->port[port_id];
	struct port_info *pi = netdev_priv(dev);

	if (state == netif_carrier_ok(dev))
		return;

	if (state) {
		struct cmac *mac = &pi->mac;

		netif_carrier_on(dev);

		disable_tx_fifo_drain(adap, pi);

		/* Clear local faults */
		t3_xgm_intr_disable(adap, pi->port_id);
		t3_read_reg(adap, A_XGM_INT_STATUS +
				    pi->mac.offset);
		t3_write_reg(adap,
			     A_XGM_INT_CAUSE + pi->mac.offset,
			     F_XGM_INT);

		t3_set_reg_field(adap,
				 A_XGM_INT_ENABLE +
				 pi->mac.offset,
				 F_XGM_INT, F_XGM_INT);
		t3_xgm_intr_enable(adap, pi->port_id);

		t3_mac_enable(mac, MAC_DIRECTION_TX);
	} else {
		netif_carrier_off(dev);

		/* Flush TX FIFO */
		enable_tx_fifo_drain(adap, pi);
	}
	link_report(dev);
}

/**
 *	t3_os_link_changed - handle link status changes
 *	@adapter: the adapter associated with the link change
 *	@port_id: the port index whose link status has changed
 *	@link_stat: the new status of the link
 *	@speed: the new speed setting
 *	@duplex: the new duplex setting
 *	@pause: the new flow-control setting
 *
 *	This is the OS-dependent handler for link status changes.  The OS
 *	neutral handler takes care of most of the processing for these events,
 *	then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
			int speed, int duplex, int pause)
{
	struct net_device *dev = adapter->port[port_id];
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	/* Skip changes from disabled ports. */
	if (!netif_running(dev))
		return;

	if (link_stat != netif_carrier_ok(dev)) {
		if (link_stat) {
			disable_tx_fifo_drain(adapter, pi);

			t3_mac_enable(mac, MAC_DIRECTION_RX);

			/* Clear local faults */
			t3_xgm_intr_disable(adapter, pi->port_id);
			t3_read_reg(adapter, A_XGM_INT_STATUS +
				    pi->mac.offset);
			t3_write_reg(adapter,
				     A_XGM_INT_CAUSE + pi->mac.offset,
				     F_XGM_INT);

			t3_set_reg_field(adapter,
					 A_XGM_INT_ENABLE + pi->mac.offset,
					 F_XGM_INT, F_XGM_INT);
			t3_xgm_intr_enable(adapter, pi->port_id);

			netif_carrier_on(dev);
		} else {
			netif_carrier_off(dev);

			t3_xgm_intr_disable(adapter, pi->port_id);
			t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
			t3_set_reg_field(adapter,
					 A_XGM_INT_ENABLE + pi->mac.offset,
					 F_XGM_INT, 0);

			if (is_10G(adapter))
				pi->phy.ops->power_down(&pi->phy, 1);

			t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
			t3_mac_disable(mac, MAC_DIRECTION_RX);
			t3_link_start(&pi->phy, mac, &pi->link_config);

			/* Flush TX FIFO */
			enable_tx_fifo_drain(adapter, pi);
		}

		link_report(dev);
	}
}

/**
 *	t3_os_phymod_changed - handle PHY module changes
 *	@adap: the adapter associated with the PHY module change
 *	@port_id: the port index whose PHY module has changed
 *
 *	This is the OS-dependent handler for PHY module changes.  It is
 *	invoked when a PHY module is removed or inserted for any OS-specific
 *	processing.
 */
void t3_os_phymod_changed(struct adapter *adap, int port_id)
{
	static const char *mod_str[] = {
		NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
	};

	const struct net_device *dev = adap->port[port_id];
	const struct port_info *pi = netdev_priv(dev);

	if (pi->phy.modtype == phy_modtype_none)
		netdev_info(dev, "PHY module unplugged\n");
	else
		netdev_info(dev, "%s PHY module inserted\n",
			    mod_str[pi->phy.modtype]);
}

static void cxgb_set_rxmode(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);

	t3_mac_set_rx_mode(&pi->mac, dev);
}

/**
 *	link_start - enable a port
 *	@dev: the device to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	t3_mac_reset(mac);
	t3_mac_set_num_ucast(mac, MAX_MAC_IDX);
	t3_mac_set_mtu(mac, dev->mtu);
	t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
	t3_mac_set_address(mac, SAN_MAC_IDX, pi->iscsic.mac_addr);
	t3_mac_set_rx_mode(mac, dev);
	t3_link_start(&pi->phy, mac, &pi->link_config);
	t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}

static inline void cxgb_disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}

/*
 * Interrupt handler for asynchronous events used with MSI-X.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
	t3_slow_intr_handler(cookie);
	return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

	snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
	adap->msix_info[0].desc[n] = 0;

	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++) {
			snprintf(adap->msix_info[msi_idx].desc, n,
				 "%s-%d", d->name, pi->first_qset + i);
			adap->msix_info[msi_idx].desc[n] = 0;
		}
	}
}

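/*
 * Request the MSI-X data interrupts, one vector per SGE queue set.  Any
 * vectors already requested are released again if a request fails.
 */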
static int request_msix_data_irqs(struct adapter *adap)
{
	int i, j, err, qidx = 0;

	for_each_port(adap, i) {
		int nqsets = adap2pinfo(adap, i)->nqsets;

		for (j = 0; j < nqsets; ++j) {
			err = request_irq(adap->msix_info[qidx + 1].vec,
					  t3_intr_handler(adap,
							  adap->sge.qs[qidx].
							  rspq.polling), 0,
					  adap->msix_info[qidx + 1].desc,
					  &adap->sge.qs[qidx]);
			if (err) {
				while (--qidx >= 0)
					free_irq(adap->msix_info[qidx + 1].vec,
						 &adap->sge.qs[qidx]);
				return err;
			}
			qidx++;
		}
	}
	return 0;
}

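/*
 * Release the adapter's IRQs: with MSI-X this is the asynchronous-event
 * vector plus one vector per queue set, otherwise the single PCI IRQ.
 */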
static void free_irq_resources(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		int i, n = 0;

		free_irq(adapter->msix_info[0].vec, adapter);
		for_each_port(adapter, i)
			n += adap2pinfo(adapter, i)->nqsets;

		for (i = 0; i < n; ++i)
			free_irq(adapter->msix_info[i + 1].vec,
				 &adapter->sge.qs[i]);
	} else
		free_irq(adapter->pdev->irq, adapter);
}

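/*
 * Wait for the offload response queue to receive @n management replies
 * beyond the @init_cnt packets it had already seen, polling every 10ms and
 * giving up after 10 attempts.
 */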
static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
			      unsigned long n)
{
	int attempts = 10;

	while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
		if (!--attempts)
			return -ETIMEDOUT;
		msleep(10);
	}
	return 0;
}

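/*
 * Initialize the parity of the TP memories by writing every SMT, L2T and
 * routing table entry once through management CPLs and waiting for the
 * corresponding replies on the offload response queue.
 */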
static int init_tp_parity(struct adapter *adap)
{
	int i;
	struct sk_buff *skb;
	struct cpl_set_tcb_field *greq;
	unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;

	t3_tp_set_offload_mode(adap, 1);

	for (i = 0; i < 16; i++) {
		struct cpl_smt_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
		if (!skb)
			skb = adap->nofail_skb;
		if (!skb)
			goto alloc_skb_fail;

		req = __skb_put_zero(skb, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
		req->mtu_idx = NMTUS - 1;
		req->iff = i;
		t3_mgmt_tx(adap, skb);
		if (skb == adap->nofail_skb) {
			await_mgmt_replies(adap, cnt, i + 1);
			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
			if (!adap->nofail_skb)
				goto alloc_skb_fail;
		}
	}

	for (i = 0; i < 2048; i++) {
		struct cpl_l2t_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
		if (!skb)
			skb = adap->nofail_skb;
		if (!skb)
			goto alloc_skb_fail;

		req = __skb_put_zero(skb, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
		req->params = htonl(V_L2T_W_IDX(i));
		t3_mgmt_tx(adap, skb);
		if (skb == adap->nofail_skb) {
			await_mgmt_replies(adap, cnt, 16 + i + 1);
			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
			if (!adap->nofail_skb)
				goto alloc_skb_fail;
		}
	}

	for (i = 0; i < 2048; i++) {
		struct cpl_rte_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
		if (!skb)
			skb = adap->nofail_skb;
		if (!skb)
			goto alloc_skb_fail;

		req = __skb_put_zero(skb, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
		req->l2t_idx = htonl(V_L2T_W_IDX(i));
		t3_mgmt_tx(adap, skb);
		if (skb == adap->nofail_skb) {
			await_mgmt_replies(adap, cnt, 16 + 2048 + i + 1);
			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
			if (!adap->nofail_skb)
				goto alloc_skb_fail;
		}
	}

	skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
	if (!skb)
		skb = adap->nofail_skb;
	if (!skb)
		goto alloc_skb_fail;

	greq = __skb_put_zero(skb, sizeof(*greq));
	greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
	greq->mask = cpu_to_be64(1);
	t3_mgmt_tx(adap, skb);

	i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
	if (skb == adap->nofail_skb) {
		i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
		adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
	}

	t3_tp_set_offload_mode(adap, 0);
	return i;

alloc_skb_fail:
	t3_tp_set_offload_mode(adap, 0);
	return -ENOMEM;
}

/**
 *	setup_rss - configure RSS
 *	@adap: the adapter
 *
 *	Sets up RSS to distribute packets to multiple receive queues.  We
 *	configure the RSS CPU lookup table to distribute to the number of HW
 *	receive queues, and the response queue lookup table to narrow that
 *	down to the response queues actually configured for each port.
 *	We always configure the RSS mapping for two ports since the mapping
 *	table has plenty of entries.
 */
static void setup_rss(struct adapter *adap)
{
	int i;
	unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
	unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
	u8 cpus[SGE_QSETS + 1];
	u16 rspq_map[RSS_TABLE_SIZE + 1];

	for (i = 0; i < SGE_QSETS; ++i)
		cpus[i] = i;
	cpus[SGE_QSETS] = 0xff;	/* terminator */

	for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
		rspq_map[i] = i % nq0;
		rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
	}
	rspq_map[RSS_TABLE_SIZE] = 0xffff; /* terminator */

	t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
		      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
		      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
}

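/*
 * Ring the doorbell of every egress context of every initialized queue set
 * so the SGE revisits their TX queues.
 */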
static void ring_dbs(struct adapter *adap)
{
	int i, j;

	for (i = 0; i < SGE_QSETS; i++) {
		struct sge_qset *qs = &adap->sge.qs[i];

		if (qs->adap)
			for (j = 0; j < SGE_TXQ_PER_SET; j++)
				t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX | V_EGRCNTX(qs->txq[j].cntxt_id));
	}
}

static void init_napi(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++) {
		struct sge_qset *qs = &adap->sge.qs[i];

		if (qs->adap)
			netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
				       64);
	}

	/*
	 * netif_napi_add() can be called only once per napi_struct because it
	 * adds each new napi_struct to a list.  Be careful not to call it a
	 * second time, e.g., during EEH recovery, by making a note of it.
	 */
	adap->flags |= NAPI_INIT;
}

/*
 * Wait until all NAPI handlers are descheduled.  This includes the handlers of
 * both netdevices representing interfaces and the dummy ones for the extra
 * queues.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++)
		if (adap->sge.qs[i].adap)
			napi_disable(&adap->sge.qs[i].napi);
}

static void enable_all_napi(struct adapter *adap)
{
	int i;
	for (i = 0; i < SGE_QSETS; i++)
		if (adap->sge.qs[i].adap)
			napi_enable(&adap->sge.qs[i].napi);
}

/**
 *	setup_sge_qsets - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int setup_sge_qsets(struct adapter *adap)
{
	int i, j, err, irq_idx = 0, qset_idx = 0;
	unsigned int ntxq = SGE_TXQ_PER_SET;

	if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
		irq_idx = -1;

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);

		pi->qs = &adap->sge.qs[pi->first_qset];
		for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
			err = t3_sge_alloc_qset(adap, qset_idx, 1,
				(adap->flags & USING_MSIX) ? qset_idx + 1 :
							     irq_idx,
				&adap->params.sge.qset[qset_idx], ntxq, dev,
				netdev_get_tx_queue(dev, j));
			if (err) {
				t3_free_sge_resources(adap);
				return err;
			}
		}
	}

	return 0;
}

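/*
 * Generic show/store helpers for the sysfs attributes below.  Both take the
 * RTNL lock to synchronize with ioctls that may reconfigure or shut down
 * the device; attr_store() additionally parses and range-checks the value.
 */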
static ssize_t attr_show(struct device *d, char *buf,
			 ssize_t(*format) (struct net_device *, char *))
{
	ssize_t len;

	/* Synchronize with ioctls that may shut down the device */
	rtnl_lock();
	len = (*format) (to_net_dev(d), buf);
	rtnl_unlock();
	return len;
}

static ssize_t attr_store(struct device *d,
			  const char *buf, size_t len,
			  ssize_t(*set) (struct net_device *, unsigned int),
			  unsigned int min_val, unsigned int max_val)
{
	ssize_t ret;
	unsigned int val;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	ret = kstrtouint(buf, 0, &val);
	if (ret)
		return ret;
	if (val < min_val || val > max_val)
		return -EINVAL;

	rtnl_lock();
	ret = (*set) (to_net_dev(d), val);
	if (!ret)
		ret = len;
	rtnl_unlock();
	return ret;
}

#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct net_device *dev, char *buf) \
{ \
	struct port_info *pi = netdev_priv(dev); \
	struct adapter *adap = pi->adapter; \
	return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return attr_show(d, buf, format_##name); \
}

static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val && adap->params.rev == 0)
		return -EINVAL;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
	    min_tids)
		return -EINVAL;
	adap->params.mc5.nfilters = val;
	return 0;
}

static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, buf, len, set_nfilters, 0, ~0);
}

static ssize_t set_nservers(struct net_device *dev, unsigned int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
	    MC5_MIN_TIDS)
		return -EINVAL;
	adap->params.mc5.nservers = val;
	return 0;
}

static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, buf, len, set_nservers, 0, ~0);
}

#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, 0444, show_##name, NULL)

#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, 0644, show_##name, store_method)

CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

static struct attribute *cxgb3_attrs[] = {
	&dev_attr_cam_size.attr,
	&dev_attr_nfilters.attr,
	&dev_attr_nservers.attr,
	NULL
};

static const struct attribute_group cxgb3_attr_group = {
	.attrs = cxgb3_attrs,
};

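/*
 * Show/store helpers for the traffic-management scheduler attributes:
 * tm_attr_show() reads a scheduler's current rate from the TP TM PIO
 * registers and reports it in Kbps, tm_attr_store() programs a new rate
 * through t3_config_sched().
 */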
static ssize_t tm_attr_show(struct device *d,
			    char *buf, int sched)
{
	struct port_info *pi = netdev_priv(to_net_dev(d));
	struct adapter *adap = pi->adapter;
	unsigned int v, addr, bpt, cpt;
	ssize_t len;

	addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
	rtnl_lock();
	t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
	if (sched & 1)
		v >>= 16;
	bpt = (v >> 8) & 0xff;
	cpt = v & 0xff;
	if (!cpt)
		len = sprintf(buf, "disabled\n");
	else {
		v = (adap->params.vpd.cclk * 1000) / cpt;
		len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
	}
	rtnl_unlock();
	return len;
}

static ssize_t tm_attr_store(struct device *d,
			     const char *buf, size_t len, int sched)
{
	struct port_info *pi = netdev_priv(to_net_dev(d));
	struct adapter *adap = pi->adapter;
	unsigned int val;
	ssize_t ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	ret = kstrtouint(buf, 0, &val);
	if (ret)
		return ret;
	if (val > 10000000)
		return -EINVAL;

	rtnl_lock();
	ret = t3_config_sched(adap, val, sched);
	if (!ret)
		ret = len;
	rtnl_unlock();
	return ret;
}

#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return tm_attr_show(d, buf, sched); \
} \
static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
			    const char *buf, size_t len) \
{ \
	return tm_attr_store(d, buf, len, sched); \
} \
static DEVICE_ATTR(name, 0644, show_##name, store_##name)

TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);

static struct attribute *offload_attrs[] = {
	&dev_attr_sched0.attr,
	&dev_attr_sched1.attr,
	&dev_attr_sched2.attr,
	&dev_attr_sched3.attr,
	&dev_attr_sched4.attr,
	&dev_attr_sched5.attr,
	&dev_attr_sched6.attr,
	&dev_attr_sched7.attr,
	NULL
};

static const struct attribute_group offload_attr_group = {
	.attrs = offload_attrs,
};

/*
 * Sends an sk_buff to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
	int ret;

	local_bh_disable();
	ret = t3_offload_tx(tdev, skb);
	local_bh_enable();
	return ret;
}

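/*
 * Program a port's entry in the source MAC table (SMT) with its LAN and
 * iSCSI MAC addresses using a CPL_SMT_WRITE_REQ on the offload queue.
 */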
static int write_smt_entry(struct adapter *adapter, int idx)
{
	struct cpl_smt_write_req *req;
	struct port_info *pi = netdev_priv(adapter->port[idx]);
	struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

	if (!skb)
		return -ENOMEM;

	req = __skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
	req->mtu_idx = NMTUS - 1;	/* should be 0 but there's a T3 bug */
	req->iff = idx;
	memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
	memcpy(req->src_mac1, pi->iscsic.mac_addr, ETH_ALEN);
	skb->priority = 1;
	offload_tx(&adapter->tdev, skb);
	return 0;
}

static int init_smt(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i)
	    write_smt_entry(adapter, i);
	return 0;
}

static void init_port_mtus(struct adapter *adapter)
{
	unsigned int mtus = adapter->port[0]->mtu;

	if (adapter->port[1])
		mtus |= adapter->port[1]->mtu << 16;
	t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}

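/*
 * Send a firmware management work request that programs packet scheduler
 * @sched with queue index @qidx, bounds @lo/@hi and port binding @port.
 */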
static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
			      int hi, int port)
{
	struct sk_buff *skb;
	struct mngt_pktsched_wr *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		skb = adap->nofail_skb;
	if (!skb)
		return -ENOMEM;

	req = skb_put(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
	req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
	req->sched = sched;
	req->idx = qidx;
	req->min = lo;
	req->max = hi;
	req->binding = port;
	ret = t3_mgmt_tx(adap, skb);
	if (skb == adap->nofail_skb) {
		adap->nofail_skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
					     GFP_KERNEL);
		if (!adap->nofail_skb)
			ret = -ENOMEM;
	}

	return ret;
}

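/*
 * Bind each queue set to the port it belongs to by issuing one packet
 * scheduler command per queue set; the last error seen, if any, is returned.
 */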
static int bind_qsets(struct adapter *adap)
{
	int i, j, err = 0;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		for (j = 0; j < pi->nqsets; ++j) {
			int ret = send_pktsched_cmd(adap, 1,
						    pi->first_qset + j, -1,
						    -1, i);
			if (ret)
				err = ret;
		}
	}

	return err;
}

#define FW_VERSION __stringify(FW_VERSION_MAJOR) "."			\
	__stringify(FW_VERSION_MINOR) "." __stringify(FW_VERSION_MICRO)
#define FW_FNAME "cxgb3/t3fw-" FW_VERSION ".bin"
#define TPSRAM_VERSION __stringify(TP_VERSION_MAJOR) "."		\
	__stringify(TP_VERSION_MINOR) "." __stringify(TP_VERSION_MICRO)
#define TPSRAM_NAME "cxgb3/t3%c_psram-" TPSRAM_VERSION ".bin"
#define AEL2005_OPT_EDC_NAME "cxgb3/ael2005_opt_edc.bin"
#define AEL2005_TWX_EDC_NAME "cxgb3/ael2005_twx_edc.bin"
#define AEL2020_TWX_EDC_NAME "cxgb3/ael2020_twx_edc.bin"
MODULE_FIRMWARE(FW_FNAME);
MODULE_FIRMWARE("cxgb3/t3b_psram-" TPSRAM_VERSION ".bin");
MODULE_FIRMWARE("cxgb3/t3c_psram-" TPSRAM_VERSION ".bin");
MODULE_FIRMWARE(AEL2005_OPT_EDC_NAME);
MODULE_FIRMWARE(AEL2005_TWX_EDC_NAME);
MODULE_FIRMWARE(AEL2020_TWX_EDC_NAME);

static inline const char *get_edc_fw_name(int edc_idx)
{
	const char *fw_name = NULL;

	switch (edc_idx) {
	case EDC_OPT_AEL2005:
		fw_name = AEL2005_OPT_EDC_NAME;
		break;
	case EDC_TWX_AEL2005:
		fw_name = AEL2005_TWX_EDC_NAME;
		break;
	case EDC_TWX_AEL2020:
		fw_name = AEL2020_TWX_EDC_NAME;
		break;
	}
	return fw_name;
}

int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size)
{
	struct adapter *adapter = phy->adapter;
	const struct firmware *fw;
	const char *fw_name;
	u32 csum;
	const __be32 *p;
	u16 *cache = phy->phy_cache;
	int i, ret = -EINVAL;

	fw_name = get_edc_fw_name(edc_idx);
	if (fw_name)
		ret = request_firmware(&fw, fw_name, &adapter->pdev->dev);
	if (ret < 0) {
		dev_err(&adapter->pdev->dev,
			"could not upgrade firmware: unable to load %s\n",
			fw_name);
		return ret;
	}

	/* check size, taking the checksum into account */
1049f7917c00SJeff Kirsher 	if (fw->size > size + 4) {
1050f7917c00SJeff Kirsher 		CH_ERR(adapter, "firmware image too large %u, expected %d\n",
1051f7917c00SJeff Kirsher 		       (unsigned int)fw->size, size + 4);
1052f7917c00SJeff Kirsher 		ret = -EINVAL;
1053f7917c00SJeff Kirsher 	}
1054f7917c00SJeff Kirsher 
1055f7917c00SJeff Kirsher 	/* compute checksum */
1056f7917c00SJeff Kirsher 	p = (const __be32 *)fw->data;
1057f7917c00SJeff Kirsher 	for (csum = 0, i = 0; i < fw->size / sizeof(csum); i++)
1058f7917c00SJeff Kirsher 		csum += ntohl(p[i]);
1059f7917c00SJeff Kirsher 
1060f7917c00SJeff Kirsher 	if (csum != 0xffffffff) {
1061f7917c00SJeff Kirsher 		CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
1062f7917c00SJeff Kirsher 		       csum);
1063f7917c00SJeff Kirsher 		ret = -EINVAL;
1064f7917c00SJeff Kirsher 	}
1065f7917c00SJeff Kirsher 
1066f7917c00SJeff Kirsher 	for (i = 0; i < size / 4 ; i++) {
1067f7917c00SJeff Kirsher 		*cache++ = (be32_to_cpu(p[i]) & 0xffff0000) >> 16;
1068f7917c00SJeff Kirsher 		*cache++ = be32_to_cpu(p[i]) & 0xffff;
1069f7917c00SJeff Kirsher 	}
1070f7917c00SJeff Kirsher 
1071f7917c00SJeff Kirsher 	release_firmware(fw);
1072f7917c00SJeff Kirsher 
1073f7917c00SJeff Kirsher 	return ret;
1074f7917c00SJeff Kirsher }
1075f7917c00SJeff Kirsher 
1076f7917c00SJeff Kirsher static int upgrade_fw(struct adapter *adap)
1077f7917c00SJeff Kirsher {
1078f7917c00SJeff Kirsher 	int ret;
1079f7917c00SJeff Kirsher 	const struct firmware *fw;
1080f7917c00SJeff Kirsher 	struct device *dev = &adap->pdev->dev;
1081f7917c00SJeff Kirsher 
1082f7917c00SJeff Kirsher 	ret = request_firmware(&fw, FW_FNAME, dev);
1083f7917c00SJeff Kirsher 	if (ret < 0) {
1084f7917c00SJeff Kirsher 		dev_err(dev, "could not upgrade firmware: unable to load %s\n",
1085f7917c00SJeff Kirsher 			FW_FNAME);
1086f7917c00SJeff Kirsher 		return ret;
1087f7917c00SJeff Kirsher 	}
1088f7917c00SJeff Kirsher 	ret = t3_load_fw(adap, fw->data, fw->size);
1089f7917c00SJeff Kirsher 	release_firmware(fw);
1090f7917c00SJeff Kirsher 
1091f7917c00SJeff Kirsher 	if (ret == 0)
1092f7917c00SJeff Kirsher 		dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
1093f7917c00SJeff Kirsher 			 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
1094f7917c00SJeff Kirsher 	else
1095f7917c00SJeff Kirsher 		dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
1096f7917c00SJeff Kirsher 			FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
1097f7917c00SJeff Kirsher 
1098f7917c00SJeff Kirsher 	return ret;
1099f7917c00SJeff Kirsher }
1100f7917c00SJeff Kirsher 
1101f7917c00SJeff Kirsher static inline char t3rev2char(struct adapter *adapter)
1102f7917c00SJeff Kirsher {
1103f7917c00SJeff Kirsher 	char rev = 0;
1104f7917c00SJeff Kirsher 
1105f7917c00SJeff Kirsher 	switch(adapter->params.rev) {
1106f7917c00SJeff Kirsher 	case T3_REV_B:
1107f7917c00SJeff Kirsher 	case T3_REV_B2:
1108f7917c00SJeff Kirsher 		rev = 'b';
1109f7917c00SJeff Kirsher 		break;
1110f7917c00SJeff Kirsher 	case T3_REV_C:
1111f7917c00SJeff Kirsher 		rev = 'c';
1112f7917c00SJeff Kirsher 		break;
1113f7917c00SJeff Kirsher 	}
1114f7917c00SJeff Kirsher 	return rev;
1115f7917c00SJeff Kirsher }
1116f7917c00SJeff Kirsher 
1117f7917c00SJeff Kirsher static int update_tpsram(struct adapter *adap)
1118f7917c00SJeff Kirsher {
1119f7917c00SJeff Kirsher 	const struct firmware *tpsram;
1120f7917c00SJeff Kirsher 	char buf[64];
1121f7917c00SJeff Kirsher 	struct device *dev = &adap->pdev->dev;
1122f7917c00SJeff Kirsher 	int ret;
1123f7917c00SJeff Kirsher 	char rev;
1124f7917c00SJeff Kirsher 
1125f7917c00SJeff Kirsher 	rev = t3rev2char(adap);
1126f7917c00SJeff Kirsher 	if (!rev)
1127f7917c00SJeff Kirsher 		return 0;
1128f7917c00SJeff Kirsher 
1129f7917c00SJeff Kirsher 	snprintf(buf, sizeof(buf), TPSRAM_NAME, rev);
1130f7917c00SJeff Kirsher 
1131f7917c00SJeff Kirsher 	ret = request_firmware(&tpsram, buf, dev);
1132f7917c00SJeff Kirsher 	if (ret < 0) {
1133f7917c00SJeff Kirsher 		dev_err(dev, "could not load TP SRAM: unable to load %s\n",
1134f7917c00SJeff Kirsher 			buf);
1135f7917c00SJeff Kirsher 		return ret;
1136f7917c00SJeff Kirsher 	}
1137f7917c00SJeff Kirsher 
1138f7917c00SJeff Kirsher 	ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
1139f7917c00SJeff Kirsher 	if (ret)
1140f7917c00SJeff Kirsher 		goto release_tpsram;
1141f7917c00SJeff Kirsher 
1142f7917c00SJeff Kirsher 	ret = t3_set_proto_sram(adap, tpsram->data);
1143f7917c00SJeff Kirsher 	if (ret == 0)
1144f7917c00SJeff Kirsher 		dev_info(dev,
1145f7917c00SJeff Kirsher 			 "successful update of protocol engine to %d.%d.%d\n",
1147f7917c00SJeff Kirsher 			 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
1148f7917c00SJeff Kirsher 	else
1149f7917c00SJeff Kirsher 		dev_err(dev, "failed to update protocol engine to %d.%d.%d\n",
1150f7917c00SJeff Kirsher 			TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
1151f7917c00SJeff Kirsher 	if (ret)
1152f7917c00SJeff Kirsher 		dev_err(dev, "loading protocol SRAM failed\n");
1153f7917c00SJeff Kirsher 
1154f7917c00SJeff Kirsher release_tpsram:
1155f7917c00SJeff Kirsher 	release_firmware(tpsram);
1156f7917c00SJeff Kirsher 
1157f7917c00SJeff Kirsher 	return ret;
1158f7917c00SJeff Kirsher }
1159f7917c00SJeff Kirsher 
1160f7917c00SJeff Kirsher /**
116160158e64SRoland Dreier  * t3_synchronize_rx - wait for current Rx processing on a port to complete
116260158e64SRoland Dreier  * @adap: the adapter
116360158e64SRoland Dreier  * @p: the port
116460158e64SRoland Dreier  *
116560158e64SRoland Dreier  * Ensures that current Rx processing on any of the queues associated with
116660158e64SRoland Dreier  * the given port completes before returning.  We do this by acquiring and
116760158e64SRoland Dreier  * releasing the locks of the response queues associated with the port.
116860158e64SRoland Dreier  */
116960158e64SRoland Dreier static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
117060158e64SRoland Dreier {
117160158e64SRoland Dreier 	int i;
117260158e64SRoland Dreier 
117360158e64SRoland Dreier 	for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
117460158e64SRoland Dreier 		struct sge_rspq *q = &adap->sge.qs[i].rspq;
117560158e64SRoland Dreier 
117660158e64SRoland Dreier 		spin_lock_irq(&q->lock);
117760158e64SRoland Dreier 		spin_unlock_irq(&q->lock);
117860158e64SRoland Dreier 	}
117960158e64SRoland Dreier }
118060158e64SRoland Dreier 
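/*
 * Enable/disable hardware VLAN tag extraction for a port.  Revisions
 * after T3A have a per-port control; rev 0 parts have a single control
 * that is turned on if any port has VLAN RX offload enabled.
 */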
118160158e64SRoland Dreier static void cxgb_vlan_mode(struct net_device *dev, netdev_features_t features)
118260158e64SRoland Dreier {
118360158e64SRoland Dreier 	struct port_info *pi = netdev_priv(dev);
118460158e64SRoland Dreier 	struct adapter *adapter = pi->adapter;
118560158e64SRoland Dreier 
118660158e64SRoland Dreier 	if (adapter->params.rev > 0) {
118760158e64SRoland Dreier 		t3_set_vlan_accel(adapter, 1 << pi->port_id,
1188f646968fSPatrick McHardy 				  features & NETIF_F_HW_VLAN_CTAG_RX);
118960158e64SRoland Dreier 	} else {
119060158e64SRoland Dreier 		/* single control for all ports */
1191f646968fSPatrick McHardy 		unsigned int i, have_vlans = features & NETIF_F_HW_VLAN_CTAG_RX;
119260158e64SRoland Dreier 
119360158e64SRoland Dreier 		for_each_port(adapter, i)
119460158e64SRoland Dreier 			have_vlans |=
1195f646968fSPatrick McHardy 				adapter->port[i]->features &
1196f646968fSPatrick McHardy 				NETIF_F_HW_VLAN_CTAG_RX;
119760158e64SRoland Dreier 
119860158e64SRoland Dreier 		t3_set_vlan_accel(adapter, 1, have_vlans);
119960158e64SRoland Dreier 	}
120060158e64SRoland Dreier 	t3_synchronize_rx(adapter, pi);
120160158e64SRoland Dreier }
120260158e64SRoland Dreier 
120360158e64SRoland Dreier /**
1204f7917c00SJeff Kirsher  *	cxgb_up - enable the adapter
1205f7917c00SJeff Kirsher  *	@adap: adapter being enabled
1206f7917c00SJeff Kirsher  *
1207f7917c00SJeff Kirsher  *	Called when the first port is enabled, this function performs the
1208f7917c00SJeff Kirsher  *	actions necessary to make an adapter operational, such as completing
1209f7917c00SJeff Kirsher  *	the initialization of HW modules, and enabling interrupts.
1210f7917c00SJeff Kirsher  *
1211f7917c00SJeff Kirsher  *	Must be called with the rtnl lock held.
1212f7917c00SJeff Kirsher  */
1213f7917c00SJeff Kirsher static int cxgb_up(struct adapter *adap)
1214f7917c00SJeff Kirsher {
121560158e64SRoland Dreier 	int i, err;
1216f7917c00SJeff Kirsher 
1217f7917c00SJeff Kirsher 	if (!(adap->flags & FULL_INIT_DONE)) {
1218f7917c00SJeff Kirsher 		err = t3_check_fw_version(adap);
1219f7917c00SJeff Kirsher 		if (err == -EINVAL) {
1220f7917c00SJeff Kirsher 			err = upgrade_fw(adap);
1221f7917c00SJeff Kirsher 			CH_WARN(adap, "FW upgrade to %d.%d.%d %s\n",
1222f7917c00SJeff Kirsher 				FW_VERSION_MAJOR, FW_VERSION_MINOR,
1223f7917c00SJeff Kirsher 				FW_VERSION_MICRO, err ? "failed" : "succeeded");
1224f7917c00SJeff Kirsher 		}
1225f7917c00SJeff Kirsher 
1226f7917c00SJeff Kirsher 		err = t3_check_tpsram_version(adap);
1227f7917c00SJeff Kirsher 		if (err == -EINVAL) {
1228f7917c00SJeff Kirsher 			err = update_tpsram(adap);
1229f7917c00SJeff Kirsher 			CH_WARN(adap, "TP upgrade to %d.%d.%d %s\n",
1230f7917c00SJeff Kirsher 				TP_VERSION_MAJOR, TP_VERSION_MINOR,
1231f7917c00SJeff Kirsher 				TP_VERSION_MICRO, err ? "failed" : "succeeded");
1232f7917c00SJeff Kirsher 		}
1233f7917c00SJeff Kirsher 
1234f7917c00SJeff Kirsher 		/*
1235f7917c00SJeff Kirsher 		 * Clear interrupts now to catch errors if t3_init_hw fails.
1236f7917c00SJeff Kirsher 		 * We clear them again later as initialization may trigger
1237f7917c00SJeff Kirsher 		 * conditions that can interrupt.
1238f7917c00SJeff Kirsher 		 */
1239f7917c00SJeff Kirsher 		t3_intr_clear(adap);
1240f7917c00SJeff Kirsher 
1241f7917c00SJeff Kirsher 		err = t3_init_hw(adap, 0);
1242f7917c00SJeff Kirsher 		if (err)
1243f7917c00SJeff Kirsher 			goto out;
1244f7917c00SJeff Kirsher 
1245f7917c00SJeff Kirsher 		t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
1246f7917c00SJeff Kirsher 		t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
1247f7917c00SJeff Kirsher 
1248f7917c00SJeff Kirsher 		err = setup_sge_qsets(adap);
1249f7917c00SJeff Kirsher 		if (err)
1250f7917c00SJeff Kirsher 			goto out;
1251f7917c00SJeff Kirsher 
125260158e64SRoland Dreier 		for_each_port(adap, i)
125360158e64SRoland Dreier 			cxgb_vlan_mode(adap->port[i], adap->port[i]->features);
125460158e64SRoland Dreier 
1255f7917c00SJeff Kirsher 		setup_rss(adap);
1256f7917c00SJeff Kirsher 		if (!(adap->flags & NAPI_INIT))
1257f7917c00SJeff Kirsher 			init_napi(adap);
1258f7917c00SJeff Kirsher 
1259f7917c00SJeff Kirsher 		t3_start_sge_timers(adap);
1260f7917c00SJeff Kirsher 		adap->flags |= FULL_INIT_DONE;
1261f7917c00SJeff Kirsher 	}
1262f7917c00SJeff Kirsher 
1263f7917c00SJeff Kirsher 	t3_intr_clear(adap);
1264f7917c00SJeff Kirsher 
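	/*
	 * Interrupt setup: with MSI-X, vector 0 handles asynchronous
	 * events and the remaining vectors are requested for the data
	 * queues (request_msix_data_irqs()); otherwise a single MSI or
	 * shared INTx line services the whole adapter.
	 */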
1265f7917c00SJeff Kirsher 	if (adap->flags & USING_MSIX) {
1266f7917c00SJeff Kirsher 		name_msix_vecs(adap);
1267f7917c00SJeff Kirsher 		err = request_irq(adap->msix_info[0].vec,
1268f7917c00SJeff Kirsher 				  t3_async_intr_handler, 0,
1269f7917c00SJeff Kirsher 				  adap->msix_info[0].desc, adap);
1270f7917c00SJeff Kirsher 		if (err)
1271f7917c00SJeff Kirsher 			goto irq_err;
1272f7917c00SJeff Kirsher 
1273f7917c00SJeff Kirsher 		err = request_msix_data_irqs(adap);
1274f7917c00SJeff Kirsher 		if (err) {
1275f7917c00SJeff Kirsher 			free_irq(adap->msix_info[0].vec, adap);
1276f7917c00SJeff Kirsher 			goto irq_err;
1277f7917c00SJeff Kirsher 		}
1278f7917c00SJeff Kirsher 	} else if ((err = request_irq(adap->pdev->irq,
1279f7917c00SJeff Kirsher 				      t3_intr_handler(adap,
1280f7917c00SJeff Kirsher 						      adap->sge.qs[0].rspq.
1281f7917c00SJeff Kirsher 						      polling),
1282f7917c00SJeff Kirsher 				      (adap->flags & USING_MSI) ?
1283f7917c00SJeff Kirsher 				       0 : IRQF_SHARED,
1284f7917c00SJeff Kirsher 				      adap->name, adap)))
1285f7917c00SJeff Kirsher 		goto irq_err;
1286f7917c00SJeff Kirsher 
1287f7917c00SJeff Kirsher 	enable_all_napi(adap);
1288f7917c00SJeff Kirsher 	t3_sge_start(adap);
1289f7917c00SJeff Kirsher 	t3_intr_enable(adap);
1290f7917c00SJeff Kirsher 
1291f7917c00SJeff Kirsher 	if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
1292f7917c00SJeff Kirsher 	    is_offload(adap) && init_tp_parity(adap) == 0)
1293f7917c00SJeff Kirsher 		adap->flags |= TP_PARITY_INIT;
1294f7917c00SJeff Kirsher 
1295f7917c00SJeff Kirsher 	if (adap->flags & TP_PARITY_INIT) {
1296f7917c00SJeff Kirsher 		t3_write_reg(adap, A_TP_INT_CAUSE,
1297f7917c00SJeff Kirsher 			     F_CMCACHEPERR | F_ARPLUTPERR);
1298f7917c00SJeff Kirsher 		t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
1299f7917c00SJeff Kirsher 	}
1300f7917c00SJeff Kirsher 
1301f7917c00SJeff Kirsher 	if (!(adap->flags & QUEUES_BOUND)) {
1302f7917c00SJeff Kirsher 		int ret = bind_qsets(adap);
1303f7917c00SJeff Kirsher 
1304f7917c00SJeff Kirsher 		if (ret < 0) {
1305f7917c00SJeff Kirsher 			CH_ERR(adap, "failed to bind qsets, err %d\n", ret);
1306f7917c00SJeff Kirsher 			t3_intr_disable(adap);
1307f7917c00SJeff Kirsher 			free_irq_resources(adap);
1308f7917c00SJeff Kirsher 			err = ret;
1309f7917c00SJeff Kirsher 			goto out;
1310f7917c00SJeff Kirsher 		}
1311f7917c00SJeff Kirsher 		adap->flags |= QUEUES_BOUND;
1312f7917c00SJeff Kirsher 	}
1313f7917c00SJeff Kirsher 
1314f7917c00SJeff Kirsher out:
1315f7917c00SJeff Kirsher 	return err;
1316f7917c00SJeff Kirsher irq_err:
1317f7917c00SJeff Kirsher 	CH_ERR(adap, "request_irq failed, err %d\n", err);
1318f7917c00SJeff Kirsher 	goto out;
1319f7917c00SJeff Kirsher }
1320f7917c00SJeff Kirsher 
1321f7917c00SJeff Kirsher /*
1322f7917c00SJeff Kirsher  * Release resources when all the ports and offloading have been stopped.
1323f7917c00SJeff Kirsher  */
1324f7917c00SJeff Kirsher static void cxgb_down(struct adapter *adapter, int on_wq)
1325f7917c00SJeff Kirsher {
1326f7917c00SJeff Kirsher 	t3_sge_stop(adapter);
1327f7917c00SJeff Kirsher 	spin_lock_irq(&adapter->work_lock);	/* sync with PHY intr task */
1328f7917c00SJeff Kirsher 	t3_intr_disable(adapter);
1329f7917c00SJeff Kirsher 	spin_unlock_irq(&adapter->work_lock);
1330f7917c00SJeff Kirsher 
1331f7917c00SJeff Kirsher 	free_irq_resources(adapter);
1332f7917c00SJeff Kirsher 	quiesce_rx(adapter);
1333f7917c00SJeff Kirsher 	t3_sge_stop(adapter);
1334f7917c00SJeff Kirsher 	if (!on_wq)
1335f7917c00SJeff Kirsher 		flush_workqueue(cxgb3_wq);/* wait for external IRQ handler */
1336f7917c00SJeff Kirsher }
1337f7917c00SJeff Kirsher 
1338f7917c00SJeff Kirsher static void schedule_chk_task(struct adapter *adap)
1339f7917c00SJeff Kirsher {
1340f7917c00SJeff Kirsher 	unsigned int timeo;
1341f7917c00SJeff Kirsher 
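	/*
	 * linkpoll_period is in tenths of a second, stats_update_period in
	 * seconds; if both are zero the periodic check is not scheduled.
	 */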
1342f7917c00SJeff Kirsher 	timeo = adap->params.linkpoll_period ?
1343f7917c00SJeff Kirsher 	    (HZ * adap->params.linkpoll_period) / 10 :
1344f7917c00SJeff Kirsher 	    adap->params.stats_update_period * HZ;
1345f7917c00SJeff Kirsher 	if (timeo)
1346f7917c00SJeff Kirsher 		queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
1347f7917c00SJeff Kirsher }
1348f7917c00SJeff Kirsher 
1349f7917c00SJeff Kirsher static int offload_open(struct net_device *dev)
1350f7917c00SJeff Kirsher {
1351f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(dev);
1352f7917c00SJeff Kirsher 	struct adapter *adapter = pi->adapter;
1353f7917c00SJeff Kirsher 	struct t3cdev *tdev = dev2t3cdev(dev);
1354f7917c00SJeff Kirsher 	int adap_up = adapter->open_device_map & PORT_MASK;
1355f7917c00SJeff Kirsher 	int err;
1356f7917c00SJeff Kirsher 
1357f7917c00SJeff Kirsher 	if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
1358f7917c00SJeff Kirsher 		return 0;
1359f7917c00SJeff Kirsher 
1360f7917c00SJeff Kirsher 	if (!adap_up && (err = cxgb_up(adapter)) < 0)
1361f7917c00SJeff Kirsher 		goto out;
1362f7917c00SJeff Kirsher 
1363f7917c00SJeff Kirsher 	t3_tp_set_offload_mode(adapter, 1);
1364f7917c00SJeff Kirsher 	tdev->lldev = adapter->port[0];
1365f7917c00SJeff Kirsher 	err = cxgb3_offload_activate(adapter);
1366f7917c00SJeff Kirsher 	if (err)
1367f7917c00SJeff Kirsher 		goto out;
1368f7917c00SJeff Kirsher 
1369f7917c00SJeff Kirsher 	init_port_mtus(adapter);
1370f7917c00SJeff Kirsher 	t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
1371f7917c00SJeff Kirsher 		     adapter->params.b_wnd,
1372f7917c00SJeff Kirsher 		     adapter->params.rev == 0 ?
1373f7917c00SJeff Kirsher 		     adapter->port[0]->mtu : 0xffff);
1374f7917c00SJeff Kirsher 	init_smt(adapter);
1375f7917c00SJeff Kirsher 
1376f7917c00SJeff Kirsher 	if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
1377f7917c00SJeff Kirsher 		dev_dbg(&dev->dev, "cannot create sysfs group\n");
1378f7917c00SJeff Kirsher 
1379f7917c00SJeff Kirsher 	/* Call back all registered clients */
1380f7917c00SJeff Kirsher 	cxgb3_add_clients(tdev);
1381f7917c00SJeff Kirsher 
1382f7917c00SJeff Kirsher out:
1383f7917c00SJeff Kirsher 	/* on error, restore the settings in case the offload module has changed them */
1384f7917c00SJeff Kirsher 	if (err) {
1385f7917c00SJeff Kirsher 		t3_tp_set_offload_mode(adapter, 0);
1386f7917c00SJeff Kirsher 		clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
1387f7917c00SJeff Kirsher 		cxgb3_set_dummy_ops(tdev);
1388f7917c00SJeff Kirsher 	}
1389f7917c00SJeff Kirsher 	return err;
1390f7917c00SJeff Kirsher }
1391f7917c00SJeff Kirsher 
1392f7917c00SJeff Kirsher static int offload_close(struct t3cdev *tdev)
1393f7917c00SJeff Kirsher {
1394f7917c00SJeff Kirsher 	struct adapter *adapter = tdev2adap(tdev);
1395f7917c00SJeff Kirsher 	struct t3c_data *td = T3C_DATA(tdev);
1396f7917c00SJeff Kirsher 
1397f7917c00SJeff Kirsher 	if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
1398f7917c00SJeff Kirsher 		return 0;
1399f7917c00SJeff Kirsher 
1400f7917c00SJeff Kirsher 	/* Call back all registered clients */
1401f7917c00SJeff Kirsher 	cxgb3_remove_clients(tdev);
1402f7917c00SJeff Kirsher 
1403f7917c00SJeff Kirsher 	sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);
1404f7917c00SJeff Kirsher 
1405f7917c00SJeff Kirsher 	/* Flush work scheduled while releasing TIDs */
140643829731STejun Heo 	flush_work(&td->tid_release_task);
1407f7917c00SJeff Kirsher 
1408f7917c00SJeff Kirsher 	tdev->lldev = NULL;
1409f7917c00SJeff Kirsher 	cxgb3_set_dummy_ops(tdev);
1410f7917c00SJeff Kirsher 	t3_tp_set_offload_mode(adapter, 0);
1411f7917c00SJeff Kirsher 	clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
1412f7917c00SJeff Kirsher 
1413f7917c00SJeff Kirsher 	if (!adapter->open_device_map)
1414f7917c00SJeff Kirsher 		cxgb_down(adapter, 0);
1415f7917c00SJeff Kirsher 
1416f7917c00SJeff Kirsher 	cxgb3_offload_deactivate(adapter);
1417f7917c00SJeff Kirsher 	return 0;
1418f7917c00SJeff Kirsher }
1419f7917c00SJeff Kirsher 
1420f7917c00SJeff Kirsher static int cxgb_open(struct net_device *dev)
1421f7917c00SJeff Kirsher {
1422f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(dev);
1423f7917c00SJeff Kirsher 	struct adapter *adapter = pi->adapter;
1424f7917c00SJeff Kirsher 	int other_ports = adapter->open_device_map & PORT_MASK;
1425f7917c00SJeff Kirsher 	int err;
1426f7917c00SJeff Kirsher 
1427f7917c00SJeff Kirsher 	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
1428f7917c00SJeff Kirsher 		return err;
1429f7917c00SJeff Kirsher 
1430f7917c00SJeff Kirsher 	set_bit(pi->port_id, &adapter->open_device_map);
1431f7917c00SJeff Kirsher 	if (is_offload(adapter) && !ofld_disable) {
1432f7917c00SJeff Kirsher 		err = offload_open(dev);
1433f7917c00SJeff Kirsher 		if (err)
1434428ac43fSJoe Perches 			pr_warn("Could not initialize offload capabilities\n");
1435f7917c00SJeff Kirsher 	}
1436f7917c00SJeff Kirsher 
1437f7917c00SJeff Kirsher 	netif_set_real_num_tx_queues(dev, pi->nqsets);
1438f7917c00SJeff Kirsher 	err = netif_set_real_num_rx_queues(dev, pi->nqsets);
1439f7917c00SJeff Kirsher 	if (err)
1440f7917c00SJeff Kirsher 		return err;
1441f7917c00SJeff Kirsher 	link_start(dev);
1442f7917c00SJeff Kirsher 	t3_port_intr_enable(adapter, pi->port_id);
1443f7917c00SJeff Kirsher 	netif_tx_start_all_queues(dev);
1444f7917c00SJeff Kirsher 	if (!other_ports)
1445f7917c00SJeff Kirsher 		schedule_chk_task(adapter);
1446f7917c00SJeff Kirsher 
1447f7917c00SJeff Kirsher 	cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_UP, pi->port_id);
1448f7917c00SJeff Kirsher 	return 0;
1449f7917c00SJeff Kirsher }
1450f7917c00SJeff Kirsher 
1451f7917c00SJeff Kirsher static int __cxgb_close(struct net_device *dev, int on_wq)
1452f7917c00SJeff Kirsher {
1453f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(dev);
1454f7917c00SJeff Kirsher 	struct adapter *adapter = pi->adapter;
1455f7917c00SJeff Kirsher 
1457f7917c00SJeff Kirsher 	if (!adapter->open_device_map)
1458f7917c00SJeff Kirsher 		return 0;
1459f7917c00SJeff Kirsher 
1460f7917c00SJeff Kirsher 	/* Stop link fault interrupts */
1461f7917c00SJeff Kirsher 	t3_xgm_intr_disable(adapter, pi->port_id);
1462f7917c00SJeff Kirsher 	t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
1463f7917c00SJeff Kirsher 
1464f7917c00SJeff Kirsher 	t3_port_intr_disable(adapter, pi->port_id);
1465f7917c00SJeff Kirsher 	netif_tx_stop_all_queues(dev);
1466f7917c00SJeff Kirsher 	pi->phy.ops->power_down(&pi->phy, 1);
1467f7917c00SJeff Kirsher 	netif_carrier_off(dev);
1468f7917c00SJeff Kirsher 	t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
1469f7917c00SJeff Kirsher 
1470f7917c00SJeff Kirsher 	spin_lock_irq(&adapter->work_lock);	/* sync with update task */
1471f7917c00SJeff Kirsher 	clear_bit(pi->port_id, &adapter->open_device_map);
1472f7917c00SJeff Kirsher 	spin_unlock_irq(&adapter->work_lock);
1473f7917c00SJeff Kirsher 
1474f7917c00SJeff Kirsher 	if (!(adapter->open_device_map & PORT_MASK))
1475f7917c00SJeff Kirsher 		cancel_delayed_work_sync(&adapter->adap_check_task);
1476f7917c00SJeff Kirsher 
1477f7917c00SJeff Kirsher 	if (!adapter->open_device_map)
1478f7917c00SJeff Kirsher 		cxgb_down(adapter, on_wq);
1479f7917c00SJeff Kirsher 
1480f7917c00SJeff Kirsher 	cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_DOWN, pi->port_id);
1481f7917c00SJeff Kirsher 	return 0;
1482f7917c00SJeff Kirsher }
1483f7917c00SJeff Kirsher 
1484f7917c00SJeff Kirsher static int cxgb_close(struct net_device *dev)
1485f7917c00SJeff Kirsher {
1486f7917c00SJeff Kirsher 	return __cxgb_close(dev, 0);
1487f7917c00SJeff Kirsher }
1488f7917c00SJeff Kirsher 
1489f7917c00SJeff Kirsher static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
1490f7917c00SJeff Kirsher {
1491f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(dev);
1492f7917c00SJeff Kirsher 	struct adapter *adapter = pi->adapter;
1493a73be7feSTobias Klauser 	struct net_device_stats *ns = &dev->stats;
1494f7917c00SJeff Kirsher 	const struct mac_stats *pstats;
1495f7917c00SJeff Kirsher 
1496f7917c00SJeff Kirsher 	spin_lock(&adapter->stats_lock);
1497f7917c00SJeff Kirsher 	pstats = t3_mac_update_stats(&pi->mac);
1498f7917c00SJeff Kirsher 	spin_unlock(&adapter->stats_lock);
1499f7917c00SJeff Kirsher 
1500f7917c00SJeff Kirsher 	ns->tx_bytes = pstats->tx_octets;
1501f7917c00SJeff Kirsher 	ns->tx_packets = pstats->tx_frames;
1502f7917c00SJeff Kirsher 	ns->rx_bytes = pstats->rx_octets;
1503f7917c00SJeff Kirsher 	ns->rx_packets = pstats->rx_frames;
1504f7917c00SJeff Kirsher 	ns->multicast = pstats->rx_mcast_frames;
1505f7917c00SJeff Kirsher 
1506f7917c00SJeff Kirsher 	ns->tx_errors = pstats->tx_underrun;
1507f7917c00SJeff Kirsher 	ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
1508f7917c00SJeff Kirsher 	    pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
1509f7917c00SJeff Kirsher 	    pstats->rx_fifo_ovfl;
1510f7917c00SJeff Kirsher 
1511f7917c00SJeff Kirsher 	/* detailed rx_errors */
1512f7917c00SJeff Kirsher 	ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
1513f7917c00SJeff Kirsher 	ns->rx_over_errors = 0;
1514f7917c00SJeff Kirsher 	ns->rx_crc_errors = pstats->rx_fcs_errs;
1515f7917c00SJeff Kirsher 	ns->rx_frame_errors = pstats->rx_symbol_errs;
1516f7917c00SJeff Kirsher 	ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
1517f7917c00SJeff Kirsher 	ns->rx_missed_errors = pstats->rx_cong_drops;
1518f7917c00SJeff Kirsher 
1519f7917c00SJeff Kirsher 	/* detailed tx_errors */
1520f7917c00SJeff Kirsher 	ns->tx_aborted_errors = 0;
1521f7917c00SJeff Kirsher 	ns->tx_carrier_errors = 0;
1522f7917c00SJeff Kirsher 	ns->tx_fifo_errors = pstats->tx_underrun;
1523f7917c00SJeff Kirsher 	ns->tx_heartbeat_errors = 0;
1524f7917c00SJeff Kirsher 	ns->tx_window_errors = 0;
1525f7917c00SJeff Kirsher 	return ns;
1526f7917c00SJeff Kirsher }
1527f7917c00SJeff Kirsher 
1528f7917c00SJeff Kirsher static u32 get_msglevel(struct net_device *dev)
1529f7917c00SJeff Kirsher {
1530f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(dev);
1531f7917c00SJeff Kirsher 	struct adapter *adapter = pi->adapter;
1532f7917c00SJeff Kirsher 
1533f7917c00SJeff Kirsher 	return adapter->msg_enable;
1534f7917c00SJeff Kirsher }
1535f7917c00SJeff Kirsher 
1536f7917c00SJeff Kirsher static void set_msglevel(struct net_device *dev, u32 val)
1537f7917c00SJeff Kirsher {
1538f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(dev);
1539f7917c00SJeff Kirsher 	struct adapter *adapter = pi->adapter;
1540f7917c00SJeff Kirsher 
1541f7917c00SJeff Kirsher 	adapter->msg_enable = val;
1542f7917c00SJeff Kirsher }
1543f7917c00SJeff Kirsher 
15449ca683c6SJoe Perches static const char stats_strings[][ETH_GSTRING_LEN] = {
1545f7917c00SJeff Kirsher 	"TxOctetsOK         ",
1546f7917c00SJeff Kirsher 	"TxFramesOK         ",
1547f7917c00SJeff Kirsher 	"TxMulticastFramesOK",
1548f7917c00SJeff Kirsher 	"TxBroadcastFramesOK",
1549f7917c00SJeff Kirsher 	"TxPauseFrames      ",
1550f7917c00SJeff Kirsher 	"TxUnderrun         ",
1551f7917c00SJeff Kirsher 	"TxExtUnderrun      ",
1552f7917c00SJeff Kirsher 
1553f7917c00SJeff Kirsher 	"TxFrames64         ",
1554f7917c00SJeff Kirsher 	"TxFrames65To127    ",
1555f7917c00SJeff Kirsher 	"TxFrames128To255   ",
1556f7917c00SJeff Kirsher 	"TxFrames256To511   ",
1557f7917c00SJeff Kirsher 	"TxFrames512To1023  ",
1558f7917c00SJeff Kirsher 	"TxFrames1024To1518 ",
1559f7917c00SJeff Kirsher 	"TxFrames1519ToMax  ",
1560f7917c00SJeff Kirsher 
1561f7917c00SJeff Kirsher 	"RxOctetsOK         ",
1562f7917c00SJeff Kirsher 	"RxFramesOK         ",
1563f7917c00SJeff Kirsher 	"RxMulticastFramesOK",
1564f7917c00SJeff Kirsher 	"RxBroadcastFramesOK",
1565f7917c00SJeff Kirsher 	"RxPauseFrames      ",
1566f7917c00SJeff Kirsher 	"RxFCSErrors        ",
1567f7917c00SJeff Kirsher 	"RxSymbolErrors     ",
1568f7917c00SJeff Kirsher 	"RxShortErrors      ",
1569f7917c00SJeff Kirsher 	"RxJabberErrors     ",
1570f7917c00SJeff Kirsher 	"RxLengthErrors     ",
1571f7917c00SJeff Kirsher 	"RxFIFOoverflow     ",
1572f7917c00SJeff Kirsher 
1573f7917c00SJeff Kirsher 	"RxFrames64         ",
1574f7917c00SJeff Kirsher 	"RxFrames65To127    ",
1575f7917c00SJeff Kirsher 	"RxFrames128To255   ",
1576f7917c00SJeff Kirsher 	"RxFrames256To511   ",
1577f7917c00SJeff Kirsher 	"RxFrames512To1023  ",
1578f7917c00SJeff Kirsher 	"RxFrames1024To1518 ",
1579f7917c00SJeff Kirsher 	"RxFrames1519ToMax  ",
1580f7917c00SJeff Kirsher 
1581f7917c00SJeff Kirsher 	"PhyFIFOErrors      ",
1582f7917c00SJeff Kirsher 	"TSO                ",
1583f7917c00SJeff Kirsher 	"VLANextractions    ",
1584f7917c00SJeff Kirsher 	"VLANinsertions     ",
1585f7917c00SJeff Kirsher 	"TxCsumOffload      ",
1586f7917c00SJeff Kirsher 	"RxCsumGood         ",
1587f7917c00SJeff Kirsher 	"LroAggregated      ",
1588f7917c00SJeff Kirsher 	"LroFlushed         ",
1589f7917c00SJeff Kirsher 	"LroNoDesc          ",
1590f7917c00SJeff Kirsher 	"RxDrops            ",
1591f7917c00SJeff Kirsher 
1592f7917c00SJeff Kirsher 	"CheckTXEnToggled   ",
1593f7917c00SJeff Kirsher 	"CheckResets        ",
1594f7917c00SJeff Kirsher 
1595f7917c00SJeff Kirsher 	"LinkFaults         ",
1596f7917c00SJeff Kirsher };
1597f7917c00SJeff Kirsher 
1598f7917c00SJeff Kirsher static int get_sset_count(struct net_device *dev, int sset)
1599f7917c00SJeff Kirsher {
1600f7917c00SJeff Kirsher 	switch (sset) {
1601f7917c00SJeff Kirsher 	case ETH_SS_STATS:
1602f7917c00SJeff Kirsher 		return ARRAY_SIZE(stats_strings);
1603f7917c00SJeff Kirsher 	default:
1604f7917c00SJeff Kirsher 		return -EOPNOTSUPP;
1605f7917c00SJeff Kirsher 	}
1606f7917c00SJeff Kirsher }
1607f7917c00SJeff Kirsher 
1608f7917c00SJeff Kirsher #define T3_REGMAP_SIZE (3 * 1024)
1609f7917c00SJeff Kirsher 
1610f7917c00SJeff Kirsher static int get_regs_len(struct net_device *dev)
1611f7917c00SJeff Kirsher {
1612f7917c00SJeff Kirsher 	return T3_REGMAP_SIZE;
1613f7917c00SJeff Kirsher }
1614f7917c00SJeff Kirsher 
1615f7917c00SJeff Kirsher static int get_eeprom_len(struct net_device *dev)
1616f7917c00SJeff Kirsher {
1617f7917c00SJeff Kirsher 	return EEPROMSIZE;
1618f7917c00SJeff Kirsher }
1619f7917c00SJeff Kirsher 
1620f7917c00SJeff Kirsher static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1621f7917c00SJeff Kirsher {
1622f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(dev);
1623f7917c00SJeff Kirsher 	struct adapter *adapter = pi->adapter;
1624f7917c00SJeff Kirsher 	u32 fw_vers = 0;
1625f7917c00SJeff Kirsher 	u32 tp_vers = 0;
1626f7917c00SJeff Kirsher 
1627f7917c00SJeff Kirsher 	spin_lock(&adapter->stats_lock);
1628f7917c00SJeff Kirsher 	t3_get_fw_version(adapter, &fw_vers);
1629f7917c00SJeff Kirsher 	t3_get_tp_version(adapter, &tp_vers);
1630f7917c00SJeff Kirsher 	spin_unlock(&adapter->stats_lock);
1631f7917c00SJeff Kirsher 
163223020ab3SRick Jones 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
163323020ab3SRick Jones 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
163423020ab3SRick Jones 	strlcpy(info->bus_info, pci_name(adapter->pdev),
163523020ab3SRick Jones 		sizeof(info->bus_info));
163684b40501SRick Jones 	if (fw_vers)
1637f7917c00SJeff Kirsher 		snprintf(info->fw_version, sizeof(info->fw_version),
1638f7917c00SJeff Kirsher 			 "%s %u.%u.%u TP %u.%u.%u",
1639f7917c00SJeff Kirsher 			 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
1640f7917c00SJeff Kirsher 			 G_FW_VERSION_MAJOR(fw_vers),
1641f7917c00SJeff Kirsher 			 G_FW_VERSION_MINOR(fw_vers),
1642f7917c00SJeff Kirsher 			 G_FW_VERSION_MICRO(fw_vers),
1643f7917c00SJeff Kirsher 			 G_TP_VERSION_MAJOR(tp_vers),
1644f7917c00SJeff Kirsher 			 G_TP_VERSION_MINOR(tp_vers),
1645f7917c00SJeff Kirsher 			 G_TP_VERSION_MICRO(tp_vers));
1646f7917c00SJeff Kirsher }
1647f7917c00SJeff Kirsher 
1648f7917c00SJeff Kirsher static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
1649f7917c00SJeff Kirsher {
1650f7917c00SJeff Kirsher 	if (stringset == ETH_SS_STATS)
1651f7917c00SJeff Kirsher 		memcpy(data, stats_strings, sizeof(stats_strings));
1652f7917c00SJeff Kirsher }
1653f7917c00SJeff Kirsher 
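/*
 * Sum a per-queue-set SGE counter over all queue sets owned by a port.
 */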
1654f7917c00SJeff Kirsher static unsigned long collect_sge_port_stats(struct adapter *adapter,
1655f7917c00SJeff Kirsher 					    struct port_info *p, int idx)
1656f7917c00SJeff Kirsher {
1657f7917c00SJeff Kirsher 	int i;
1658f7917c00SJeff Kirsher 	unsigned long tot = 0;
1659f7917c00SJeff Kirsher 
1660f7917c00SJeff Kirsher 	for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
1661f7917c00SJeff Kirsher 		tot += adapter->sge.qs[i].port_stats[idx];
1662f7917c00SJeff Kirsher 	return tot;
1663f7917c00SJeff Kirsher }
1664f7917c00SJeff Kirsher 
1665f7917c00SJeff Kirsher static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1666f7917c00SJeff Kirsher 		      u64 *data)
1667f7917c00SJeff Kirsher {
1668f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(dev);
1669f7917c00SJeff Kirsher 	struct adapter *adapter = pi->adapter;
1670f7917c00SJeff Kirsher 	const struct mac_stats *s;
1671f7917c00SJeff Kirsher 
1672f7917c00SJeff Kirsher 	spin_lock(&adapter->stats_lock);
1673f7917c00SJeff Kirsher 	s = t3_mac_update_stats(&pi->mac);
1674f7917c00SJeff Kirsher 	spin_unlock(&adapter->stats_lock);
1675f7917c00SJeff Kirsher 
1676f7917c00SJeff Kirsher 	*data++ = s->tx_octets;
1677f7917c00SJeff Kirsher 	*data++ = s->tx_frames;
1678f7917c00SJeff Kirsher 	*data++ = s->tx_mcast_frames;
1679f7917c00SJeff Kirsher 	*data++ = s->tx_bcast_frames;
1680f7917c00SJeff Kirsher 	*data++ = s->tx_pause;
1681f7917c00SJeff Kirsher 	*data++ = s->tx_underrun;
1682f7917c00SJeff Kirsher 	*data++ = s->tx_fifo_urun;
1683f7917c00SJeff Kirsher 
1684f7917c00SJeff Kirsher 	*data++ = s->tx_frames_64;
1685f7917c00SJeff Kirsher 	*data++ = s->tx_frames_65_127;
1686f7917c00SJeff Kirsher 	*data++ = s->tx_frames_128_255;
1687f7917c00SJeff Kirsher 	*data++ = s->tx_frames_256_511;
1688f7917c00SJeff Kirsher 	*data++ = s->tx_frames_512_1023;
1689f7917c00SJeff Kirsher 	*data++ = s->tx_frames_1024_1518;
1690f7917c00SJeff Kirsher 	*data++ = s->tx_frames_1519_max;
1691f7917c00SJeff Kirsher 
1692f7917c00SJeff Kirsher 	*data++ = s->rx_octets;
1693f7917c00SJeff Kirsher 	*data++ = s->rx_frames;
1694f7917c00SJeff Kirsher 	*data++ = s->rx_mcast_frames;
1695f7917c00SJeff Kirsher 	*data++ = s->rx_bcast_frames;
1696f7917c00SJeff Kirsher 	*data++ = s->rx_pause;
1697f7917c00SJeff Kirsher 	*data++ = s->rx_fcs_errs;
1698f7917c00SJeff Kirsher 	*data++ = s->rx_symbol_errs;
1699f7917c00SJeff Kirsher 	*data++ = s->rx_short;
1700f7917c00SJeff Kirsher 	*data++ = s->rx_jabber;
1701f7917c00SJeff Kirsher 	*data++ = s->rx_too_long;
1702f7917c00SJeff Kirsher 	*data++ = s->rx_fifo_ovfl;
1703f7917c00SJeff Kirsher 
1704f7917c00SJeff Kirsher 	*data++ = s->rx_frames_64;
1705f7917c00SJeff Kirsher 	*data++ = s->rx_frames_65_127;
1706f7917c00SJeff Kirsher 	*data++ = s->rx_frames_128_255;
1707f7917c00SJeff Kirsher 	*data++ = s->rx_frames_256_511;
1708f7917c00SJeff Kirsher 	*data++ = s->rx_frames_512_1023;
1709f7917c00SJeff Kirsher 	*data++ = s->rx_frames_1024_1518;
1710f7917c00SJeff Kirsher 	*data++ = s->rx_frames_1519_max;
1711f7917c00SJeff Kirsher 
1712f7917c00SJeff Kirsher 	*data++ = pi->phy.fifo_errors;
1713f7917c00SJeff Kirsher 
1714f7917c00SJeff Kirsher 	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
1715f7917c00SJeff Kirsher 	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
1716f7917c00SJeff Kirsher 	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
1717f7917c00SJeff Kirsher 	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
1718f7917c00SJeff Kirsher 	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
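	/* the three LRO entries in stats_strings (LroAggregated, LroFlushed,
	 * LroNoDesc) are reported as zero
	 */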
1719f7917c00SJeff Kirsher 	*data++ = 0;
1720f7917c00SJeff Kirsher 	*data++ = 0;
1721f7917c00SJeff Kirsher 	*data++ = 0;
1722f7917c00SJeff Kirsher 	*data++ = s->rx_cong_drops;
1723f7917c00SJeff Kirsher 
1724f7917c00SJeff Kirsher 	*data++ = s->num_toggled;
1725f7917c00SJeff Kirsher 	*data++ = s->num_resets;
1726f7917c00SJeff Kirsher 
1727f7917c00SJeff Kirsher 	*data++ = s->link_faults;
1728f7917c00SJeff Kirsher }
1729f7917c00SJeff Kirsher 
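/*
 * Copy the registers in the range [start, end] into the dump buffer at
 * their natural register offsets.
 */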
1730f7917c00SJeff Kirsher static inline void reg_block_dump(struct adapter *ap, void *buf,
1731f7917c00SJeff Kirsher 				  unsigned int start, unsigned int end)
1732f7917c00SJeff Kirsher {
1733f7917c00SJeff Kirsher 	u32 *p = buf + start;
1734f7917c00SJeff Kirsher 
1735f7917c00SJeff Kirsher 	for (; start <= end; start += sizeof(u32))
1736f7917c00SJeff Kirsher 		*p++ = t3_read_reg(ap, start);
1737f7917c00SJeff Kirsher }
1738f7917c00SJeff Kirsher 
1739f7917c00SJeff Kirsher static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1740f7917c00SJeff Kirsher 		     void *buf)
1741f7917c00SJeff Kirsher {
1742f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(dev);
1743f7917c00SJeff Kirsher 	struct adapter *ap = pi->adapter;
1744f7917c00SJeff Kirsher 
1745f7917c00SJeff Kirsher 	/*
1746f7917c00SJeff Kirsher 	 * Version scheme:
1747f7917c00SJeff Kirsher 	 * bits 0..9: chip version
1748f7917c00SJeff Kirsher 	 * bits 10..15: chip revision
1749f7917c00SJeff Kirsher 	 * bit 31: set for PCIe cards
1750f7917c00SJeff Kirsher 	 */
1751f7917c00SJeff Kirsher 	regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
1752f7917c00SJeff Kirsher 
1753f7917c00SJeff Kirsher 	/*
1754f7917c00SJeff Kirsher 	 * We skip the MAC statistics registers because they are clear-on-read.
1755f7917c00SJeff Kirsher 	 * Also reading multi-register stats would need to synchronize with the
1756f7917c00SJeff Kirsher 	 * periodic mac stats accumulation.  Hard to justify the complexity.
1757f7917c00SJeff Kirsher 	 */
1758f7917c00SJeff Kirsher 	memset(buf, 0, T3_REGMAP_SIZE);
1759f7917c00SJeff Kirsher 	reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
1760f7917c00SJeff Kirsher 	reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
1761f7917c00SJeff Kirsher 	reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
1762f7917c00SJeff Kirsher 	reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
1763f7917c00SJeff Kirsher 	reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
1764f7917c00SJeff Kirsher 	reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
1765f7917c00SJeff Kirsher 		       XGM_REG(A_XGM_SERDES_STAT3, 1));
1766f7917c00SJeff Kirsher 	reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
1767f7917c00SJeff Kirsher 		       XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
1768f7917c00SJeff Kirsher }
1769f7917c00SJeff Kirsher 
1770f7917c00SJeff Kirsher static int restart_autoneg(struct net_device *dev)
1771f7917c00SJeff Kirsher {
1772f7917c00SJeff Kirsher 	struct port_info *p = netdev_priv(dev);
1773f7917c00SJeff Kirsher 
1774f7917c00SJeff Kirsher 	if (!netif_running(dev))
1775f7917c00SJeff Kirsher 		return -EAGAIN;
1776f7917c00SJeff Kirsher 	if (p->link_config.autoneg != AUTONEG_ENABLE)
1777f7917c00SJeff Kirsher 		return -EINVAL;
1778f7917c00SJeff Kirsher 	p->phy.ops->autoneg_restart(&p->phy);
1779f7917c00SJeff Kirsher 	return 0;
1780f7917c00SJeff Kirsher }
1781f7917c00SJeff Kirsher 
1782f7917c00SJeff Kirsher static int set_phys_id(struct net_device *dev,
1783f7917c00SJeff Kirsher 		       enum ethtool_phys_id_state state)
1784f7917c00SJeff Kirsher {
1785f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(dev);
1786f7917c00SJeff Kirsher 	struct adapter *adapter = pi->adapter;
1787f7917c00SJeff Kirsher 
1788f7917c00SJeff Kirsher 	switch (state) {
1789f7917c00SJeff Kirsher 	case ETHTOOL_ID_ACTIVE:
1790f7917c00SJeff Kirsher 		return 1;	/* cycle on/off once per second */
1791f7917c00SJeff Kirsher 
1792f7917c00SJeff Kirsher 	case ETHTOOL_ID_OFF:
1793f7917c00SJeff Kirsher 		t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL, 0);
1794f7917c00SJeff Kirsher 		break;
1795f7917c00SJeff Kirsher 
1796f7917c00SJeff Kirsher 	case ETHTOOL_ID_ON:
1797f7917c00SJeff Kirsher 	case ETHTOOL_ID_INACTIVE:
1798f7917c00SJeff Kirsher 		t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1799f7917c00SJeff Kirsher 			 F_GPIO0_OUT_VAL);
1800f7917c00SJeff Kirsher 	}
1801f7917c00SJeff Kirsher 
1802f7917c00SJeff Kirsher 	return 0;
1803f7917c00SJeff Kirsher }
1804f7917c00SJeff Kirsher 
1805b7b44fd2SPhilippe Reynes static int get_link_ksettings(struct net_device *dev,
1806b7b44fd2SPhilippe Reynes 			      struct ethtool_link_ksettings *cmd)
1807f7917c00SJeff Kirsher {
1808f7917c00SJeff Kirsher 	struct port_info *p = netdev_priv(dev);
1809b7b44fd2SPhilippe Reynes 	u32 supported;
1810f7917c00SJeff Kirsher 
1811b7b44fd2SPhilippe Reynes 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
1812b7b44fd2SPhilippe Reynes 						p->link_config.supported);
1813b7b44fd2SPhilippe Reynes 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
1814b7b44fd2SPhilippe Reynes 						p->link_config.advertising);
1815f7917c00SJeff Kirsher 
1816f7917c00SJeff Kirsher 	if (netif_carrier_ok(dev)) {
1817b7b44fd2SPhilippe Reynes 		cmd->base.speed = p->link_config.speed;
1818b7b44fd2SPhilippe Reynes 		cmd->base.duplex = p->link_config.duplex;
1819f7917c00SJeff Kirsher 	} else {
1820b7b44fd2SPhilippe Reynes 		cmd->base.speed = SPEED_UNKNOWN;
1821b7b44fd2SPhilippe Reynes 		cmd->base.duplex = DUPLEX_UNKNOWN;
1822f7917c00SJeff Kirsher 	}
1823f7917c00SJeff Kirsher 
1824b7b44fd2SPhilippe Reynes 	ethtool_convert_link_mode_to_legacy_u32(&supported,
1825b7b44fd2SPhilippe Reynes 						cmd->link_modes.supported);
1826b7b44fd2SPhilippe Reynes 
1827b7b44fd2SPhilippe Reynes 	cmd->base.port = (supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1828b7b44fd2SPhilippe Reynes 	cmd->base.phy_address = p->phy.mdio.prtad;
1829b7b44fd2SPhilippe Reynes 	cmd->base.autoneg = p->link_config.autoneg;
1830f7917c00SJeff Kirsher 	return 0;
1831f7917c00SJeff Kirsher }
1832f7917c00SJeff Kirsher 
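/*
 * Translate a speed/duplex pair into the corresponding SUPPORTED_*
 * capability bit, or 0 if the combination isn't supported.
 */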
1833f7917c00SJeff Kirsher static int speed_duplex_to_caps(int speed, int duplex)
1834f7917c00SJeff Kirsher {
1835f7917c00SJeff Kirsher 	int cap = 0;
1836f7917c00SJeff Kirsher 
1837f7917c00SJeff Kirsher 	switch (speed) {
1838f7917c00SJeff Kirsher 	case SPEED_10:
1839f7917c00SJeff Kirsher 		if (duplex == DUPLEX_FULL)
1840f7917c00SJeff Kirsher 			cap = SUPPORTED_10baseT_Full;
1841f7917c00SJeff Kirsher 		else
1842f7917c00SJeff Kirsher 			cap = SUPPORTED_10baseT_Half;
1843f7917c00SJeff Kirsher 		break;
1844f7917c00SJeff Kirsher 	case SPEED_100:
1845f7917c00SJeff Kirsher 		if (duplex == DUPLEX_FULL)
1846f7917c00SJeff Kirsher 			cap = SUPPORTED_100baseT_Full;
1847f7917c00SJeff Kirsher 		else
1848f7917c00SJeff Kirsher 			cap = SUPPORTED_100baseT_Half;
1849f7917c00SJeff Kirsher 		break;
1850f7917c00SJeff Kirsher 	case SPEED_1000:
1851f7917c00SJeff Kirsher 		if (duplex == DUPLEX_FULL)
1852f7917c00SJeff Kirsher 			cap = SUPPORTED_1000baseT_Full;
1853f7917c00SJeff Kirsher 		else
1854f7917c00SJeff Kirsher 			cap = SUPPORTED_1000baseT_Half;
1855f7917c00SJeff Kirsher 		break;
1856f7917c00SJeff Kirsher 	case SPEED_10000:
1857f7917c00SJeff Kirsher 		if (duplex == DUPLEX_FULL)
1858f7917c00SJeff Kirsher 			cap = SUPPORTED_10000baseT_Full;
1859f7917c00SJeff Kirsher 	}
1860f7917c00SJeff Kirsher 	return cap;
1861f7917c00SJeff Kirsher }
1862f7917c00SJeff Kirsher 
1863f7917c00SJeff Kirsher #define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1864f7917c00SJeff Kirsher 		      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1865f7917c00SJeff Kirsher 		      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1866f7917c00SJeff Kirsher 		      ADVERTISED_10000baseT_Full)
1867f7917c00SJeff Kirsher 
1868b7b44fd2SPhilippe Reynes static int set_link_ksettings(struct net_device *dev,
1869b7b44fd2SPhilippe Reynes 			      const struct ethtool_link_ksettings *cmd)
1870f7917c00SJeff Kirsher {
1871f7917c00SJeff Kirsher 	struct port_info *p = netdev_priv(dev);
1872f7917c00SJeff Kirsher 	struct link_config *lc = &p->link_config;
1873b7b44fd2SPhilippe Reynes 	u32 advertising;
1874b7b44fd2SPhilippe Reynes 
1875b7b44fd2SPhilippe Reynes 	ethtool_convert_link_mode_to_legacy_u32(&advertising,
1876b7b44fd2SPhilippe Reynes 						cmd->link_modes.advertising);
1877f7917c00SJeff Kirsher 
1878f7917c00SJeff Kirsher 	if (!(lc->supported & SUPPORTED_Autoneg)) {
1879f7917c00SJeff Kirsher 		/*
1880f7917c00SJeff Kirsher 		 * PHY offers a single speed/duplex.  See if that's what's
1881f7917c00SJeff Kirsher 		 * being requested.
1882f7917c00SJeff Kirsher 		 */
1883b7b44fd2SPhilippe Reynes 		if (cmd->base.autoneg == AUTONEG_DISABLE) {
1884b7b44fd2SPhilippe Reynes 			u32 speed = cmd->base.speed;
1885b7b44fd2SPhilippe Reynes 			int cap = speed_duplex_to_caps(speed, cmd->base.duplex);
1886f7917c00SJeff Kirsher 			if (lc->supported & cap)
1887f7917c00SJeff Kirsher 				return 0;
1888f7917c00SJeff Kirsher 		}
1889f7917c00SJeff Kirsher 		return -EINVAL;
1890f7917c00SJeff Kirsher 	}
1891f7917c00SJeff Kirsher 
1892b7b44fd2SPhilippe Reynes 	if (cmd->base.autoneg == AUTONEG_DISABLE) {
1893b7b44fd2SPhilippe Reynes 		u32 speed = cmd->base.speed;
1894b7b44fd2SPhilippe Reynes 		int cap = speed_duplex_to_caps(speed, cmd->base.duplex);
1895f7917c00SJeff Kirsher 
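		/* a forced speed must be supported by the PHY and cannot be
		 * 1Gb/s: 1000BASE-T links must autonegotiate
		 */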
1896f7917c00SJeff Kirsher 		if (!(lc->supported & cap) || (speed == SPEED_1000))
1897f7917c00SJeff Kirsher 			return -EINVAL;
1898f7917c00SJeff Kirsher 		lc->requested_speed = speed;
1899b7b44fd2SPhilippe Reynes 		lc->requested_duplex = cmd->base.duplex;
1900f7917c00SJeff Kirsher 		lc->advertising = 0;
1901f7917c00SJeff Kirsher 	} else {
1902b7b44fd2SPhilippe Reynes 		advertising &= ADVERTISED_MASK;
1903b7b44fd2SPhilippe Reynes 		advertising &= lc->supported;
1904b7b44fd2SPhilippe Reynes 		if (!advertising)
1905f7917c00SJeff Kirsher 			return -EINVAL;
1906f7917c00SJeff Kirsher 		lc->requested_speed = SPEED_INVALID;
1907f7917c00SJeff Kirsher 		lc->requested_duplex = DUPLEX_INVALID;
1908b7b44fd2SPhilippe Reynes 		lc->advertising = advertising | ADVERTISED_Autoneg;
1909f7917c00SJeff Kirsher 	}
1910b7b44fd2SPhilippe Reynes 	lc->autoneg = cmd->base.autoneg;
1911f7917c00SJeff Kirsher 	if (netif_running(dev))
1912f7917c00SJeff Kirsher 		t3_link_start(&p->phy, &p->mac, lc);
1913f7917c00SJeff Kirsher 	return 0;
1914f7917c00SJeff Kirsher }
1915f7917c00SJeff Kirsher 
1916f7917c00SJeff Kirsher static void get_pauseparam(struct net_device *dev,
1917f7917c00SJeff Kirsher 			   struct ethtool_pauseparam *epause)
1918f7917c00SJeff Kirsher {
1919f7917c00SJeff Kirsher 	struct port_info *p = netdev_priv(dev);
1920f7917c00SJeff Kirsher 
1921f7917c00SJeff Kirsher 	epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1922f7917c00SJeff Kirsher 	epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1923f7917c00SJeff Kirsher 	epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
1924f7917c00SJeff Kirsher }
1925f7917c00SJeff Kirsher 
1926f7917c00SJeff Kirsher static int set_pauseparam(struct net_device *dev,
1927f7917c00SJeff Kirsher 			  struct ethtool_pauseparam *epause)
1928f7917c00SJeff Kirsher {
1929f7917c00SJeff Kirsher 	struct port_info *p = netdev_priv(dev);
1930f7917c00SJeff Kirsher 	struct link_config *lc = &p->link_config;
1931f7917c00SJeff Kirsher 
1932f7917c00SJeff Kirsher 	if (epause->autoneg == AUTONEG_DISABLE)
1933f7917c00SJeff Kirsher 		lc->requested_fc = 0;
1934f7917c00SJeff Kirsher 	else if (lc->supported & SUPPORTED_Autoneg)
1935f7917c00SJeff Kirsher 		lc->requested_fc = PAUSE_AUTONEG;
1936f7917c00SJeff Kirsher 	else
1937f7917c00SJeff Kirsher 		return -EINVAL;
1938f7917c00SJeff Kirsher 
1939f7917c00SJeff Kirsher 	if (epause->rx_pause)
1940f7917c00SJeff Kirsher 		lc->requested_fc |= PAUSE_RX;
1941f7917c00SJeff Kirsher 	if (epause->tx_pause)
1942f7917c00SJeff Kirsher 		lc->requested_fc |= PAUSE_TX;
1943f7917c00SJeff Kirsher 	if (lc->autoneg == AUTONEG_ENABLE) {
1944f7917c00SJeff Kirsher 		if (netif_running(dev))
1945f7917c00SJeff Kirsher 			t3_link_start(&p->phy, &p->mac, lc);
1946f7917c00SJeff Kirsher 	} else {
1947f7917c00SJeff Kirsher 		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1948f7917c00SJeff Kirsher 		if (netif_running(dev))
1949f7917c00SJeff Kirsher 			t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
1950f7917c00SJeff Kirsher 	}
1951f7917c00SJeff Kirsher 	return 0;
1952f7917c00SJeff Kirsher }
1953f7917c00SJeff Kirsher 
1954f7917c00SJeff Kirsher static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1955f7917c00SJeff Kirsher {
1956f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(dev);
1957f7917c00SJeff Kirsher 	struct adapter *adapter = pi->adapter;
1958f7917c00SJeff Kirsher 	const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
1959f7917c00SJeff Kirsher 
1960f7917c00SJeff Kirsher 	e->rx_max_pending = MAX_RX_BUFFERS;
1961f7917c00SJeff Kirsher 	e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1962f7917c00SJeff Kirsher 	e->tx_max_pending = MAX_TXQ_ENTRIES;
1963f7917c00SJeff Kirsher 
1964f7917c00SJeff Kirsher 	e->rx_pending = q->fl_size;
1965f7917c00SJeff Kirsher 	e->rx_mini_pending = q->rspq_size;
1966f7917c00SJeff Kirsher 	e->rx_jumbo_pending = q->jumbo_size;
1967f7917c00SJeff Kirsher 	e->tx_pending = q->txq_size[0];
1968f7917c00SJeff Kirsher }
1969f7917c00SJeff Kirsher 
1970f7917c00SJeff Kirsher static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1971f7917c00SJeff Kirsher {
1972f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(dev);
1973f7917c00SJeff Kirsher 	struct adapter *adapter = pi->adapter;
1974f7917c00SJeff Kirsher 	struct qset_params *q;
1975f7917c00SJeff Kirsher 	int i;
1976f7917c00SJeff Kirsher 
1977f7917c00SJeff Kirsher 	if (e->rx_pending > MAX_RX_BUFFERS ||
1978f7917c00SJeff Kirsher 	    e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
1979f7917c00SJeff Kirsher 	    e->tx_pending > MAX_TXQ_ENTRIES ||
1980f7917c00SJeff Kirsher 	    e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1981f7917c00SJeff Kirsher 	    e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1982f7917c00SJeff Kirsher 	    e->rx_pending < MIN_FL_ENTRIES ||
1983f7917c00SJeff Kirsher 	    e->rx_jumbo_pending < MIN_FL_ENTRIES ||
1984f7917c00SJeff Kirsher 	    e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
1985f7917c00SJeff Kirsher 		return -EINVAL;
1986f7917c00SJeff Kirsher 
1987f7917c00SJeff Kirsher 	if (adapter->flags & FULL_INIT_DONE)
1988f7917c00SJeff Kirsher 		return -EBUSY;
1989f7917c00SJeff Kirsher 
1990f7917c00SJeff Kirsher 	q = &adapter->params.sge.qset[pi->first_qset];
1991f7917c00SJeff Kirsher 	for (i = 0; i < pi->nqsets; ++i, ++q) {
1992f7917c00SJeff Kirsher 		q->rspq_size = e->rx_mini_pending;
1993f7917c00SJeff Kirsher 		q->fl_size = e->rx_pending;
1994f7917c00SJeff Kirsher 		q->jumbo_size = e->rx_jumbo_pending;
1995f7917c00SJeff Kirsher 		q->txq_size[0] = e->tx_pending;
1996f7917c00SJeff Kirsher 		q->txq_size[1] = e->tx_pending;
1997f7917c00SJeff Kirsher 		q->txq_size[2] = e->tx_pending;
1998f7917c00SJeff Kirsher 	}
1999f7917c00SJeff Kirsher 	return 0;
2000f7917c00SJeff Kirsher }
2001f7917c00SJeff Kirsher 
2002f7917c00SJeff Kirsher static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2003f7917c00SJeff Kirsher {
2004f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(dev);
2005f7917c00SJeff Kirsher 	struct adapter *adapter = pi->adapter;
2006f7917c00SJeff Kirsher 	struct qset_params *qsp;
2007f7917c00SJeff Kirsher 	struct sge_qset *qs;
2008f7917c00SJeff Kirsher 	int i;
2009f7917c00SJeff Kirsher 
2010f7917c00SJeff Kirsher 	if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
2011f7917c00SJeff Kirsher 		return -EINVAL;
2012f7917c00SJeff Kirsher 
2013f7917c00SJeff Kirsher 	for (i = 0; i < pi->nqsets; i++) {
2014f7917c00SJeff Kirsher 		qsp = &adapter->params.sge.qset[i];
2015f7917c00SJeff Kirsher 		qs = &adapter->sge.qs[i];
2016f7917c00SJeff Kirsher 		qsp->coalesce_usecs = c->rx_coalesce_usecs;
2017f7917c00SJeff Kirsher 		t3_update_qset_coalesce(qs, qsp);
2018f7917c00SJeff Kirsher 	}
2019f7917c00SJeff Kirsher 
2020f7917c00SJeff Kirsher 	return 0;
2021f7917c00SJeff Kirsher }
2022f7917c00SJeff Kirsher 
2023f7917c00SJeff Kirsher static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2024f7917c00SJeff Kirsher {
2025f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(dev);
2026f7917c00SJeff Kirsher 	struct adapter *adapter = pi->adapter;
2027f7917c00SJeff Kirsher 	struct qset_params *q = adapter->params.sge.qset;
2028f7917c00SJeff Kirsher 
2029f7917c00SJeff Kirsher 	c->rx_coalesce_usecs = q->coalesce_usecs;
2030f7917c00SJeff Kirsher 	return 0;
2031f7917c00SJeff Kirsher }
2032f7917c00SJeff Kirsher 
2033f7917c00SJeff Kirsher static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
2034f7917c00SJeff Kirsher 		      u8 *data)
2035f7917c00SJeff Kirsher {
2036f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(dev);
2037f7917c00SJeff Kirsher 	struct adapter *adapter = pi->adapter;
2038f7917c00SJeff Kirsher 	int i, err = 0;
2039f7917c00SJeff Kirsher 
2040f7917c00SJeff Kirsher 	u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
2041f7917c00SJeff Kirsher 	if (!buf)
2042f7917c00SJeff Kirsher 		return -ENOMEM;
2043f7917c00SJeff Kirsher 
2044f7917c00SJeff Kirsher 	e->magic = EEPROM_MAGIC;
2045f7917c00SJeff Kirsher 	for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
2046f7917c00SJeff Kirsher 		err = t3_seeprom_read(adapter, i, (__le32 *)&buf[i]);
2047f7917c00SJeff Kirsher 
2048f7917c00SJeff Kirsher 	if (!err)
2049f7917c00SJeff Kirsher 		memcpy(data, buf + e->offset, e->len);
2050f7917c00SJeff Kirsher 	kfree(buf);
2051f7917c00SJeff Kirsher 	return err;
2052f7917c00SJeff Kirsher }
2053f7917c00SJeff Kirsher 
2054f7917c00SJeff Kirsher static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
2055f7917c00SJeff Kirsher 		      u8 *data)
2056f7917c00SJeff Kirsher {
2057f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(dev);
2058f7917c00SJeff Kirsher 	struct adapter *adapter = pi->adapter;
2059f7917c00SJeff Kirsher 	u32 aligned_offset, aligned_len;
2060f7917c00SJeff Kirsher 	__le32 *p;
2061f7917c00SJeff Kirsher 	u8 *buf;
2062f7917c00SJeff Kirsher 	int err;
2063f7917c00SJeff Kirsher 
2064f7917c00SJeff Kirsher 	if (eeprom->magic != EEPROM_MAGIC)
2065f7917c00SJeff Kirsher 		return -EINVAL;
2066f7917c00SJeff Kirsher 
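	/*
	 * The EEPROM is written a 32-bit word at a time, so round the
	 * request out to 4-byte alignment; for an unaligned request the
	 * first and last words are read back first so the bytes outside
	 * the requested range are preserved.
	 */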
2067f7917c00SJeff Kirsher 	aligned_offset = eeprom->offset & ~3;
2068f7917c00SJeff Kirsher 	aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
2069f7917c00SJeff Kirsher 
2070f7917c00SJeff Kirsher 	if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
2071f7917c00SJeff Kirsher 		buf = kmalloc(aligned_len, GFP_KERNEL);
2072f7917c00SJeff Kirsher 		if (!buf)
2073f7917c00SJeff Kirsher 			return -ENOMEM;
2074f7917c00SJeff Kirsher 		err = t3_seeprom_read(adapter, aligned_offset, (__le32 *) buf);
2075f7917c00SJeff Kirsher 		if (!err && aligned_len > 4)
2076f7917c00SJeff Kirsher 			err = t3_seeprom_read(adapter,
2077f7917c00SJeff Kirsher 					      aligned_offset + aligned_len - 4,
2078f7917c00SJeff Kirsher 					      (__le32 *)&buf[aligned_len - 4]);
2079f7917c00SJeff Kirsher 		if (err)
2080f7917c00SJeff Kirsher 			goto out;
2081f7917c00SJeff Kirsher 		memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
2082f7917c00SJeff Kirsher 	} else
2083f7917c00SJeff Kirsher 		buf = data;
2084f7917c00SJeff Kirsher 
2085f7917c00SJeff Kirsher 	err = t3_seeprom_wp(adapter, 0);
2086f7917c00SJeff Kirsher 	if (err)
2087f7917c00SJeff Kirsher 		goto out;
2088f7917c00SJeff Kirsher 
2089f7917c00SJeff Kirsher 	for (p = (__le32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
2090f7917c00SJeff Kirsher 		err = t3_seeprom_write(adapter, aligned_offset, *p);
2091f7917c00SJeff Kirsher 		aligned_offset += 4;
2092f7917c00SJeff Kirsher 	}
2093f7917c00SJeff Kirsher 
2094f7917c00SJeff Kirsher 	if (!err)
2095f7917c00SJeff Kirsher 		err = t3_seeprom_wp(adapter, 1);
2096f7917c00SJeff Kirsher out:
2097f7917c00SJeff Kirsher 	if (buf != data)
2098f7917c00SJeff Kirsher 		kfree(buf);
2099f7917c00SJeff Kirsher 	return err;
2100f7917c00SJeff Kirsher }
2101f7917c00SJeff Kirsher 
2102f7917c00SJeff Kirsher static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2103f7917c00SJeff Kirsher {
2104f7917c00SJeff Kirsher 	wol->supported = 0;
2105f7917c00SJeff Kirsher 	wol->wolopts = 0;
2106f7917c00SJeff Kirsher 	memset(&wol->sopass, 0, sizeof(wol->sopass));
2107f7917c00SJeff Kirsher }
2108f7917c00SJeff Kirsher 
2109f7917c00SJeff Kirsher static const struct ethtool_ops cxgb_ethtool_ops = {
2110f7917c00SJeff Kirsher 	.get_drvinfo = get_drvinfo,
2111f7917c00SJeff Kirsher 	.get_msglevel = get_msglevel,
2112f7917c00SJeff Kirsher 	.set_msglevel = set_msglevel,
2113f7917c00SJeff Kirsher 	.get_ringparam = get_sge_param,
2114f7917c00SJeff Kirsher 	.set_ringparam = set_sge_param,
2115f7917c00SJeff Kirsher 	.get_coalesce = get_coalesce,
2116f7917c00SJeff Kirsher 	.set_coalesce = set_coalesce,
2117f7917c00SJeff Kirsher 	.get_eeprom_len = get_eeprom_len,
2118f7917c00SJeff Kirsher 	.get_eeprom = get_eeprom,
2119f7917c00SJeff Kirsher 	.set_eeprom = set_eeprom,
2120f7917c00SJeff Kirsher 	.get_pauseparam = get_pauseparam,
2121f7917c00SJeff Kirsher 	.set_pauseparam = set_pauseparam,
2122f7917c00SJeff Kirsher 	.get_link = ethtool_op_get_link,
2123f7917c00SJeff Kirsher 	.get_strings = get_strings,
2124f7917c00SJeff Kirsher 	.set_phys_id = set_phys_id,
2125f7917c00SJeff Kirsher 	.nway_reset = restart_autoneg,
2126f7917c00SJeff Kirsher 	.get_sset_count = get_sset_count,
2127f7917c00SJeff Kirsher 	.get_ethtool_stats = get_stats,
2128f7917c00SJeff Kirsher 	.get_regs_len = get_regs_len,
2129f7917c00SJeff Kirsher 	.get_regs = get_regs,
2130f7917c00SJeff Kirsher 	.get_wol = get_wol,
2131b7b44fd2SPhilippe Reynes 	.get_link_ksettings = get_link_ksettings,
2132b7b44fd2SPhilippe Reynes 	.set_link_ksettings = set_link_ksettings,
2133f7917c00SJeff Kirsher };
2134f7917c00SJeff Kirsher 
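/* A negative value means "parameter not supplied" and is accepted as valid. */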
2135f7917c00SJeff Kirsher static int in_range(int val, int lo, int hi)
2136f7917c00SJeff Kirsher {
2137f7917c00SJeff Kirsher 	return val < 0 || (val <= hi && val >= lo);
2138f7917c00SJeff Kirsher }
2139f7917c00SJeff Kirsher 
2140f7917c00SJeff Kirsher static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
2141f7917c00SJeff Kirsher {
2142f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(dev);
2143f7917c00SJeff Kirsher 	struct adapter *adapter = pi->adapter;
2144f7917c00SJeff Kirsher 	u32 cmd;
2145f7917c00SJeff Kirsher 	int ret;
2146f7917c00SJeff Kirsher 
2147f7917c00SJeff Kirsher 	if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
2148f7917c00SJeff Kirsher 		return -EFAULT;
2149f7917c00SJeff Kirsher 
2150f7917c00SJeff Kirsher 	switch (cmd) {
2151f7917c00SJeff Kirsher 	case CHELSIO_SET_QSET_PARAMS:{
2152f7917c00SJeff Kirsher 		int i;
2153f7917c00SJeff Kirsher 		struct qset_params *q;
2154f7917c00SJeff Kirsher 		struct ch_qset_params t;
2155f7917c00SJeff Kirsher 		int q1 = pi->first_qset;
2156f7917c00SJeff Kirsher 		int nqsets = pi->nqsets;
2157f7917c00SJeff Kirsher 
2158f7917c00SJeff Kirsher 		if (!capable(CAP_NET_ADMIN))
2159f7917c00SJeff Kirsher 			return -EPERM;
2160f7917c00SJeff Kirsher 		if (copy_from_user(&t, useraddr, sizeof(t)))
2161f7917c00SJeff Kirsher 			return -EFAULT;
21622c05d888SWenwen Wang 		if (t.cmd != CHELSIO_SET_QSET_PARAMS)
21632c05d888SWenwen Wang 			return -EINVAL;
2164f7917c00SJeff Kirsher 		if (t.qset_idx >= SGE_QSETS)
2165f7917c00SJeff Kirsher 			return -EINVAL;
2166f7917c00SJeff Kirsher 		if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
2167f7917c00SJeff Kirsher 		    !in_range(t.cong_thres, 0, 255) ||
2168f7917c00SJeff Kirsher 		    !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
2169f7917c00SJeff Kirsher 			      MAX_TXQ_ENTRIES) ||
2170f7917c00SJeff Kirsher 		    !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
2171f7917c00SJeff Kirsher 			      MAX_TXQ_ENTRIES) ||
2172f7917c00SJeff Kirsher 		    !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
2173f7917c00SJeff Kirsher 			      MAX_CTRL_TXQ_ENTRIES) ||
2174f7917c00SJeff Kirsher 		    !in_range(t.fl_size[0], MIN_FL_ENTRIES,
2175f7917c00SJeff Kirsher 			      MAX_RX_BUFFERS) ||
2176f7917c00SJeff Kirsher 		    !in_range(t.fl_size[1], MIN_FL_ENTRIES,
2177f7917c00SJeff Kirsher 			      MAX_RX_JUMBO_BUFFERS) ||
2178f7917c00SJeff Kirsher 		    !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
2179f7917c00SJeff Kirsher 			      MAX_RSPQ_ENTRIES))
2180f7917c00SJeff Kirsher 			return -EINVAL;
2181f7917c00SJeff Kirsher 
2182f7917c00SJeff Kirsher 		if ((adapter->flags & FULL_INIT_DONE) &&
2183f7917c00SJeff Kirsher 			(t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
2184f7917c00SJeff Kirsher 			t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
2185f7917c00SJeff Kirsher 			t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
2186f7917c00SJeff Kirsher 			t.polling >= 0 || t.cong_thres >= 0))
2187f7917c00SJeff Kirsher 			return -EBUSY;
2188f7917c00SJeff Kirsher 
2189f7917c00SJeff Kirsher 		/* Allow setting of any available qset when offload enabled */
2190f7917c00SJeff Kirsher 		if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2191f7917c00SJeff Kirsher 			q1 = 0;
2192f7917c00SJeff Kirsher 			for_each_port(adapter, i) {
2193f7917c00SJeff Kirsher 				pi = adap2pinfo(adapter, i);
2194f7917c00SJeff Kirsher 				nqsets += pi->first_qset + pi->nqsets;
2195f7917c00SJeff Kirsher 			}
2196f7917c00SJeff Kirsher 		}
2197f7917c00SJeff Kirsher 
2198f7917c00SJeff Kirsher 		if (t.qset_idx < q1)
2199f7917c00SJeff Kirsher 			return -EINVAL;
2200f7917c00SJeff Kirsher 		if (t.qset_idx > q1 + nqsets - 1)
2201f7917c00SJeff Kirsher 			return -EINVAL;
2202f7917c00SJeff Kirsher 
2203f7917c00SJeff Kirsher 		q = &adapter->params.sge.qset[t.qset_idx];
2204f7917c00SJeff Kirsher 
2205f7917c00SJeff Kirsher 		if (t.rspq_size >= 0)
2206f7917c00SJeff Kirsher 			q->rspq_size = t.rspq_size;
2207f7917c00SJeff Kirsher 		if (t.fl_size[0] >= 0)
2208f7917c00SJeff Kirsher 			q->fl_size = t.fl_size[0];
2209f7917c00SJeff Kirsher 		if (t.fl_size[1] >= 0)
2210f7917c00SJeff Kirsher 			q->jumbo_size = t.fl_size[1];
2211f7917c00SJeff Kirsher 		if (t.txq_size[0] >= 0)
2212f7917c00SJeff Kirsher 			q->txq_size[0] = t.txq_size[0];
2213f7917c00SJeff Kirsher 		if (t.txq_size[1] >= 0)
2214f7917c00SJeff Kirsher 			q->txq_size[1] = t.txq_size[1];
2215f7917c00SJeff Kirsher 		if (t.txq_size[2] >= 0)
2216f7917c00SJeff Kirsher 			q->txq_size[2] = t.txq_size[2];
2217f7917c00SJeff Kirsher 		if (t.cong_thres >= 0)
2218f7917c00SJeff Kirsher 			q->cong_thres = t.cong_thres;
2219f7917c00SJeff Kirsher 		if (t.intr_lat >= 0) {
2220f7917c00SJeff Kirsher 			struct sge_qset *qs =
2221f7917c00SJeff Kirsher 				&adapter->sge.qs[t.qset_idx];
2222f7917c00SJeff Kirsher 
2223f7917c00SJeff Kirsher 			q->coalesce_usecs = t.intr_lat;
2224f7917c00SJeff Kirsher 			t3_update_qset_coalesce(qs, q);
2225f7917c00SJeff Kirsher 		}
2226f7917c00SJeff Kirsher 		if (t.polling >= 0) {
2227f7917c00SJeff Kirsher 			if (adapter->flags & USING_MSIX)
2228f7917c00SJeff Kirsher 				q->polling = t.polling;
2229f7917c00SJeff Kirsher 			else {
2230f7917c00SJeff Kirsher 				/* No polling with INTx for T3A */
2231f7917c00SJeff Kirsher 				if (adapter->params.rev == 0 &&
2232f7917c00SJeff Kirsher 					!(adapter->flags & USING_MSI))
2233f7917c00SJeff Kirsher 					t.polling = 0;
2234f7917c00SJeff Kirsher 
2235f7917c00SJeff Kirsher 				for (i = 0; i < SGE_QSETS; i++) {
2236f7917c00SJeff Kirsher 					q = &adapter->params.sge.qset[i];
2238f7917c00SJeff Kirsher 					q->polling = t.polling;
2239f7917c00SJeff Kirsher 				}
2240f7917c00SJeff Kirsher 			}
2241f7917c00SJeff Kirsher 		}
2242f7917c00SJeff Kirsher 
2243f7917c00SJeff Kirsher 		if (t.lro >= 0) {
2244f7917c00SJeff Kirsher 			if (t.lro)
2245f7917c00SJeff Kirsher 				dev->wanted_features |= NETIF_F_GRO;
2246f7917c00SJeff Kirsher 			else
2247f7917c00SJeff Kirsher 				dev->wanted_features &= ~NETIF_F_GRO;
2248f7917c00SJeff Kirsher 			netdev_update_features(dev);
2249f7917c00SJeff Kirsher 		}
2250f7917c00SJeff Kirsher 
2251f7917c00SJeff Kirsher 		break;
2252f7917c00SJeff Kirsher 	}
2253f7917c00SJeff Kirsher 	case CHELSIO_GET_QSET_PARAMS:{
2254f7917c00SJeff Kirsher 		struct qset_params *q;
2255f7917c00SJeff Kirsher 		struct ch_qset_params t;
2256f7917c00SJeff Kirsher 		int q1 = pi->first_qset;
2257f7917c00SJeff Kirsher 		int nqsets = pi->nqsets;
2258f7917c00SJeff Kirsher 		int i;
2259f7917c00SJeff Kirsher 
2260f7917c00SJeff Kirsher 		if (copy_from_user(&t, useraddr, sizeof(t)))
2261f7917c00SJeff Kirsher 			return -EFAULT;
2262f7917c00SJeff Kirsher 
22632c05d888SWenwen Wang 		if (t.cmd != CHELSIO_GET_QSET_PARAMS)
22642c05d888SWenwen Wang 			return -EINVAL;
22652c05d888SWenwen Wang 
2266f7917c00SJeff Kirsher 		/* Display qsets for all ports when offload enabled */
2267f7917c00SJeff Kirsher 		if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2268f7917c00SJeff Kirsher 			q1 = 0;
2269f7917c00SJeff Kirsher 			for_each_port(adapter, i) {
2270f7917c00SJeff Kirsher 				pi = adap2pinfo(adapter, i);
2271f7917c00SJeff Kirsher 				nqsets = pi->first_qset + pi->nqsets;
2272f7917c00SJeff Kirsher 			}
2273f7917c00SJeff Kirsher 		}
2274f7917c00SJeff Kirsher 
2275f7917c00SJeff Kirsher 		if (t.qset_idx >= nqsets)
2276f7917c00SJeff Kirsher 			return -EINVAL;
2277676bcfecSGustavo A. R. Silva 		t.qset_idx = array_index_nospec(t.qset_idx, nqsets);
2278f7917c00SJeff Kirsher 
2279f7917c00SJeff Kirsher 		q = &adapter->params.sge.qset[q1 + t.qset_idx];
2280f7917c00SJeff Kirsher 		t.rspq_size = q->rspq_size;
2281f7917c00SJeff Kirsher 		t.txq_size[0] = q->txq_size[0];
2282f7917c00SJeff Kirsher 		t.txq_size[1] = q->txq_size[1];
2283f7917c00SJeff Kirsher 		t.txq_size[2] = q->txq_size[2];
2284f7917c00SJeff Kirsher 		t.fl_size[0] = q->fl_size;
2285f7917c00SJeff Kirsher 		t.fl_size[1] = q->jumbo_size;
2286f7917c00SJeff Kirsher 		t.polling = q->polling;
2287f7917c00SJeff Kirsher 		t.lro = !!(dev->features & NETIF_F_GRO);
2288f7917c00SJeff Kirsher 		t.intr_lat = q->coalesce_usecs;
2289f7917c00SJeff Kirsher 		t.cong_thres = q->cong_thres;
2290f7917c00SJeff Kirsher 		t.qnum = q1;
2291f7917c00SJeff Kirsher 
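		/* msix_info[0] is the non-data interrupt, hence the "+ 1" */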
2292f7917c00SJeff Kirsher 		if (adapter->flags & USING_MSIX)
2293f7917c00SJeff Kirsher 			t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
2294f7917c00SJeff Kirsher 		else
2295f7917c00SJeff Kirsher 			t.vector = adapter->pdev->irq;
2296f7917c00SJeff Kirsher 
2297f7917c00SJeff Kirsher 		if (copy_to_user(useraddr, &t, sizeof(t)))
2298f7917c00SJeff Kirsher 			return -EFAULT;
2299f7917c00SJeff Kirsher 		break;
2300f7917c00SJeff Kirsher 	}
2301f7917c00SJeff Kirsher 	case CHELSIO_SET_QSET_NUM:{
2302f7917c00SJeff Kirsher 		struct ch_reg edata;
2303f7917c00SJeff Kirsher 		unsigned int i, first_qset = 0, other_qsets = 0;
2304f7917c00SJeff Kirsher 
2305f7917c00SJeff Kirsher 		if (!capable(CAP_NET_ADMIN))
2306f7917c00SJeff Kirsher 			return -EPERM;
2307f7917c00SJeff Kirsher 		if (adapter->flags & FULL_INIT_DONE)
2308f7917c00SJeff Kirsher 			return -EBUSY;
2309f7917c00SJeff Kirsher 		if (copy_from_user(&edata, useraddr, sizeof(edata)))
2310f7917c00SJeff Kirsher 			return -EFAULT;
23112c05d888SWenwen Wang 		if (edata.cmd != CHELSIO_SET_QSET_NUM)
23122c05d888SWenwen Wang 			return -EINVAL;
2313f7917c00SJeff Kirsher 		if (edata.val < 1 ||
2314f7917c00SJeff Kirsher 			(edata.val > 1 && !(adapter->flags & USING_MSIX)))
2315f7917c00SJeff Kirsher 			return -EINVAL;
2316f7917c00SJeff Kirsher 
2317f7917c00SJeff Kirsher 		for_each_port(adapter, i)
2318f7917c00SJeff Kirsher 			if (adapter->port[i] && adapter->port[i] != dev)
2319f7917c00SJeff Kirsher 				other_qsets += adap2pinfo(adapter, i)->nqsets;
2320f7917c00SJeff Kirsher 
2321f7917c00SJeff Kirsher 		if (edata.val + other_qsets > SGE_QSETS)
2322f7917c00SJeff Kirsher 			return -EINVAL;
2323f7917c00SJeff Kirsher 
2324f7917c00SJeff Kirsher 		pi->nqsets = edata.val;
2325f7917c00SJeff Kirsher 
2326f7917c00SJeff Kirsher 		for_each_port(adapter, i)
2327f7917c00SJeff Kirsher 			if (adapter->port[i]) {
2328f7917c00SJeff Kirsher 				pi = adap2pinfo(adapter, i);
2329f7917c00SJeff Kirsher 				pi->first_qset = first_qset;
2330f7917c00SJeff Kirsher 				first_qset += pi->nqsets;
2331f7917c00SJeff Kirsher 			}
2332f7917c00SJeff Kirsher 		break;
2333f7917c00SJeff Kirsher 	}
2334f7917c00SJeff Kirsher 	case CHELSIO_GET_QSET_NUM:{
2335f7917c00SJeff Kirsher 		struct ch_reg edata;
2336f7917c00SJeff Kirsher 
2337f7917c00SJeff Kirsher 		memset(&edata, 0, sizeof(struct ch_reg));
2338f7917c00SJeff Kirsher 
2339f7917c00SJeff Kirsher 		edata.cmd = CHELSIO_GET_QSET_NUM;
2340f7917c00SJeff Kirsher 		edata.val = pi->nqsets;
2341f7917c00SJeff Kirsher 		if (copy_to_user(useraddr, &edata, sizeof(edata)))
2342f7917c00SJeff Kirsher 			return -EFAULT;
2343f7917c00SJeff Kirsher 		break;
2344f7917c00SJeff Kirsher 	}
2345f7917c00SJeff Kirsher 	case CHELSIO_LOAD_FW:{
2346f7917c00SJeff Kirsher 		u8 *fw_data;
2347f7917c00SJeff Kirsher 		struct ch_mem_range t;
2348f7917c00SJeff Kirsher 
2349f7917c00SJeff Kirsher 		if (!capable(CAP_SYS_RAWIO))
2350f7917c00SJeff Kirsher 			return -EPERM;
2351f7917c00SJeff Kirsher 		if (copy_from_user(&t, useraddr, sizeof(t)))
2352f7917c00SJeff Kirsher 			return -EFAULT;
23532c05d888SWenwen Wang 		if (t.cmd != CHELSIO_LOAD_FW)
23542c05d888SWenwen Wang 			return -EINVAL;
2355f7917c00SJeff Kirsher 		/* Check t.len sanity ? */
2356f7917c00SJeff Kirsher 		fw_data = memdup_user(useraddr + sizeof(t), t.len);
2357f7917c00SJeff Kirsher 		if (IS_ERR(fw_data))
2358f7917c00SJeff Kirsher 			return PTR_ERR(fw_data);
2359f7917c00SJeff Kirsher 
2360f7917c00SJeff Kirsher 		ret = t3_load_fw(adapter, fw_data, t.len);
2361f7917c00SJeff Kirsher 		kfree(fw_data);
2362f7917c00SJeff Kirsher 		if (ret)
2363f7917c00SJeff Kirsher 			return ret;
2364f7917c00SJeff Kirsher 		break;
2365f7917c00SJeff Kirsher 	}
2366f7917c00SJeff Kirsher 	case CHELSIO_SETMTUTAB:{
2367f7917c00SJeff Kirsher 		struct ch_mtus m;
2368f7917c00SJeff Kirsher 		int i;
2369f7917c00SJeff Kirsher 
2370f7917c00SJeff Kirsher 		if (!is_offload(adapter))
2371f7917c00SJeff Kirsher 			return -EOPNOTSUPP;
2372f7917c00SJeff Kirsher 		if (!capable(CAP_NET_ADMIN))
2373f7917c00SJeff Kirsher 			return -EPERM;
2374f7917c00SJeff Kirsher 		if (offload_running(adapter))
2375f7917c00SJeff Kirsher 			return -EBUSY;
2376f7917c00SJeff Kirsher 		if (copy_from_user(&m, useraddr, sizeof(m)))
2377f7917c00SJeff Kirsher 			return -EFAULT;
23782c05d888SWenwen Wang 		if (m.cmd != CHELSIO_SETMTUTAB)
23792c05d888SWenwen Wang 			return -EINVAL;
2380f7917c00SJeff Kirsher 		if (m.nmtus != NMTUS)
2381f7917c00SJeff Kirsher 			return -EINVAL;
2382f7917c00SJeff Kirsher 		if (m.mtus[0] < 81)	/* accommodate SACK */
2383f7917c00SJeff Kirsher 			return -EINVAL;
2384f7917c00SJeff Kirsher 
2385f7917c00SJeff Kirsher 		/* MTUs must be in ascending order */
2386f7917c00SJeff Kirsher 		for (i = 1; i < NMTUS; ++i)
2387f7917c00SJeff Kirsher 			if (m.mtus[i] < m.mtus[i - 1])
2388f7917c00SJeff Kirsher 				return -EINVAL;
2389f7917c00SJeff Kirsher 
2390f7917c00SJeff Kirsher 		memcpy(adapter->params.mtus, m.mtus,
2391f7917c00SJeff Kirsher 			sizeof(adapter->params.mtus));
2392f7917c00SJeff Kirsher 		break;
2393f7917c00SJeff Kirsher 	}
2394f7917c00SJeff Kirsher 	case CHELSIO_GET_PM:{
2395f7917c00SJeff Kirsher 		struct tp_params *p = &adapter->params.tp;
2396f7917c00SJeff Kirsher 		struct ch_pm m = {.cmd = CHELSIO_GET_PM };
2397f7917c00SJeff Kirsher 
2398f7917c00SJeff Kirsher 		if (!is_offload(adapter))
2399f7917c00SJeff Kirsher 			return -EOPNOTSUPP;
2400f7917c00SJeff Kirsher 		m.tx_pg_sz = p->tx_pg_size;
2401f7917c00SJeff Kirsher 		m.tx_num_pg = p->tx_num_pgs;
2402f7917c00SJeff Kirsher 		m.rx_pg_sz = p->rx_pg_size;
2403f7917c00SJeff Kirsher 		m.rx_num_pg = p->rx_num_pgs;
2404f7917c00SJeff Kirsher 		m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
2405f7917c00SJeff Kirsher 		if (copy_to_user(useraddr, &m, sizeof(m)))
2406f7917c00SJeff Kirsher 			return -EFAULT;
2407f7917c00SJeff Kirsher 		break;
2408f7917c00SJeff Kirsher 	}
2409f7917c00SJeff Kirsher 	case CHELSIO_SET_PM:{
2410f7917c00SJeff Kirsher 		struct ch_pm m;
2411f7917c00SJeff Kirsher 		struct tp_params *p = &adapter->params.tp;
2412f7917c00SJeff Kirsher 
2413f7917c00SJeff Kirsher 		if (!is_offload(adapter))
2414f7917c00SJeff Kirsher 			return -EOPNOTSUPP;
2415f7917c00SJeff Kirsher 		if (!capable(CAP_NET_ADMIN))
2416f7917c00SJeff Kirsher 			return -EPERM;
2417f7917c00SJeff Kirsher 		if (adapter->flags & FULL_INIT_DONE)
2418f7917c00SJeff Kirsher 			return -EBUSY;
2419f7917c00SJeff Kirsher 		if (copy_from_user(&m, useraddr, sizeof(m)))
2420f7917c00SJeff Kirsher 			return -EFAULT;
24212c05d888SWenwen Wang 		if (m.cmd != CHELSIO_SET_PM)
24222c05d888SWenwen Wang 			return -EINVAL;
2423f7917c00SJeff Kirsher 		if (!is_power_of_2(m.rx_pg_sz) ||
2424f7917c00SJeff Kirsher 			!is_power_of_2(m.tx_pg_sz))
2425f7917c00SJeff Kirsher 			return -EINVAL;	/* not power of 2 */
2426f7917c00SJeff Kirsher 		if (!(m.rx_pg_sz & 0x14000))
2427f7917c00SJeff Kirsher 			return -EINVAL;	/* not 16KB or 64KB */
2428f7917c00SJeff Kirsher 		if (!(m.tx_pg_sz & 0x1554000))
2429f7917c00SJeff Kirsher 			return -EINVAL;	/* not 16KB * 4^n, up to 16MB */
2430f7917c00SJeff Kirsher 		if (m.tx_num_pg == -1)
2431f7917c00SJeff Kirsher 			m.tx_num_pg = p->tx_num_pgs;
2432f7917c00SJeff Kirsher 		if (m.rx_num_pg == -1)
2433f7917c00SJeff Kirsher 			m.rx_num_pg = p->rx_num_pgs;
2434f7917c00SJeff Kirsher 		if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
2435f7917c00SJeff Kirsher 			return -EINVAL;
2436f7917c00SJeff Kirsher 		if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
2437f7917c00SJeff Kirsher 			m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
2438f7917c00SJeff Kirsher 			return -EINVAL;
2439f7917c00SJeff Kirsher 		p->rx_pg_size = m.rx_pg_sz;
2440f7917c00SJeff Kirsher 		p->tx_pg_size = m.tx_pg_sz;
2441f7917c00SJeff Kirsher 		p->rx_num_pgs = m.rx_num_pg;
2442f7917c00SJeff Kirsher 		p->tx_num_pgs = m.tx_num_pg;
2443f7917c00SJeff Kirsher 		break;
2444f7917c00SJeff Kirsher 	}
2445f7917c00SJeff Kirsher 	case CHELSIO_GET_MEM:{
2446f7917c00SJeff Kirsher 		struct ch_mem_range t;
2447f7917c00SJeff Kirsher 		struct mc7 *mem;
2448f7917c00SJeff Kirsher 		u64 buf[32];
2449f7917c00SJeff Kirsher 
2450f7917c00SJeff Kirsher 		if (!is_offload(adapter))
2451f7917c00SJeff Kirsher 			return -EOPNOTSUPP;
2452f7917c00SJeff Kirsher 		if (!(adapter->flags & FULL_INIT_DONE))
2453f7917c00SJeff Kirsher 			return -EIO;	/* need the memory controllers */
2454f7917c00SJeff Kirsher 		if (copy_from_user(&t, useraddr, sizeof(t)))
2455f7917c00SJeff Kirsher 			return -EFAULT;
24562c05d888SWenwen Wang 		if (t.cmd != CHELSIO_GET_MEM)
24572c05d888SWenwen Wang 			return -EINVAL;
2458f7917c00SJeff Kirsher 		if ((t.addr & 7) || (t.len & 7))
2459f7917c00SJeff Kirsher 			return -EINVAL;
2460f7917c00SJeff Kirsher 		if (t.mem_id == MEM_CM)
2461f7917c00SJeff Kirsher 			mem = &adapter->cm;
2462f7917c00SJeff Kirsher 		else if (t.mem_id == MEM_PMRX)
2463f7917c00SJeff Kirsher 			mem = &adapter->pmrx;
2464f7917c00SJeff Kirsher 		else if (t.mem_id == MEM_PMTX)
2465f7917c00SJeff Kirsher 			mem = &adapter->pmtx;
2466f7917c00SJeff Kirsher 		else
2467f7917c00SJeff Kirsher 			return -EINVAL;
2468f7917c00SJeff Kirsher 
2469f7917c00SJeff Kirsher 		/*
2470f7917c00SJeff Kirsher 		 * Version scheme:
2471f7917c00SJeff Kirsher 		 * bits 0..9: chip version
2472f7917c00SJeff Kirsher 		 * bits 10..15: chip revision
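		 *
		 * A part with params.rev == 2, for example, reports
		 * 3 | (2 << 10) == 0x803.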
2473f7917c00SJeff Kirsher 		 */
2474f7917c00SJeff Kirsher 		t.version = 3 | (adapter->params.rev << 10);
2475f7917c00SJeff Kirsher 		if (copy_to_user(useraddr, &t, sizeof(t)))
2476f7917c00SJeff Kirsher 			return -EFAULT;
2477f7917c00SJeff Kirsher 
2478f7917c00SJeff Kirsher 		/*
2479f7917c00SJeff Kirsher 		 * Read 256 bytes at a time as len can be large and we don't
2480f7917c00SJeff Kirsher 		 * want to use huge intermediate buffers.
2481f7917c00SJeff Kirsher 		 */
2482f7917c00SJeff Kirsher 		useraddr += sizeof(t);	/* advance to start of buffer */
2483f7917c00SJeff Kirsher 		while (t.len) {
2484f7917c00SJeff Kirsher 			unsigned int chunk =
2485f7917c00SJeff Kirsher 				min_t(unsigned int, t.len, sizeof(buf));
2486f7917c00SJeff Kirsher 
2487f7917c00SJeff Kirsher 			ret = t3_mc7_bd_read(mem, t.addr / 8, chunk / 8, buf);
2490f7917c00SJeff Kirsher 			if (ret)
2491f7917c00SJeff Kirsher 				return ret;
2492f7917c00SJeff Kirsher 			if (copy_to_user(useraddr, buf, chunk))
2493f7917c00SJeff Kirsher 				return -EFAULT;
2494f7917c00SJeff Kirsher 			useraddr += chunk;
2495f7917c00SJeff Kirsher 			t.addr += chunk;
2496f7917c00SJeff Kirsher 			t.len -= chunk;
2497f7917c00SJeff Kirsher 		}
2498f7917c00SJeff Kirsher 		break;
2499f7917c00SJeff Kirsher 	}
2500f7917c00SJeff Kirsher 	case CHELSIO_SET_TRACE_FILTER:{
2501f7917c00SJeff Kirsher 		struct ch_trace t;
2502f7917c00SJeff Kirsher 		const struct trace_params *tp;
2503f7917c00SJeff Kirsher 
2504f7917c00SJeff Kirsher 		if (!capable(CAP_NET_ADMIN))
2505f7917c00SJeff Kirsher 			return -EPERM;
2506f7917c00SJeff Kirsher 		if (!offload_running(adapter))
2507f7917c00SJeff Kirsher 			return -EAGAIN;
2508f7917c00SJeff Kirsher 		if (copy_from_user(&t, useraddr, sizeof(t)))
2509f7917c00SJeff Kirsher 			return -EFAULT;
25102c05d888SWenwen Wang 		if (t.cmd != CHELSIO_SET_TRACE_FILTER)
25112c05d888SWenwen Wang 			return -EINVAL;
2512f7917c00SJeff Kirsher 
2513f7917c00SJeff Kirsher 		tp = (const struct trace_params *)&t.sip;
2514f7917c00SJeff Kirsher 		if (t.config_tx)
2515f7917c00SJeff Kirsher 			t3_config_trace_filter(adapter, tp, 0,
2516f7917c00SJeff Kirsher 						t.invert_match,
2517f7917c00SJeff Kirsher 						t.trace_tx);
2518f7917c00SJeff Kirsher 		if (t.config_rx)
2519f7917c00SJeff Kirsher 			t3_config_trace_filter(adapter, tp, 1,
2520f7917c00SJeff Kirsher 						t.invert_match,
2521f7917c00SJeff Kirsher 						t.trace_rx);
2522f7917c00SJeff Kirsher 		break;
2523f7917c00SJeff Kirsher 	}
2524f7917c00SJeff Kirsher 	default:
2525f7917c00SJeff Kirsher 		return -EOPNOTSUPP;
2526f7917c00SJeff Kirsher 	}
2527f7917c00SJeff Kirsher 	return 0;
2528f7917c00SJeff Kirsher }
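
/*
 * Illustrative sketch (not part of the driver): user space reaches
 * cxgb_extension_ioctl() above through the SIOCCHIOCTL private ioctl on a
 * cxgb3 interface.  Querying the number of queue sets, for example, only
 * needs the cmd/val layout of struct ch_reg used above (declared in
 * cxgb3_ioctl.h); the interface name, socket type and minimal error
 * handling below are placeholder assumptions.
 *
 *	struct ch_reg edata = { .cmd = CHELSIO_GET_QSET_NUM };
 *	struct ifreq ifr = { };
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&edata;
 *	if (fd >= 0 && ioctl(fd, SIOCCHIOCTL, &ifr) == 0)
 *		printf("%s: %u queue sets\n", ifr.ifr_name, edata.val);
 */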
2529f7917c00SJeff Kirsher 
2530f7917c00SJeff Kirsher static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2531f7917c00SJeff Kirsher {
2532f7917c00SJeff Kirsher 	struct mii_ioctl_data *data = if_mii(req);
2533f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(dev);
2534f7917c00SJeff Kirsher 	struct adapter *adapter = pi->adapter;
2535f7917c00SJeff Kirsher 
2536f7917c00SJeff Kirsher 	switch (cmd) {
2537f7917c00SJeff Kirsher 	case SIOCGMIIREG:
2538f7917c00SJeff Kirsher 	case SIOCSMIIREG:
2539f7917c00SJeff Kirsher 		/* Convert phy_id from older PRTAD/DEVAD format */
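		/*
		 * The legacy layout packs PRTAD into bits 8..12 and DEVAD
		 * into bits 0..4; only convert when the value matches that
		 * pattern (non-zero PRTAD, bits 5..7 and 13..15 clear).
		 */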
2540f7917c00SJeff Kirsher 		if (is_10G(adapter) &&
2541f7917c00SJeff Kirsher 		    !mdio_phy_id_is_c45(data->phy_id) &&
2542f7917c00SJeff Kirsher 		    (data->phy_id & 0x1f00) &&
2543f7917c00SJeff Kirsher 		    !(data->phy_id & 0xe0e0))
2544f7917c00SJeff Kirsher 			data->phy_id = mdio_phy_id_c45(data->phy_id >> 8,
2545f7917c00SJeff Kirsher 						       data->phy_id & 0x1f);
2546f7917c00SJeff Kirsher 		/* FALLTHRU */
2547f7917c00SJeff Kirsher 	case SIOCGMIIPHY:
2548f7917c00SJeff Kirsher 		return mdio_mii_ioctl(&pi->phy.mdio, data, cmd);
2549f7917c00SJeff Kirsher 	case SIOCCHIOCTL:
2550f7917c00SJeff Kirsher 		return cxgb_extension_ioctl(dev, req->ifr_data);
2551f7917c00SJeff Kirsher 	default:
2552f7917c00SJeff Kirsher 		return -EOPNOTSUPP;
2553f7917c00SJeff Kirsher 	}
2554f7917c00SJeff Kirsher }
2555f7917c00SJeff Kirsher 
2556f7917c00SJeff Kirsher static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2557f7917c00SJeff Kirsher {
2558f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(dev);
2559f7917c00SJeff Kirsher 	struct adapter *adapter = pi->adapter;
2560f7917c00SJeff Kirsher 	int ret;
2561f7917c00SJeff Kirsher 
2562f7917c00SJeff Kirsher 	if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
2563f7917c00SJeff Kirsher 		return ret;
2564f7917c00SJeff Kirsher 	dev->mtu = new_mtu;
2565f7917c00SJeff Kirsher 	init_port_mtus(adapter);
2566f7917c00SJeff Kirsher 	if (adapter->params.rev == 0 && offload_running(adapter))
2567f7917c00SJeff Kirsher 		t3_load_mtus(adapter, adapter->params.mtus,
2568f7917c00SJeff Kirsher 			     adapter->params.a_wnd, adapter->params.b_wnd,
2569f7917c00SJeff Kirsher 			     adapter->port[0]->mtu);
2570f7917c00SJeff Kirsher 	return 0;
2571f7917c00SJeff Kirsher }
2572f7917c00SJeff Kirsher 
2573f7917c00SJeff Kirsher static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2574f7917c00SJeff Kirsher {
2575f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(dev);
2576f7917c00SJeff Kirsher 	struct adapter *adapter = pi->adapter;
2577f7917c00SJeff Kirsher 	struct sockaddr *addr = p;
2578f7917c00SJeff Kirsher 
2579f7917c00SJeff Kirsher 	if (!is_valid_ether_addr(addr->sa_data))
2580504f9b5aSDanny Kukawka 		return -EADDRNOTAVAIL;
2581f7917c00SJeff Kirsher 
2582f7917c00SJeff Kirsher 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2583f7917c00SJeff Kirsher 	t3_mac_set_address(&pi->mac, LAN_MAC_IDX, dev->dev_addr);
2584f7917c00SJeff Kirsher 	if (offload_running(adapter))
2585f7917c00SJeff Kirsher 		write_smt_entry(adapter, pi->port_id);
2586f7917c00SJeff Kirsher 	return 0;
2587f7917c00SJeff Kirsher }
2588f7917c00SJeff Kirsher 
2589c8f44affSMichał Mirosław static netdev_features_t cxgb_fix_features(struct net_device *dev,
2590c8f44affSMichał Mirosław 	netdev_features_t features)
2591f7917c00SJeff Kirsher {
2592f7917c00SJeff Kirsher 	/*
2593f7917c00SJeff Kirsher 	 * Since there is no support for separate rx/tx vlan accel
2594f7917c00SJeff Kirsher 	 * enable/disable, make sure the tx flag always matches the rx flag.
2595f7917c00SJeff Kirsher 	 */
2596f646968fSPatrick McHardy 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
2597f646968fSPatrick McHardy 		features |= NETIF_F_HW_VLAN_CTAG_TX;
2598f7917c00SJeff Kirsher 	else
2599f646968fSPatrick McHardy 		features &= ~NETIF_F_HW_VLAN_CTAG_TX;
2600f7917c00SJeff Kirsher 
2601f7917c00SJeff Kirsher 	return features;
2602f7917c00SJeff Kirsher }
2603f7917c00SJeff Kirsher 
2604c8f44affSMichał Mirosław static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
2605f7917c00SJeff Kirsher {
2606c8f44affSMichał Mirosław 	netdev_features_t changed = dev->features ^ features;
2607f7917c00SJeff Kirsher 
2608f646968fSPatrick McHardy 	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
2609f7917c00SJeff Kirsher 		cxgb_vlan_mode(dev, features);
2610f7917c00SJeff Kirsher 
2611f7917c00SJeff Kirsher 	return 0;
2612f7917c00SJeff Kirsher }
2613f7917c00SJeff Kirsher 
2614f7917c00SJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER
2615f7917c00SJeff Kirsher static void cxgb_netpoll(struct net_device *dev)
2616f7917c00SJeff Kirsher {
2617f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(dev);
2618f7917c00SJeff Kirsher 	struct adapter *adapter = pi->adapter;
2619f7917c00SJeff Kirsher 	int qidx;
2620f7917c00SJeff Kirsher 
2621f7917c00SJeff Kirsher 	for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
2622f7917c00SJeff Kirsher 		struct sge_qset *qs = &adapter->sge.qs[qidx];
2623f7917c00SJeff Kirsher 		void *source;
2624f7917c00SJeff Kirsher 
2625f7917c00SJeff Kirsher 		if (adapter->flags & USING_MSIX)
2626f7917c00SJeff Kirsher 			source = qs;
2627f7917c00SJeff Kirsher 		else
2628f7917c00SJeff Kirsher 			source = adapter;
2629f7917c00SJeff Kirsher 
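		/*
		 * t3_intr_handler() returns the handler that matches the
		 * current interrupt mode; invoke it directly with the same
		 * dev_id cookie the IRQ core would pass.
		 */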
2630f7917c00SJeff Kirsher 		t3_intr_handler(adapter, qs->rspq.polling) (0, source);
2631f7917c00SJeff Kirsher 	}
2632f7917c00SJeff Kirsher }
2633f7917c00SJeff Kirsher #endif
2634f7917c00SJeff Kirsher 
2635f7917c00SJeff Kirsher /*
2636f7917c00SJeff Kirsher  * Periodic accumulation of MAC statistics.
2637f7917c00SJeff Kirsher  */
2638f7917c00SJeff Kirsher static void mac_stats_update(struct adapter *adapter)
2639f7917c00SJeff Kirsher {
2640f7917c00SJeff Kirsher 	int i;
2641f7917c00SJeff Kirsher 
2642f7917c00SJeff Kirsher 	for_each_port(adapter, i) {
2643f7917c00SJeff Kirsher 		struct net_device *dev = adapter->port[i];
2644f7917c00SJeff Kirsher 		struct port_info *p = netdev_priv(dev);
2645f7917c00SJeff Kirsher 
2646f7917c00SJeff Kirsher 		if (netif_running(dev)) {
2647f7917c00SJeff Kirsher 			spin_lock(&adapter->stats_lock);
2648f7917c00SJeff Kirsher 			t3_mac_update_stats(&p->mac);
2649f7917c00SJeff Kirsher 			spin_unlock(&adapter->stats_lock);
2650f7917c00SJeff Kirsher 		}
2651f7917c00SJeff Kirsher 	}
2652f7917c00SJeff Kirsher }
2653f7917c00SJeff Kirsher 
2654f7917c00SJeff Kirsher static void check_link_status(struct adapter *adapter)
2655f7917c00SJeff Kirsher {
2656f7917c00SJeff Kirsher 	int i;
2657f7917c00SJeff Kirsher 
2658f7917c00SJeff Kirsher 	for_each_port(adapter, i) {
2659f7917c00SJeff Kirsher 		struct net_device *dev = adapter->port[i];
2660f7917c00SJeff Kirsher 		struct port_info *p = netdev_priv(dev);
2661f7917c00SJeff Kirsher 		int link_fault;
2662f7917c00SJeff Kirsher 
2663f7917c00SJeff Kirsher 		spin_lock_irq(&adapter->work_lock);
2664f7917c00SJeff Kirsher 		link_fault = p->link_fault;
2665f7917c00SJeff Kirsher 		spin_unlock_irq(&adapter->work_lock);
2666f7917c00SJeff Kirsher 
2667f7917c00SJeff Kirsher 		if (link_fault) {
2668f7917c00SJeff Kirsher 			t3_link_fault(adapter, i);
2669f7917c00SJeff Kirsher 			continue;
2670f7917c00SJeff Kirsher 		}
2671f7917c00SJeff Kirsher 
2672f7917c00SJeff Kirsher 		if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev)) {
2673f7917c00SJeff Kirsher 			t3_xgm_intr_disable(adapter, i);
2674f7917c00SJeff Kirsher 			t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2675f7917c00SJeff Kirsher 
2676f7917c00SJeff Kirsher 			t3_link_changed(adapter, i);
2677f7917c00SJeff Kirsher 			t3_xgm_intr_enable(adapter, i);
2678f7917c00SJeff Kirsher 		}
2679f7917c00SJeff Kirsher 	}
2680f7917c00SJeff Kirsher }
2681f7917c00SJeff Kirsher 
2682f7917c00SJeff Kirsher static void check_t3b2_mac(struct adapter *adapter)
2683f7917c00SJeff Kirsher {
2684f7917c00SJeff Kirsher 	int i;
2685f7917c00SJeff Kirsher 
2686f7917c00SJeff Kirsher 	if (!rtnl_trylock())	/* synchronize with ifdown */
2687f7917c00SJeff Kirsher 		return;
2688f7917c00SJeff Kirsher 
2689f7917c00SJeff Kirsher 	for_each_port(adapter, i) {
2690f7917c00SJeff Kirsher 		struct net_device *dev = adapter->port[i];
2691f7917c00SJeff Kirsher 		struct port_info *p = netdev_priv(dev);
2692f7917c00SJeff Kirsher 		int status;
2693f7917c00SJeff Kirsher 
2694f7917c00SJeff Kirsher 		if (!netif_running(dev))
2695f7917c00SJeff Kirsher 			continue;
2696f7917c00SJeff Kirsher 
2697f7917c00SJeff Kirsher 		status = 0;
2698f7917c00SJeff Kirsher 		if (netif_running(dev) && netif_carrier_ok(dev))
2699f7917c00SJeff Kirsher 			status = t3b2_mac_watchdog_task(&p->mac);
2700f7917c00SJeff Kirsher 		if (status == 1)
2701f7917c00SJeff Kirsher 			p->mac.stats.num_toggled++;
2702f7917c00SJeff Kirsher 		else if (status == 2) {
2703f7917c00SJeff Kirsher 			struct cmac *mac = &p->mac;
2704f7917c00SJeff Kirsher 
2705f7917c00SJeff Kirsher 			t3_mac_set_mtu(mac, dev->mtu);
2706f7917c00SJeff Kirsher 			t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
2707f7917c00SJeff Kirsher 			cxgb_set_rxmode(dev);
2708f7917c00SJeff Kirsher 			t3_link_start(&p->phy, mac, &p->link_config);
2709f7917c00SJeff Kirsher 			t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2710f7917c00SJeff Kirsher 			t3_port_intr_enable(adapter, p->port_id);
2711f7917c00SJeff Kirsher 			p->mac.stats.num_resets++;
2712f7917c00SJeff Kirsher 		}
2713f7917c00SJeff Kirsher 	}
2714f7917c00SJeff Kirsher 	rtnl_unlock();
2715f7917c00SJeff Kirsher }
2716f7917c00SJeff Kirsher 
2717f7917c00SJeff Kirsher 
2718f7917c00SJeff Kirsher static void t3_adap_check_task(struct work_struct *work)
2719f7917c00SJeff Kirsher {
2720f7917c00SJeff Kirsher 	struct adapter *adapter = container_of(work, struct adapter,
2721f7917c00SJeff Kirsher 					       adap_check_task.work);
2722f7917c00SJeff Kirsher 	const struct adapter_params *p = &adapter->params;
2723f7917c00SJeff Kirsher 	int port;
2724f7917c00SJeff Kirsher 	unsigned int v, status, reset;
2725f7917c00SJeff Kirsher 
2726f7917c00SJeff Kirsher 	adapter->check_task_cnt++;
2727f7917c00SJeff Kirsher 
2728f7917c00SJeff Kirsher 	check_link_status(adapter);
2729f7917c00SJeff Kirsher 
2730f7917c00SJeff Kirsher 	/* Accumulate MAC stats if needed */
2731f7917c00SJeff Kirsher 	if (!p->linkpoll_period ||
2732f7917c00SJeff Kirsher 	    (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2733f7917c00SJeff Kirsher 	    p->stats_update_period) {
2734f7917c00SJeff Kirsher 		mac_stats_update(adapter);
2735f7917c00SJeff Kirsher 		adapter->check_task_cnt = 0;
2736f7917c00SJeff Kirsher 	}
2737f7917c00SJeff Kirsher 
2738f7917c00SJeff Kirsher 	if (p->rev == T3_REV_B2)
2739f7917c00SJeff Kirsher 		check_t3b2_mac(adapter);
2740f7917c00SJeff Kirsher 
2741f7917c00SJeff Kirsher 	/*
2742f7917c00SJeff Kirsher 	 * Scan the XGMACs to check for various conditions which we want to
2743f7917c00SJeff Kirsher 	 * monitor in a periodic polling manner rather than via an interrupt
2744f7917c00SJeff Kirsher 	 * condition.  This is used for conditions which would otherwise flood
2745f7917c00SJeff Kirsher 	 * the system with interrupts and we only really need to know that the
2746f7917c00SJeff Kirsher 	 * conditions are "happening" ...  For each condition we count the
2747f7917c00SJeff Kirsher 	 * detection of the condition and reset it for the next polling loop.
2748f7917c00SJeff Kirsher 	 */
2749f7917c00SJeff Kirsher 	for_each_port(adapter, port) {
2750f7917c00SJeff Kirsher 		struct cmac *mac =  &adap2pinfo(adapter, port)->mac;
2751f7917c00SJeff Kirsher 		u32 cause;
2752f7917c00SJeff Kirsher 
2753f7917c00SJeff Kirsher 		cause = t3_read_reg(adapter, A_XGM_INT_CAUSE + mac->offset);
2754f7917c00SJeff Kirsher 		reset = 0;
2755f7917c00SJeff Kirsher 		if (cause & F_RXFIFO_OVERFLOW) {
2756f7917c00SJeff Kirsher 			mac->stats.rx_fifo_ovfl++;
2757f7917c00SJeff Kirsher 			reset |= F_RXFIFO_OVERFLOW;
2758f7917c00SJeff Kirsher 		}
2759f7917c00SJeff Kirsher 
2760f7917c00SJeff Kirsher 		t3_write_reg(adapter, A_XGM_INT_CAUSE + mac->offset, reset);
2761f7917c00SJeff Kirsher 	}
2762f7917c00SJeff Kirsher 
2763f7917c00SJeff Kirsher 	/*
2764f7917c00SJeff Kirsher 	 * We do the same as above for FL_EMPTY interrupts.
2765f7917c00SJeff Kirsher 	 */
2766f7917c00SJeff Kirsher 	status = t3_read_reg(adapter, A_SG_INT_CAUSE);
2767f7917c00SJeff Kirsher 	reset = 0;
2768f7917c00SJeff Kirsher 
2769f7917c00SJeff Kirsher 	if (status & F_FLEMPTY) {
2770f7917c00SJeff Kirsher 		struct sge_qset *qs = &adapter->sge.qs[0];
2771f7917c00SJeff Kirsher 		int i = 0;
2772f7917c00SJeff Kirsher 
2773f7917c00SJeff Kirsher 		reset |= F_FLEMPTY;
2774f7917c00SJeff Kirsher 
2775f7917c00SJeff Kirsher 		v = (t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS) >> S_FL0EMPTY) &
2776f7917c00SJeff Kirsher 		    0xffff;
2777f7917c00SJeff Kirsher 
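		/*
		 * Consecutive status bits describe free list 0 and free
		 * list 1 of each queue set in turn, so advance to the next
		 * qset after every second bit.
		 */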
2778f7917c00SJeff Kirsher 		while (v) {
2779f7917c00SJeff Kirsher 			qs->fl[i].empty += (v & 1);
2780f7917c00SJeff Kirsher 			if (i)
2781f7917c00SJeff Kirsher 				qs++;
2782f7917c00SJeff Kirsher 			i ^= 1;
2783f7917c00SJeff Kirsher 			v >>= 1;
2784f7917c00SJeff Kirsher 		}
2785f7917c00SJeff Kirsher 	}
2786f7917c00SJeff Kirsher 
2787f7917c00SJeff Kirsher 	t3_write_reg(adapter, A_SG_INT_CAUSE, reset);
2788f7917c00SJeff Kirsher 
2789f7917c00SJeff Kirsher 	/* Schedule the next check update if any port is active. */
2790f7917c00SJeff Kirsher 	spin_lock_irq(&adapter->work_lock);
2791f7917c00SJeff Kirsher 	if (adapter->open_device_map & PORT_MASK)
2792f7917c00SJeff Kirsher 		schedule_chk_task(adapter);
2793f7917c00SJeff Kirsher 	spin_unlock_irq(&adapter->work_lock);
2794f7917c00SJeff Kirsher }
2795f7917c00SJeff Kirsher 
2796f7917c00SJeff Kirsher static void db_full_task(struct work_struct *work)
2797f7917c00SJeff Kirsher {
2798f7917c00SJeff Kirsher 	struct adapter *adapter = container_of(work, struct adapter,
2799f7917c00SJeff Kirsher 					       db_full_task);
2800f7917c00SJeff Kirsher 
2801f7917c00SJeff Kirsher 	cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_FULL, 0);
2802f7917c00SJeff Kirsher }
2803f7917c00SJeff Kirsher 
2804f7917c00SJeff Kirsher static void db_empty_task(struct work_struct *work)
2805f7917c00SJeff Kirsher {
2806f7917c00SJeff Kirsher 	struct adapter *adapter = container_of(work, struct adapter,
2807f7917c00SJeff Kirsher 					       db_empty_task);
2808f7917c00SJeff Kirsher 
2809f7917c00SJeff Kirsher 	cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_EMPTY, 0);
2810f7917c00SJeff Kirsher }
2811f7917c00SJeff Kirsher 
2812f7917c00SJeff Kirsher static void db_drop_task(struct work_struct *work)
2813f7917c00SJeff Kirsher {
2814f7917c00SJeff Kirsher 	struct adapter *adapter = container_of(work, struct adapter,
2815f7917c00SJeff Kirsher 					       db_drop_task);
2816f7917c00SJeff Kirsher 	unsigned long delay = 1000;
2817f7917c00SJeff Kirsher 	unsigned short r;
2818f7917c00SJeff Kirsher 
2819f7917c00SJeff Kirsher 	cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_DROP, 0);
2820f7917c00SJeff Kirsher 
2821f7917c00SJeff Kirsher 	/*
2822f7917c00SJeff Kirsher 	 * Sleep a while before ringing the driver qset dbs.
2823f7917c00SJeff Kirsher 	 * The delay is between 1000 and 2023 usecs.
2824f7917c00SJeff Kirsher 	 */
2825f7917c00SJeff Kirsher 	get_random_bytes(&r, 2);
2826f7917c00SJeff Kirsher 	delay += r & 1023;
2827f7917c00SJeff Kirsher 	set_current_state(TASK_UNINTERRUPTIBLE);
2828f7917c00SJeff Kirsher 	schedule_timeout(usecs_to_jiffies(delay));
2829f7917c00SJeff Kirsher 	ring_dbs(adapter);
2830f7917c00SJeff Kirsher }
2831f7917c00SJeff Kirsher 
2832f7917c00SJeff Kirsher /*
2833f7917c00SJeff Kirsher  * Processes external (PHY) interrupts in process context.
2834f7917c00SJeff Kirsher  */
2835f7917c00SJeff Kirsher static void ext_intr_task(struct work_struct *work)
2836f7917c00SJeff Kirsher {
2837f7917c00SJeff Kirsher 	struct adapter *adapter = container_of(work, struct adapter,
2838f7917c00SJeff Kirsher 					       ext_intr_handler_task);
2839f7917c00SJeff Kirsher 	int i;
2840f7917c00SJeff Kirsher 
2841f7917c00SJeff Kirsher 	/* Disable link fault interrupts */
2842f7917c00SJeff Kirsher 	for_each_port(adapter, i) {
2843f7917c00SJeff Kirsher 		struct net_device *dev = adapter->port[i];
2844f7917c00SJeff Kirsher 		struct port_info *p = netdev_priv(dev);
2845f7917c00SJeff Kirsher 
2846f7917c00SJeff Kirsher 		t3_xgm_intr_disable(adapter, i);
2847f7917c00SJeff Kirsher 		t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2848f7917c00SJeff Kirsher 	}
2849f7917c00SJeff Kirsher 
2850f7917c00SJeff Kirsher 	/* Handle the PHY interrupt, then re-enable link fault interrupts */
2851f7917c00SJeff Kirsher 	t3_phy_intr_handler(adapter);
2852f7917c00SJeff Kirsher 
2853f7917c00SJeff Kirsher 	for_each_port(adapter, i)
2854f7917c00SJeff Kirsher 		t3_xgm_intr_enable(adapter, i);
2855f7917c00SJeff Kirsher 
2856f7917c00SJeff Kirsher 	/* Now reenable external interrupts */
2857f7917c00SJeff Kirsher 	spin_lock_irq(&adapter->work_lock);
2858f7917c00SJeff Kirsher 	if (adapter->slow_intr_mask) {
2859f7917c00SJeff Kirsher 		adapter->slow_intr_mask |= F_T3DBG;
2860f7917c00SJeff Kirsher 		t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2861f7917c00SJeff Kirsher 		t3_write_reg(adapter, A_PL_INT_ENABLE0,
2862f7917c00SJeff Kirsher 			     adapter->slow_intr_mask);
2863f7917c00SJeff Kirsher 	}
2864f7917c00SJeff Kirsher 	spin_unlock_irq(&adapter->work_lock);
2865f7917c00SJeff Kirsher }
2866f7917c00SJeff Kirsher 
2867f7917c00SJeff Kirsher /*
2868f7917c00SJeff Kirsher  * Interrupt-context handler for external (PHY) interrupts.
2869f7917c00SJeff Kirsher  */
2870f7917c00SJeff Kirsher void t3_os_ext_intr_handler(struct adapter *adapter)
2871f7917c00SJeff Kirsher {
2872f7917c00SJeff Kirsher 	/*
2873f7917c00SJeff Kirsher 	 * Schedule a task to handle external interrupts as they may be slow
2874f7917c00SJeff Kirsher 	 * and we use a mutex to protect MDIO registers.  We disable PHY
2875f7917c00SJeff Kirsher 	 * interrupts in the meantime and let the task reenable them when
2876f7917c00SJeff Kirsher 	 * it's done.
2877f7917c00SJeff Kirsher 	 */
2878f7917c00SJeff Kirsher 	spin_lock(&adapter->work_lock);
2879f7917c00SJeff Kirsher 	if (adapter->slow_intr_mask) {
2880f7917c00SJeff Kirsher 		adapter->slow_intr_mask &= ~F_T3DBG;
2881f7917c00SJeff Kirsher 		t3_write_reg(adapter, A_PL_INT_ENABLE0,
2882f7917c00SJeff Kirsher 			     adapter->slow_intr_mask);
2883f7917c00SJeff Kirsher 		queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2884f7917c00SJeff Kirsher 	}
2885f7917c00SJeff Kirsher 	spin_unlock(&adapter->work_lock);
2886f7917c00SJeff Kirsher }
2887f7917c00SJeff Kirsher 
2888f7917c00SJeff Kirsher void t3_os_link_fault_handler(struct adapter *adapter, int port_id)
2889f7917c00SJeff Kirsher {
2890f7917c00SJeff Kirsher 	struct net_device *netdev = adapter->port[port_id];
2891f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(netdev);
2892f7917c00SJeff Kirsher 
2893f7917c00SJeff Kirsher 	spin_lock(&adapter->work_lock);
2894f7917c00SJeff Kirsher 	pi->link_fault = 1;
2895f7917c00SJeff Kirsher 	spin_unlock(&adapter->work_lock);
2896f7917c00SJeff Kirsher }
2897f7917c00SJeff Kirsher 
2898f7917c00SJeff Kirsher static int t3_adapter_error(struct adapter *adapter, int reset, int on_wq)
2899f7917c00SJeff Kirsher {
2900f7917c00SJeff Kirsher 	int i, ret = 0;
2901f7917c00SJeff Kirsher 
2902f7917c00SJeff Kirsher 	if (is_offload(adapter) &&
2903f7917c00SJeff Kirsher 	    test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2904f7917c00SJeff Kirsher 		cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0);
2905f7917c00SJeff Kirsher 		offload_close(&adapter->tdev);
2906f7917c00SJeff Kirsher 	}
2907f7917c00SJeff Kirsher 
2908f7917c00SJeff Kirsher 	/* Stop all ports */
2909f7917c00SJeff Kirsher 	for_each_port(adapter, i) {
2910f7917c00SJeff Kirsher 		struct net_device *netdev = adapter->port[i];
2911f7917c00SJeff Kirsher 
2912f7917c00SJeff Kirsher 		if (netif_running(netdev))
2913f7917c00SJeff Kirsher 			__cxgb_close(netdev, on_wq);
2914f7917c00SJeff Kirsher 	}
2915f7917c00SJeff Kirsher 
2916f7917c00SJeff Kirsher 	/* Stop SGE timers */
2917f7917c00SJeff Kirsher 	t3_stop_sge_timers(adapter);
2918f7917c00SJeff Kirsher 
2919f7917c00SJeff Kirsher 	adapter->flags &= ~FULL_INIT_DONE;
2920f7917c00SJeff Kirsher 
2921f7917c00SJeff Kirsher 	if (reset)
2922f7917c00SJeff Kirsher 		ret = t3_reset_adapter(adapter);
2923f7917c00SJeff Kirsher 
2924f7917c00SJeff Kirsher 	pci_disable_device(adapter->pdev);
2925f7917c00SJeff Kirsher 
2926f7917c00SJeff Kirsher 	return ret;
2927f7917c00SJeff Kirsher }
2928f7917c00SJeff Kirsher 
2929f7917c00SJeff Kirsher static int t3_reenable_adapter(struct adapter *adapter)
2930f7917c00SJeff Kirsher {
2931f7917c00SJeff Kirsher 	if (pci_enable_device(adapter->pdev)) {
2932f7917c00SJeff Kirsher 		dev_err(&adapter->pdev->dev,
2933f7917c00SJeff Kirsher 			"Cannot re-enable PCI device after reset.\n");
2934f7917c00SJeff Kirsher 		goto err;
2935f7917c00SJeff Kirsher 	}
2936f7917c00SJeff Kirsher 	pci_set_master(adapter->pdev);
2937f7917c00SJeff Kirsher 	pci_restore_state(adapter->pdev);
2938f7917c00SJeff Kirsher 	pci_save_state(adapter->pdev);
2939f7917c00SJeff Kirsher 
2940f7917c00SJeff Kirsher 	/* Free sge resources */
2941f7917c00SJeff Kirsher 	t3_free_sge_resources(adapter);
2942f7917c00SJeff Kirsher 
2943f7917c00SJeff Kirsher 	if (t3_replay_prep_adapter(adapter))
2944f7917c00SJeff Kirsher 		goto err;
2945f7917c00SJeff Kirsher 
2946f7917c00SJeff Kirsher 	return 0;
2947f7917c00SJeff Kirsher err:
2948f7917c00SJeff Kirsher 	return -1;
2949f7917c00SJeff Kirsher }
2950f7917c00SJeff Kirsher 
2951f7917c00SJeff Kirsher static void t3_resume_ports(struct adapter *adapter)
2952f7917c00SJeff Kirsher {
2953f7917c00SJeff Kirsher 	int i;
2954f7917c00SJeff Kirsher 
2955f7917c00SJeff Kirsher 	/* Restart the ports */
2956f7917c00SJeff Kirsher 	for_each_port(adapter, i) {
2957f7917c00SJeff Kirsher 		struct net_device *netdev = adapter->port[i];
2958f7917c00SJeff Kirsher 
2959f7917c00SJeff Kirsher 		if (netif_running(netdev)) {
2960f7917c00SJeff Kirsher 			if (cxgb_open(netdev)) {
2961f7917c00SJeff Kirsher 				dev_err(&adapter->pdev->dev,
2962f7917c00SJeff Kirsher 					"can't bring device back up"
2963f7917c00SJeff Kirsher 					" after reset\n");
2964f7917c00SJeff Kirsher 				continue;
2965f7917c00SJeff Kirsher 			}
2966f7917c00SJeff Kirsher 		}
2967f7917c00SJeff Kirsher 	}
2968f7917c00SJeff Kirsher 
2969f7917c00SJeff Kirsher 	if (is_offload(adapter) && !ofld_disable)
2970f7917c00SJeff Kirsher 		cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0);
2971f7917c00SJeff Kirsher }
2972f7917c00SJeff Kirsher 
2973f7917c00SJeff Kirsher /*
2974f7917c00SJeff Kirsher  * processes a fatal error.
2975f7917c00SJeff Kirsher  * Bring the ports down, reset the chip, bring the ports back up.
2976f7917c00SJeff Kirsher  */
2977f7917c00SJeff Kirsher static void fatal_error_task(struct work_struct *work)
2978f7917c00SJeff Kirsher {
2979f7917c00SJeff Kirsher 	struct adapter *adapter = container_of(work, struct adapter,
2980f7917c00SJeff Kirsher 					       fatal_error_handler_task);
2981f7917c00SJeff Kirsher 	int err = 0;
2982f7917c00SJeff Kirsher 
2983f7917c00SJeff Kirsher 	rtnl_lock();
2984f7917c00SJeff Kirsher 	err = t3_adapter_error(adapter, 1, 1);
2985f7917c00SJeff Kirsher 	if (!err)
2986f7917c00SJeff Kirsher 		err = t3_reenable_adapter(adapter);
2987f7917c00SJeff Kirsher 	if (!err)
2988f7917c00SJeff Kirsher 		t3_resume_ports(adapter);
2989f7917c00SJeff Kirsher 
2990f7917c00SJeff Kirsher 	CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
2991f7917c00SJeff Kirsher 	rtnl_unlock();
2992f7917c00SJeff Kirsher }
2993f7917c00SJeff Kirsher 
2994f7917c00SJeff Kirsher void t3_fatal_err(struct adapter *adapter)
2995f7917c00SJeff Kirsher {
2996f7917c00SJeff Kirsher 	unsigned int fw_status[4];
2997f7917c00SJeff Kirsher 
2998f7917c00SJeff Kirsher 	if (adapter->flags & FULL_INIT_DONE) {
2999f7917c00SJeff Kirsher 		t3_sge_stop(adapter);
3000f7917c00SJeff Kirsher 		t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
3001f7917c00SJeff Kirsher 		t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
3002f7917c00SJeff Kirsher 		t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
3003f7917c00SJeff Kirsher 		t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);
3004f7917c00SJeff Kirsher 
3005f7917c00SJeff Kirsher 		spin_lock(&adapter->work_lock);
3006f7917c00SJeff Kirsher 		t3_intr_disable(adapter);
3007f7917c00SJeff Kirsher 		queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
3008f7917c00SJeff Kirsher 		spin_unlock(&adapter->work_lock);
3009f7917c00SJeff Kirsher 	}
3010f7917c00SJeff Kirsher 	CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
3011f7917c00SJeff Kirsher 	if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
3012f7917c00SJeff Kirsher 		CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
3013f7917c00SJeff Kirsher 			 fw_status[0], fw_status[1],
3014f7917c00SJeff Kirsher 			 fw_status[2], fw_status[3]);
3015f7917c00SJeff Kirsher }
3016f7917c00SJeff Kirsher 
3017f7917c00SJeff Kirsher /**
3018f7917c00SJeff Kirsher  * t3_io_error_detected - called when PCI error is detected
3019f7917c00SJeff Kirsher  * @pdev: Pointer to PCI device
3020f7917c00SJeff Kirsher  * @state: The current pci connection state
3021f7917c00SJeff Kirsher  *
3022f7917c00SJeff Kirsher  * This function is called after a PCI bus error affecting
3023f7917c00SJeff Kirsher  * this device has been detected.
3024f7917c00SJeff Kirsher  */
3025f7917c00SJeff Kirsher static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
3026f7917c00SJeff Kirsher 					     pci_channel_state_t state)
3027f7917c00SJeff Kirsher {
3028f7917c00SJeff Kirsher 	struct adapter *adapter = pci_get_drvdata(pdev);
3029f7917c00SJeff Kirsher 
3030f7917c00SJeff Kirsher 	if (state == pci_channel_io_perm_failure)
3031f7917c00SJeff Kirsher 		return PCI_ERS_RESULT_DISCONNECT;
3032f7917c00SJeff Kirsher 
3033f7917c00SJeff Kirsher 	t3_adapter_error(adapter, 0, 0);
3034f7917c00SJeff Kirsher 
3035f7917c00SJeff Kirsher 	/* Request a slot reset. */
3036f7917c00SJeff Kirsher 	return PCI_ERS_RESULT_NEED_RESET;
3037f7917c00SJeff Kirsher }
3038f7917c00SJeff Kirsher 
3039f7917c00SJeff Kirsher /**
3040f7917c00SJeff Kirsher  * t3_io_slot_reset - called after the pci bus has been reset.
3041f7917c00SJeff Kirsher  * @pdev: Pointer to PCI device
3042f7917c00SJeff Kirsher  *
3043f7917c00SJeff Kirsher  * Restart the card from scratch, as if from a cold-boot.
3044f7917c00SJeff Kirsher  */
3045f7917c00SJeff Kirsher static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
3046f7917c00SJeff Kirsher {
3047f7917c00SJeff Kirsher 	struct adapter *adapter = pci_get_drvdata(pdev);
3048f7917c00SJeff Kirsher 
3049f7917c00SJeff Kirsher 	if (!t3_reenable_adapter(adapter))
3050f7917c00SJeff Kirsher 		return PCI_ERS_RESULT_RECOVERED;
3051f7917c00SJeff Kirsher 
3052f7917c00SJeff Kirsher 	return PCI_ERS_RESULT_DISCONNECT;
3053f7917c00SJeff Kirsher }
3054f7917c00SJeff Kirsher 
3055f7917c00SJeff Kirsher /**
3056f7917c00SJeff Kirsher  * t3_io_resume - called when traffic can start flowing again.
3057f7917c00SJeff Kirsher  * @pdev: Pointer to PCI device
3058f7917c00SJeff Kirsher  *
3059f7917c00SJeff Kirsher  * This callback is called when the error recovery driver tells us that
3060f7917c00SJeff Kirsher  * it's OK to resume normal operation.
3061f7917c00SJeff Kirsher  */
3062f7917c00SJeff Kirsher static void t3_io_resume(struct pci_dev *pdev)
3063f7917c00SJeff Kirsher {
3064f7917c00SJeff Kirsher 	struct adapter *adapter = pci_get_drvdata(pdev);
3065f7917c00SJeff Kirsher 
3066f7917c00SJeff Kirsher 	CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n",
3067f7917c00SJeff Kirsher 		 t3_read_reg(adapter, A_PCIE_PEX_ERR));
3068f7917c00SJeff Kirsher 
30697cc47d13SBenjamin Herrenschmidt 	rtnl_lock();
3070f7917c00SJeff Kirsher 	t3_resume_ports(adapter);
30717cc47d13SBenjamin Herrenschmidt 	rtnl_unlock();
3072f7917c00SJeff Kirsher }
3073f7917c00SJeff Kirsher 
30743646f0e5SStephen Hemminger static const struct pci_error_handlers t3_err_handler = {
3075f7917c00SJeff Kirsher 	.error_detected = t3_io_error_detected,
3076f7917c00SJeff Kirsher 	.slot_reset = t3_io_slot_reset,
3077f7917c00SJeff Kirsher 	.resume = t3_io_resume,
3078f7917c00SJeff Kirsher };
3079f7917c00SJeff Kirsher 
3080f7917c00SJeff Kirsher /*
3081f7917c00SJeff Kirsher  * Set the number of qsets based on the number of CPUs and the number of ports,
3082f7917c00SJeff Kirsher  * not to exceed the number of available qsets, assuming there are enough qsets
3083f7917c00SJeff Kirsher  * per port in HW.
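 *
 * For example, assuming SGE_QSETS is 8: a two-port MSI-X adapter with 9
 * vectors starts at nqsets = 8, halves that to 4 because 2 * 8 exceeds
 * SGE_QSETS, and then caps the result at the number of CPUs, so each port
 * gets at most 4 queue sets.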
3084f7917c00SJeff Kirsher  */
3085f7917c00SJeff Kirsher static void set_nqsets(struct adapter *adap)
3086f7917c00SJeff Kirsher {
3087f7917c00SJeff Kirsher 	int i, j = 0;
3088dbfa6001SYuval Mintz 	int num_cpus = netif_get_num_default_rss_queues();
3089f7917c00SJeff Kirsher 	int hwports = adap->params.nports;
3090f7917c00SJeff Kirsher 	int nqsets = adap->msix_nvectors - 1;
3091f7917c00SJeff Kirsher 
3092f7917c00SJeff Kirsher 	if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
3093f7917c00SJeff Kirsher 		if (hwports == 2 &&
3094f7917c00SJeff Kirsher 		    (hwports * nqsets > SGE_QSETS ||
3095f7917c00SJeff Kirsher 		     num_cpus >= nqsets / hwports))
3096f7917c00SJeff Kirsher 			nqsets /= hwports;
3097f7917c00SJeff Kirsher 		if (nqsets > num_cpus)
3098f7917c00SJeff Kirsher 			nqsets = num_cpus;
3099f7917c00SJeff Kirsher 		if (nqsets < 1 || hwports == 4)
3100f7917c00SJeff Kirsher 			nqsets = 1;
3101f7917c00SJeff Kirsher 	} else
3102f7917c00SJeff Kirsher 		nqsets = 1;
3103f7917c00SJeff Kirsher 
3104f7917c00SJeff Kirsher 	for_each_port(adap, i) {
3105f7917c00SJeff Kirsher 		struct port_info *pi = adap2pinfo(adap, i);
3106f7917c00SJeff Kirsher 
3107f7917c00SJeff Kirsher 		pi->first_qset = j;
3108f7917c00SJeff Kirsher 		pi->nqsets = nqsets;
3109f7917c00SJeff Kirsher 		j = pi->first_qset + nqsets;
3110f7917c00SJeff Kirsher 
3111f7917c00SJeff Kirsher 		dev_info(&adap->pdev->dev,
3112f7917c00SJeff Kirsher 			 "Port %d using %d queue sets.\n", i, nqsets);
3113f7917c00SJeff Kirsher 	}
3114f7917c00SJeff Kirsher }
3115f7917c00SJeff Kirsher 
31162109eaabSBill Pemberton static int cxgb_enable_msix(struct adapter *adap)
3117f7917c00SJeff Kirsher {
3118f7917c00SJeff Kirsher 	struct msix_entry entries[SGE_QSETS + 1];
3119f7917c00SJeff Kirsher 	int vectors;
3120fc1d0bf1SAlexander Gordeev 	int i;
3121f7917c00SJeff Kirsher 
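	/* One entry per queue set plus one for the non-data interrupt */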
3122f7917c00SJeff Kirsher 	vectors = ARRAY_SIZE(entries);
3123f7917c00SJeff Kirsher 	for (i = 0; i < vectors; ++i)
3124f7917c00SJeff Kirsher 		entries[i].entry = i;
3125f7917c00SJeff Kirsher 
3126fc1d0bf1SAlexander Gordeev 	vectors = pci_enable_msix_range(adap->pdev, entries,
3127fc1d0bf1SAlexander Gordeev 					adap->params.nports + 1, vectors);
3128fc1d0bf1SAlexander Gordeev 	if (vectors < 0)
3129fc1d0bf1SAlexander Gordeev 		return vectors;
3130f7917c00SJeff Kirsher 
3131f7917c00SJeff Kirsher 	for (i = 0; i < vectors; ++i)
3132f7917c00SJeff Kirsher 		adap->msix_info[i].vec = entries[i].vector;
3133f7917c00SJeff Kirsher 	adap->msix_nvectors = vectors;
3134f7917c00SJeff Kirsher 
3135fc1d0bf1SAlexander Gordeev 	return 0;
3136f7917c00SJeff Kirsher }
3137f7917c00SJeff Kirsher 
31381dd06ae8SGreg Kroah-Hartman static void print_port_info(struct adapter *adap, const struct adapter_info *ai)
3139f7917c00SJeff Kirsher {
3140f7917c00SJeff Kirsher 	static const char *pci_variant[] = {
3141f7917c00SJeff Kirsher 		"PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
3142f7917c00SJeff Kirsher 	};
3143f7917c00SJeff Kirsher 
3144f7917c00SJeff Kirsher 	int i;
3145f7917c00SJeff Kirsher 	char buf[80];
3146f7917c00SJeff Kirsher 
3147f7917c00SJeff Kirsher 	if (is_pcie(adap))
3148f7917c00SJeff Kirsher 		snprintf(buf, sizeof(buf), "%s x%d",
3149f7917c00SJeff Kirsher 			 pci_variant[adap->params.pci.variant],
3150f7917c00SJeff Kirsher 			 adap->params.pci.width);
3151f7917c00SJeff Kirsher 	else
3152f7917c00SJeff Kirsher 		snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
3153f7917c00SJeff Kirsher 			 pci_variant[adap->params.pci.variant],
3154f7917c00SJeff Kirsher 			 adap->params.pci.speed, adap->params.pci.width);
3155f7917c00SJeff Kirsher 
3156f7917c00SJeff Kirsher 	for_each_port(adap, i) {
3157f7917c00SJeff Kirsher 		struct net_device *dev = adap->port[i];
3158f7917c00SJeff Kirsher 		const struct port_info *pi = netdev_priv(dev);
3159f7917c00SJeff Kirsher 
3160f7917c00SJeff Kirsher 		if (!test_bit(i, &adap->registered_device_map))
3161f7917c00SJeff Kirsher 			continue;
3162428ac43fSJoe Perches 		netdev_info(dev, "%s %s %sNIC (rev %d) %s%s\n",
3163428ac43fSJoe Perches 			    ai->desc, pi->phy.desc,
3164f7917c00SJeff Kirsher 			    is_offload(adap) ? "R" : "", adap->params.rev, buf,
3165f7917c00SJeff Kirsher 			    (adap->flags & USING_MSIX) ? " MSI-X" :
3166f7917c00SJeff Kirsher 			    (adap->flags & USING_MSI) ? " MSI" : "");
3167f7917c00SJeff Kirsher 		if (adap->name == dev->name && adap->params.vpd.mclk)
3168428ac43fSJoe Perches 			pr_info("%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
3169f7917c00SJeff Kirsher 			       adap->name, t3_mc7_size(&adap->cm) >> 20,
3170f7917c00SJeff Kirsher 			       t3_mc7_size(&adap->pmtx) >> 20,
3171f7917c00SJeff Kirsher 			       t3_mc7_size(&adap->pmrx) >> 20,
3172f7917c00SJeff Kirsher 			       adap->params.vpd.sn);
3173f7917c00SJeff Kirsher 	}
3174f7917c00SJeff Kirsher }
3175f7917c00SJeff Kirsher 
3176f7917c00SJeff Kirsher static const struct net_device_ops cxgb_netdev_ops = {
3177f7917c00SJeff Kirsher 	.ndo_open		= cxgb_open,
3178f7917c00SJeff Kirsher 	.ndo_stop		= cxgb_close,
3179f7917c00SJeff Kirsher 	.ndo_start_xmit		= t3_eth_xmit,
3180f7917c00SJeff Kirsher 	.ndo_get_stats		= cxgb_get_stats,
3181f7917c00SJeff Kirsher 	.ndo_validate_addr	= eth_validate_addr,
3182afc4b13dSJiri Pirko 	.ndo_set_rx_mode	= cxgb_set_rxmode,
3183f7917c00SJeff Kirsher 	.ndo_do_ioctl		= cxgb_ioctl,
3184f7917c00SJeff Kirsher 	.ndo_change_mtu		= cxgb_change_mtu,
3185f7917c00SJeff Kirsher 	.ndo_set_mac_address	= cxgb_set_mac_addr,
3186f7917c00SJeff Kirsher 	.ndo_fix_features	= cxgb_fix_features,
3187f7917c00SJeff Kirsher 	.ndo_set_features	= cxgb_set_features,
3188f7917c00SJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER
3189f7917c00SJeff Kirsher 	.ndo_poll_controller	= cxgb_netpoll,
3190f7917c00SJeff Kirsher #endif
3191f7917c00SJeff Kirsher };
3192f7917c00SJeff Kirsher 
31932109eaabSBill Pemberton static void cxgb3_init_iscsi_mac(struct net_device *dev)
3194f7917c00SJeff Kirsher {
3195f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(dev);
3196f7917c00SJeff Kirsher 
3197f7917c00SJeff Kirsher 	memcpy(pi->iscsic.mac_addr, dev->dev_addr, ETH_ALEN);
3198f7917c00SJeff Kirsher 	pi->iscsic.mac_addr[3] |= 0x80;
3199f7917c00SJeff Kirsher }
3200f7917c00SJeff Kirsher 
32011d962ecfSbrenohl@br.ibm.com #define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
32021d962ecfSbrenohl@br.ibm.com #define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
32031d962ecfSbrenohl@br.ibm.com 			NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
32041dd06ae8SGreg Kroah-Hartman static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3205f7917c00SJeff Kirsher {
3206f7917c00SJeff Kirsher 	int i, err, pci_using_dac = 0;
3207f7917c00SJeff Kirsher 	resource_size_t mmio_start, mmio_len;
3208f7917c00SJeff Kirsher 	const struct adapter_info *ai;
3209f7917c00SJeff Kirsher 	struct adapter *adapter = NULL;
3210f7917c00SJeff Kirsher 	struct port_info *pi;
3211f7917c00SJeff Kirsher 
3212428ac43fSJoe Perches 	pr_info_once("%s - version %s\n", DRV_DESC, DRV_VERSION);
3213f7917c00SJeff Kirsher 
3214f7917c00SJeff Kirsher 	if (!cxgb3_wq) {
3215f7917c00SJeff Kirsher 		cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
3216f7917c00SJeff Kirsher 		if (!cxgb3_wq) {
3217428ac43fSJoe Perches 			pr_err("cannot initialize work queue\n");
3218f7917c00SJeff Kirsher 			return -ENOMEM;
3219f7917c00SJeff Kirsher 		}
3220f7917c00SJeff Kirsher 	}
3221f7917c00SJeff Kirsher 
3222f7917c00SJeff Kirsher 	err = pci_enable_device(pdev);
3223f7917c00SJeff Kirsher 	if (err) {
3224f7917c00SJeff Kirsher 		dev_err(&pdev->dev, "cannot enable PCI device\n");
3225f7917c00SJeff Kirsher 		goto out;
3226f7917c00SJeff Kirsher 	}
3227f7917c00SJeff Kirsher 
3228f7917c00SJeff Kirsher 	err = pci_request_regions(pdev, DRV_NAME);
3229f7917c00SJeff Kirsher 	if (err) {
3230f7917c00SJeff Kirsher 		/* Just info, some other driver may have claimed the device. */
3231f7917c00SJeff Kirsher 		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
3232f7917c00SJeff Kirsher 		goto out_disable_device;
3233f7917c00SJeff Kirsher 	}
3234f7917c00SJeff Kirsher 
3235f7917c00SJeff Kirsher 	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
3236f7917c00SJeff Kirsher 		pci_using_dac = 1;
3237f7917c00SJeff Kirsher 		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
3238f7917c00SJeff Kirsher 		if (err) {
3239f7917c00SJeff Kirsher 			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
3240f7917c00SJeff Kirsher 			       "coherent allocations\n");
3241f7917c00SJeff Kirsher 			goto out_release_regions;
3242f7917c00SJeff Kirsher 		}
3243f7917c00SJeff Kirsher 	} else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
3244f7917c00SJeff Kirsher 		dev_err(&pdev->dev, "no usable DMA configuration\n");
3245f7917c00SJeff Kirsher 		goto out_release_regions;
3246f7917c00SJeff Kirsher 	}
3247f7917c00SJeff Kirsher 
3248f7917c00SJeff Kirsher 	pci_set_master(pdev);
3249f7917c00SJeff Kirsher 	pci_save_state(pdev);
3250f7917c00SJeff Kirsher 
3251f7917c00SJeff Kirsher 	mmio_start = pci_resource_start(pdev, 0);
3252f7917c00SJeff Kirsher 	mmio_len = pci_resource_len(pdev, 0);
3253f7917c00SJeff Kirsher 	ai = t3_get_adapter_info(ent->driver_data);
3254f7917c00SJeff Kirsher 
3255f7917c00SJeff Kirsher 	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
3256f7917c00SJeff Kirsher 	if (!adapter) {
3257f7917c00SJeff Kirsher 		err = -ENOMEM;
3258f7917c00SJeff Kirsher 		goto out_release_regions;
3259f7917c00SJeff Kirsher 	}
3260f7917c00SJeff Kirsher 
3261f7917c00SJeff Kirsher 	adapter->nofail_skb =
3262f7917c00SJeff Kirsher 		alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_KERNEL);
3263f7917c00SJeff Kirsher 	if (!adapter->nofail_skb) {
3264f7917c00SJeff Kirsher 		dev_err(&pdev->dev, "cannot allocate nofail buffer\n");
3265f7917c00SJeff Kirsher 		err = -ENOMEM;
3266f7917c00SJeff Kirsher 		goto out_free_adapter;
3267f7917c00SJeff Kirsher 	}
3268f7917c00SJeff Kirsher 
3269f7917c00SJeff Kirsher 	adapter->regs = ioremap_nocache(mmio_start, mmio_len);
3270f7917c00SJeff Kirsher 	if (!adapter->regs) {
3271f7917c00SJeff Kirsher 		dev_err(&pdev->dev, "cannot map device registers\n");
3272f7917c00SJeff Kirsher 		err = -ENOMEM;
3273f7917c00SJeff Kirsher 		goto out_free_adapter;
3274f7917c00SJeff Kirsher 	}
3275f7917c00SJeff Kirsher 
3276f7917c00SJeff Kirsher 	adapter->pdev = pdev;
3277f7917c00SJeff Kirsher 	adapter->name = pci_name(pdev);
3278f7917c00SJeff Kirsher 	adapter->msg_enable = dflt_msg_enable;
3279f7917c00SJeff Kirsher 	adapter->mmio_len = mmio_len;
3280f7917c00SJeff Kirsher 
3281f7917c00SJeff Kirsher 	mutex_init(&adapter->mdio_lock);
3282f7917c00SJeff Kirsher 	spin_lock_init(&adapter->work_lock);
3283f7917c00SJeff Kirsher 	spin_lock_init(&adapter->stats_lock);
3284f7917c00SJeff Kirsher 
3285f7917c00SJeff Kirsher 	INIT_LIST_HEAD(&adapter->adapter_list);
3286f7917c00SJeff Kirsher 	INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
3287f7917c00SJeff Kirsher 	INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
3288f7917c00SJeff Kirsher 
3289f7917c00SJeff Kirsher 	INIT_WORK(&adapter->db_full_task, db_full_task);
3290f7917c00SJeff Kirsher 	INIT_WORK(&adapter->db_empty_task, db_empty_task);
3291f7917c00SJeff Kirsher 	INIT_WORK(&adapter->db_drop_task, db_drop_task);
3292f7917c00SJeff Kirsher 
3293f7917c00SJeff Kirsher 	INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
3294f7917c00SJeff Kirsher 
3295f7917c00SJeff Kirsher 	for (i = 0; i < ai->nports0 + ai->nports1; ++i) {
3296f7917c00SJeff Kirsher 		struct net_device *netdev;
3297f7917c00SJeff Kirsher 
3298f7917c00SJeff Kirsher 		netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS);
3299f7917c00SJeff Kirsher 		if (!netdev) {
3300f7917c00SJeff Kirsher 			err = -ENOMEM;
3301f7917c00SJeff Kirsher 			goto out_free_dev;
3302f7917c00SJeff Kirsher 		}
3303f7917c00SJeff Kirsher 
3304f7917c00SJeff Kirsher 		SET_NETDEV_DEV(netdev, &pdev->dev);
3305f7917c00SJeff Kirsher 
3306f7917c00SJeff Kirsher 		adapter->port[i] = netdev;
3307f7917c00SJeff Kirsher 		pi = netdev_priv(netdev);
3308f7917c00SJeff Kirsher 		pi->adapter = adapter;
3309f7917c00SJeff Kirsher 		pi->port_id = i;
3310f7917c00SJeff Kirsher 		netif_carrier_off(netdev);
3311f7917c00SJeff Kirsher 		netdev->irq = pdev->irq;
3312f7917c00SJeff Kirsher 		netdev->mem_start = mmio_start;
3313f7917c00SJeff Kirsher 		netdev->mem_end = mmio_start + mmio_len - 1;
3314f7917c00SJeff Kirsher 		netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
3315f646968fSPatrick McHardy 			NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX;
3316f646968fSPatrick McHardy 		netdev->features |= netdev->hw_features |
3317f646968fSPatrick McHardy 				    NETIF_F_HW_VLAN_CTAG_TX;
33181d962ecfSbrenohl@br.ibm.com 		netdev->vlan_features |= netdev->features & VLAN_FEAT;
3319f7917c00SJeff Kirsher 		if (pci_using_dac)
3320f7917c00SJeff Kirsher 			netdev->features |= NETIF_F_HIGHDMA;
3321f7917c00SJeff Kirsher 
3322f7917c00SJeff Kirsher 		netdev->netdev_ops = &cxgb_netdev_ops;
33237ad24ea4SWilfried Klaebe 		netdev->ethtool_ops = &cxgb_ethtool_ops;
3324d894be57SJarod Wilson 		netdev->min_mtu = 81;
3325d894be57SJarod Wilson 		netdev->max_mtu = ETH_MAX_MTU;
33268fc79766SArjun Vynipadath 		netdev->dev_port = pi->port_id;
3327f7917c00SJeff Kirsher 	}
3328f7917c00SJeff Kirsher 
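	/*
	 * Finish common hardware/software initialization; the final argument
	 * to t3_prep_adapter() asks for a chip reset.
	 */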
3329f7917c00SJeff Kirsher 	pci_set_drvdata(pdev, adapter);
3330f7917c00SJeff Kirsher 	if (t3_prep_adapter(adapter, ai, 1) < 0) {
3331f7917c00SJeff Kirsher 		err = -ENODEV;
3332f7917c00SJeff Kirsher 		goto out_free_dev;
3333f7917c00SJeff Kirsher 	}
3334f7917c00SJeff Kirsher 
3335f7917c00SJeff Kirsher 	/*
3336f7917c00SJeff Kirsher 	 * The card is now ready to go.  If any errors occur during device
3337f7917c00SJeff Kirsher 	 * registration, we do not fail the whole card; instead we proceed
3338f7917c00SJeff Kirsher 	 * only with the ports we manage to register successfully.  However,
3339f7917c00SJeff Kirsher 	 * we must register at least one net device.
3340f7917c00SJeff Kirsher 	 */
3341f7917c00SJeff Kirsher 	for_each_port(adapter, i) {
3342f7917c00SJeff Kirsher 		err = register_netdev(adapter->port[i]);
3343f7917c00SJeff Kirsher 		if (err)
3344f7917c00SJeff Kirsher 			dev_warn(&pdev->dev,
3345f7917c00SJeff Kirsher 				 "cannot register net device %s, skipping\n",
3346f7917c00SJeff Kirsher 				 adapter->port[i]->name);
3347f7917c00SJeff Kirsher 		else {
3348f7917c00SJeff Kirsher 			/*
3349f7917c00SJeff Kirsher 			 * Change the name we use for messages to the name of
3350f7917c00SJeff Kirsher 			 * the first successfully registered interface.
3351f7917c00SJeff Kirsher 			 */
3352f7917c00SJeff Kirsher 			if (!adapter->registered_device_map)
3353f7917c00SJeff Kirsher 				adapter->name = adapter->port[i]->name;
3354f7917c00SJeff Kirsher 
3355f7917c00SJeff Kirsher 			__set_bit(i, &adapter->registered_device_map);
3356f7917c00SJeff Kirsher 		}
3357f7917c00SJeff Kirsher 	}
3358f7917c00SJeff Kirsher 	if (!adapter->registered_device_map) {
3359f7917c00SJeff Kirsher 		dev_err(&pdev->dev, "could not register any net devices\n");
		err = -ENODEV;
3360f7917c00SJeff Kirsher 		goto out_free_dev;
3361f7917c00SJeff Kirsher 	}
3362f7917c00SJeff Kirsher 
3363f7917c00SJeff Kirsher 	for_each_port(adapter, i)
3364f7917c00SJeff Kirsher 		cxgb3_init_iscsi_mac(adapter->port[i]);
3365f7917c00SJeff Kirsher 
3366f7917c00SJeff Kirsher 	/* Driver's ready. Reflect it on LEDs */
3367f7917c00SJeff Kirsher 	t3_led_ready(adapter);
3368f7917c00SJeff Kirsher 
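	/* Hook the adapter up to the offload framework if it supports offload. */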
3369f7917c00SJeff Kirsher 	if (is_offload(adapter)) {
3370f7917c00SJeff Kirsher 		__set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
3371f7917c00SJeff Kirsher 		cxgb3_adapter_ofld(adapter);
3372f7917c00SJeff Kirsher 	}
3373f7917c00SJeff Kirsher 
3374f7917c00SJeff Kirsher 	/* See what interrupts we'll be using */
3375f7917c00SJeff Kirsher 	if (msi > 1 && cxgb_enable_msix(adapter) == 0)
3376f7917c00SJeff Kirsher 		adapter->flags |= USING_MSIX;
3377f7917c00SJeff Kirsher 	else if (msi > 0 && pci_enable_msi(pdev) == 0)
3378f7917c00SJeff Kirsher 		adapter->flags |= USING_MSI;
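	/* With neither flag set we fall back to legacy INTx interrupts. */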
3379f7917c00SJeff Kirsher 
3380f7917c00SJeff Kirsher 	set_nqsets(adapter);
3381f7917c00SJeff Kirsher 
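	/* The adapter-wide sysfs attributes hang off the first port's device. */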
3382f7917c00SJeff Kirsher 	err = sysfs_create_group(&adapter->port[0]->dev.kobj,
3383f7917c00SJeff Kirsher 				 &cxgb3_attr_group);
33847c099773SZhouyang Jia 	if (err) {
33857c099773SZhouyang Jia 		dev_err(&pdev->dev, "cannot create sysfs group\n");
33867c099773SZhouyang Jia 		goto out_close_led;
33877c099773SZhouyang Jia 	}
3388f7917c00SJeff Kirsher 
3389f7917c00SJeff Kirsher 	print_port_info(adapter, ai);
3390f7917c00SJeff Kirsher 	return 0;
3391f7917c00SJeff Kirsher 
33927c099773SZhouyang Jia out_close_led:
33937c099773SZhouyang Jia 	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL, 0);
33947c099773SZhouyang Jia 
3395f7917c00SJeff Kirsher out_free_dev:
3396f7917c00SJeff Kirsher 	iounmap(adapter->regs);
3397f7917c00SJeff Kirsher 	for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i)
3398f7917c00SJeff Kirsher 		if (adapter->port[i])
3399f7917c00SJeff Kirsher 			free_netdev(adapter->port[i]);
3400f7917c00SJeff Kirsher 
3401f7917c00SJeff Kirsher out_free_adapter:
3402f7917c00SJeff Kirsher 	kfree(adapter);
3403f7917c00SJeff Kirsher 
3404f7917c00SJeff Kirsher out_release_regions:
3405f7917c00SJeff Kirsher 	pci_release_regions(pdev);
3406f7917c00SJeff Kirsher out_disable_device:
3407f7917c00SJeff Kirsher 	pci_disable_device(pdev);
3408f7917c00SJeff Kirsher out:
3409f7917c00SJeff Kirsher 	return err;
3410f7917c00SJeff Kirsher }
3411f7917c00SJeff Kirsher 
34122109eaabSBill Pemberton static void remove_one(struct pci_dev *pdev)
3413f7917c00SJeff Kirsher {
3414f7917c00SJeff Kirsher 	struct adapter *adapter = pci_get_drvdata(pdev);
3415f7917c00SJeff Kirsher 
3416f7917c00SJeff Kirsher 	if (adapter) {
3417f7917c00SJeff Kirsher 		int i;
3418f7917c00SJeff Kirsher 
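		/*
		 * Tear down in roughly the reverse order of init_one(): stop
		 * the SGE, remove the sysfs group, detach the offload device,
		 * unregister the ports, then release interrupts, memory and
		 * PCI resources.
		 */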
3419f7917c00SJeff Kirsher 		t3_sge_stop(adapter);
3420f7917c00SJeff Kirsher 		sysfs_remove_group(&adapter->port[0]->dev.kobj,
3421f7917c00SJeff Kirsher 				   &cxgb3_attr_group);
3422f7917c00SJeff Kirsher 
3423f7917c00SJeff Kirsher 		if (is_offload(adapter)) {
3424f7917c00SJeff Kirsher 			cxgb3_adapter_unofld(adapter);
3425f7917c00SJeff Kirsher 			if (test_bit(OFFLOAD_DEVMAP_BIT,
3426f7917c00SJeff Kirsher 				     &adapter->open_device_map))
3427f7917c00SJeff Kirsher 				offload_close(&adapter->tdev);
3428f7917c00SJeff Kirsher 		}
3429f7917c00SJeff Kirsher 
3430f7917c00SJeff Kirsher 		for_each_port(adapter, i)
3431f7917c00SJeff Kirsher 			if (test_bit(i, &adapter->registered_device_map))
3432f7917c00SJeff Kirsher 				unregister_netdev(adapter->port[i]);
3433f7917c00SJeff Kirsher 
3434f7917c00SJeff Kirsher 		t3_stop_sge_timers(adapter);
3435f7917c00SJeff Kirsher 		t3_free_sge_resources(adapter);
3436f7917c00SJeff Kirsher 		cxgb_disable_msi(adapter);
3437f7917c00SJeff Kirsher 
3438f7917c00SJeff Kirsher 		for_each_port(adapter, i)
3439f7917c00SJeff Kirsher 			if (adapter->port[i])
3440f7917c00SJeff Kirsher 				free_netdev(adapter->port[i]);
3441f7917c00SJeff Kirsher 
3442f7917c00SJeff Kirsher 		iounmap(adapter->regs);
3443f7917c00SJeff Kirsher 		kfree_skb(adapter->nofail_skb);
3445f7917c00SJeff Kirsher 		kfree(adapter);
3446f7917c00SJeff Kirsher 		pci_release_regions(pdev);
3447f7917c00SJeff Kirsher 		pci_disable_device(pdev);
3448f7917c00SJeff Kirsher 	}
3449f7917c00SJeff Kirsher }
3450f7917c00SJeff Kirsher 
3451f7917c00SJeff Kirsher static struct pci_driver driver = {
3452f7917c00SJeff Kirsher 	.name = DRV_NAME,
3453f7917c00SJeff Kirsher 	.id_table = cxgb3_pci_tbl,
3454f7917c00SJeff Kirsher 	.probe = init_one,
34552109eaabSBill Pemberton 	.remove = remove_one,
3456f7917c00SJeff Kirsher 	.err_handler = &t3_err_handler,
3457f7917c00SJeff Kirsher };
3458f7917c00SJeff Kirsher 
3459f7917c00SJeff Kirsher static int __init cxgb3_init_module(void)
3460f7917c00SJeff Kirsher {
3461f7917c00SJeff Kirsher 	int ret;
3462f7917c00SJeff Kirsher 
3463f7917c00SJeff Kirsher 	cxgb3_offload_init();
3464f7917c00SJeff Kirsher 
3465f7917c00SJeff Kirsher 	ret = pci_register_driver(&driver);
3466f7917c00SJeff Kirsher 	return ret;
3467f7917c00SJeff Kirsher }
3468f7917c00SJeff Kirsher 
3469f7917c00SJeff Kirsher static void __exit cxgb3_cleanup_module(void)
3470f7917c00SJeff Kirsher {
3471f7917c00SJeff Kirsher 	pci_unregister_driver(&driver);
3472f7917c00SJeff Kirsher 	if (cxgb3_wq)
3473f7917c00SJeff Kirsher 		destroy_workqueue(cxgb3_wq);
3474f7917c00SJeff Kirsher }
3475f7917c00SJeff Kirsher 
3476f7917c00SJeff Kirsher module_init(cxgb3_init_module);
3477f7917c00SJeff Kirsher module_exit(cxgb3_cleanup_module);