1 /*
2  * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
34 
35 #include <linux/module.h>
36 #include <linux/init.h>
37 #include <linux/pci.h>
38 #include <linux/dma-mapping.h>
39 #include <linux/netdevice.h>
40 #include <linux/etherdevice.h>
41 #include <linux/if_vlan.h>
42 #include <linux/mdio.h>
43 #include <linux/sockios.h>
44 #include <linux/workqueue.h>
45 #include <linux/proc_fs.h>
46 #include <linux/rtnetlink.h>
47 #include <linux/firmware.h>
48 #include <linux/log2.h>
49 #include <linux/stringify.h>
50 #include <linux/sched.h>
51 #include <linux/slab.h>
52 #include <linux/uaccess.h>
53 #include <linux/nospec.h>
54 
55 #include "common.h"
56 #include "cxgb3_ioctl.h"
57 #include "regs.h"
58 #include "cxgb3_offload.h"
59 #include "version.h"
60 
61 #include "cxgb3_ctl_defs.h"
62 #include "t3_cpl.h"
63 #include "firmware_exports.h"
64 
/* Bounds on the SGE queue sizes a user may configure. */
enum {
	MAX_TXQ_ENTRIES = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES = 16384,
	MAX_RX_BUFFERS = 16384,
	MAX_RX_JUMBO_BUFFERS = 16384,
	MIN_TXQ_ENTRIES = 4,
	MIN_CTRL_TXQ_ENTRIES = 4,
	MIN_RSPQ_ENTRIES = 32,
	MIN_FL_ENTRIES = 32
};

/* Mask covering every possible port of an adapter. */
#define PORT_MASK ((1 << MAX_NPORTS) - 1)

/* Default netif message-level bitmap for new devices. */
#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

/* Magic value for EEPROM validation (presumably checked by the ethtool
 * EEPROM ops — confirm at the call sites).
 */
#define EEPROM_MAGIC 0x38E2F10C

#define CH_DEVICE(devid, idx) \
	{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }

/* Supported devices; the final CH_DEVICE() argument lands in driver_data
 * and is presumably an index into an adapter-info table — confirm at probe.
 */
static const struct pci_device_id cxgb3_pci_tbl[] = {
	CH_DEVICE(0x20, 0),	/* PE9000 */
	CH_DEVICE(0x21, 1),	/* T302E */
	CH_DEVICE(0x22, 2),	/* T310E */
	CH_DEVICE(0x23, 3),	/* T320X */
	CH_DEVICE(0x24, 1),	/* T302X */
	CH_DEVICE(0x25, 3),	/* T320E */
	CH_DEVICE(0x26, 2),	/* T310X */
	CH_DEVICE(0x30, 2),	/* T3B10 */
	CH_DEVICE(0x31, 3),	/* T3B20 */
	CH_DEVICE(0x32, 1),	/* T3B02 */
	CH_DEVICE(0x35, 6),	/* T3C20-derived T3C10 */
	CH_DEVICE(0x36, 3),	/* S320E-CR */
	CH_DEVICE(0x37, 7),	/* N320E-G2 */
	{0,}
};

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

/* Per-device default for the netif message level (see DFLT_MSG_ENABLE). */
static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");

/*
 * The driver enables offload as a default.
 * To disable it, use ofld_disable = 1.
 */

static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");

/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete.  Get our own work queue to solve this.
 */
struct workqueue_struct *cxgb3_wq;
148 
149 /**
150  *	link_report - show link status and link speed/duplex
151  *	@dev: the port whose settings are to be reported
152  *
153  *	Shows the link status, speed, and duplex of a port.
154  */
155 static void link_report(struct net_device *dev)
156 {
157 	if (!netif_carrier_ok(dev))
158 		netdev_info(dev, "link down\n");
159 	else {
160 		const char *s = "10Mbps";
161 		const struct port_info *p = netdev_priv(dev);
162 
163 		switch (p->link_config.speed) {
164 		case SPEED_10000:
165 			s = "10Gbps";
166 			break;
167 		case SPEED_1000:
168 			s = "1000Mbps";
169 			break;
170 		case SPEED_100:
171 			s = "100Mbps";
172 			break;
173 		}
174 
175 		netdev_info(dev, "link up, %s, %s-duplex\n",
176 			    s, p->link_config.duplex == DUPLEX_FULL
177 			    ? "full" : "half");
178 	}
179 }
180 
/*
 * Put the MAC's TX FIFO into drain mode: set F_ENDROPPKT so queued frames
 * are dropped, then cycle RX off and re-enable TX and RX.  Used while the
 * link is down/faulted so stale frames don't linger in the FIFO.
 */
static void enable_tx_fifo_drain(struct adapter *adapter,
				 struct port_info *pi)
{
	t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset, 0,
			 F_ENDROPPKT);
	t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, 0);
	t3_write_reg(adapter, A_XGM_TX_CTRL + pi->mac.offset, F_TXEN);
	t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, F_RXEN);
}
190 
/* Leave drain mode: clear F_ENDROPPKT so the TX FIFO stops dropping frames. */
static void disable_tx_fifo_drain(struct adapter *adapter,
				  struct port_info *pi)
{
	t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset,
			 F_ENDROPPKT, 0);
}
197 
/*
 * t3_os_link_fault - handle a change in link-fault state for a port
 * @adap: the adapter
 * @port_id: index of the port whose fault state changed
 * @state: non-zero if the fault cleared (link usable), 0 if a fault occurred
 *
 * Updates the carrier state and the MAC accordingly and logs the result.
 */
void t3_os_link_fault(struct adapter *adap, int port_id, int state)
{
	struct net_device *dev = adap->port[port_id];
	struct port_info *pi = netdev_priv(dev);

	/* Nothing to do if the carrier already reflects the new state. */
	if (state == netif_carrier_ok(dev))
		return;

	if (state) {
		struct cmac *mac = &pi->mac;

		netif_carrier_on(dev);

		/* Fault cleared: stop discarding queued TX packets. */
		disable_tx_fifo_drain(adap, pi);

		/* Clear local faults */
		t3_xgm_intr_disable(adap, pi->port_id);
		t3_read_reg(adap, A_XGM_INT_STATUS +
				    pi->mac.offset);
		t3_write_reg(adap,
			     A_XGM_INT_CAUSE + pi->mac.offset,
			     F_XGM_INT);

		/* Re-arm the XGMAC interrupt now that the cause is clear. */
		t3_set_reg_field(adap,
				 A_XGM_INT_ENABLE +
				 pi->mac.offset,
				 F_XGM_INT, F_XGM_INT);
		t3_xgm_intr_enable(adap, pi->port_id);

		t3_mac_enable(mac, MAC_DIRECTION_TX);
	} else {
		netif_carrier_off(dev);

		/* Flush TX FIFO */
		enable_tx_fifo_drain(adap, pi);
	}
	link_report(dev);
}
236 
237 /**
238  *	t3_os_link_changed - handle link status changes
239  *	@adapter: the adapter associated with the link change
240  *	@port_id: the port index whose limk status has changed
241  *	@link_stat: the new status of the link
242  *	@speed: the new speed setting
243  *	@duplex: the new duplex setting
244  *	@pause: the new flow-control setting
245  *
246  *	This is the OS-dependent handler for link status changes.  The OS
247  *	neutral handler takes care of most of the processing for these events,
248  *	then calls this handler for any OS-specific processing.
249  */
250 void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
251 			int speed, int duplex, int pause)
252 {
253 	struct net_device *dev = adapter->port[port_id];
254 	struct port_info *pi = netdev_priv(dev);
255 	struct cmac *mac = &pi->mac;
256 
257 	/* Skip changes from disabled ports. */
258 	if (!netif_running(dev))
259 		return;
260 
261 	if (link_stat != netif_carrier_ok(dev)) {
262 		if (link_stat) {
263 			disable_tx_fifo_drain(adapter, pi);
264 
265 			t3_mac_enable(mac, MAC_DIRECTION_RX);
266 
267 			/* Clear local faults */
268 			t3_xgm_intr_disable(adapter, pi->port_id);
269 			t3_read_reg(adapter, A_XGM_INT_STATUS +
270 				    pi->mac.offset);
271 			t3_write_reg(adapter,
272 				     A_XGM_INT_CAUSE + pi->mac.offset,
273 				     F_XGM_INT);
274 
275 			t3_set_reg_field(adapter,
276 					 A_XGM_INT_ENABLE + pi->mac.offset,
277 					 F_XGM_INT, F_XGM_INT);
278 			t3_xgm_intr_enable(adapter, pi->port_id);
279 
280 			netif_carrier_on(dev);
281 		} else {
282 			netif_carrier_off(dev);
283 
284 			t3_xgm_intr_disable(adapter, pi->port_id);
285 			t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
286 			t3_set_reg_field(adapter,
287 					 A_XGM_INT_ENABLE + pi->mac.offset,
288 					 F_XGM_INT, 0);
289 
290 			if (is_10G(adapter))
291 				pi->phy.ops->power_down(&pi->phy, 1);
292 
293 			t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
294 			t3_mac_disable(mac, MAC_DIRECTION_RX);
295 			t3_link_start(&pi->phy, mac, &pi->link_config);
296 
297 			/* Flush TX FIFO */
298 			enable_tx_fifo_drain(adapter, pi);
299 		}
300 
301 		link_report(dev);
302 	}
303 }
304 
305 /**
306  *	t3_os_phymod_changed - handle PHY module changes
307  *	@adap: the adapter associated with the link change
308  *	@port_id: the port index whose limk status has changed
309  *
310  *	This is the OS-dependent handler for PHY module changes.  It is
311  *	invoked when a PHY module is removed or inserted for any OS-specific
312  *	processing.
313  */
314 void t3_os_phymod_changed(struct adapter *adap, int port_id)
315 {
316 	static const char *mod_str[] = {
317 		NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
318 	};
319 
320 	const struct net_device *dev = adap->port[port_id];
321 	const struct port_info *pi = netdev_priv(dev);
322 
323 	if (pi->phy.modtype == phy_modtype_none)
324 		netdev_info(dev, "PHY module unplugged\n");
325 	else
326 		netdev_info(dev, "%s PHY module inserted\n",
327 			    mod_str[pi->phy.modtype]);
328 }
329 
330 static void cxgb_set_rxmode(struct net_device *dev)
331 {
332 	struct port_info *pi = netdev_priv(dev);
333 
334 	t3_mac_set_rx_mode(&pi->mac, dev);
335 }
336 
337 /**
338  *	link_start - enable a port
339  *	@dev: the device to enable
340  *
341  *	Performs the MAC and PHY actions needed to enable a port.
342  */
343 static void link_start(struct net_device *dev)
344 {
345 	struct port_info *pi = netdev_priv(dev);
346 	struct cmac *mac = &pi->mac;
347 
348 	t3_mac_reset(mac);
349 	t3_mac_set_num_ucast(mac, MAX_MAC_IDX);
350 	t3_mac_set_mtu(mac, dev->mtu);
351 	t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
352 	t3_mac_set_address(mac, SAN_MAC_IDX, pi->iscsic.mac_addr);
353 	t3_mac_set_rx_mode(mac, dev);
354 	t3_link_start(&pi->phy, mac, &pi->link_config);
355 	t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
356 }
357 
358 static inline void cxgb_disable_msi(struct adapter *adapter)
359 {
360 	if (adapter->flags & USING_MSIX) {
361 		pci_disable_msix(adapter->pdev);
362 		adapter->flags &= ~USING_MSIX;
363 	} else if (adapter->flags & USING_MSI) {
364 		pci_disable_msi(adapter->pdev);
365 		adapter->flags &= ~USING_MSI;
366 	}
367 }
368 
369 /*
370  * Interrupt handler for asynchronous events used with MSI-X.
371  */
372 static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
373 {
374 	t3_slow_intr_handler(cookie);
375 	return IRQ_HANDLED;
376 }
377 
378 /*
379  * Name the MSI-X interrupts.
380  */
381 static void name_msix_vecs(struct adapter *adap)
382 {
383 	int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;
384 
385 	snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
386 	adap->msix_info[0].desc[n] = 0;
387 
388 	for_each_port(adap, j) {
389 		struct net_device *d = adap->port[j];
390 		const struct port_info *pi = netdev_priv(d);
391 
392 		for (i = 0; i < pi->nqsets; i++, msi_idx++) {
393 			snprintf(adap->msix_info[msi_idx].desc, n,
394 				 "%s-%d", d->name, pi->first_qset + i);
395 			adap->msix_info[msi_idx].desc[n] = 0;
396 		}
397 	}
398 }
399 
/*
 * Request one MSI-X data interrupt per SGE queue set.  Vector 0 is reserved
 * for async events, so queue set n uses vector n + 1.  On failure all
 * vectors granted so far are released and the error is returned.
 */
static int request_msix_data_irqs(struct adapter *adap)
{
	int i, j, err, qidx = 0;

	for_each_port(adap, i) {
		int nqsets = adap2pinfo(adap, i)->nqsets;

		for (j = 0; j < nqsets; ++j) {
			err = request_irq(adap->msix_info[qidx + 1].vec,
					  t3_intr_handler(adap,
							  adap->sge.qs[qidx].
							  rspq.polling), 0,
					  adap->msix_info[qidx + 1].desc,
					  &adap->sge.qs[qidx]);
			if (err) {
				/* Unwind: free the IRQs granted so far. */
				while (--qidx >= 0)
					free_irq(adap->msix_info[qidx + 1].vec,
						 &adap->sge.qs[qidx]);
				return err;
			}
			qidx++;
		}
	}
	return 0;
}
425 
426 static void free_irq_resources(struct adapter *adapter)
427 {
428 	if (adapter->flags & USING_MSIX) {
429 		int i, n = 0;
430 
431 		free_irq(adapter->msix_info[0].vec, adapter);
432 		for_each_port(adapter, i)
433 			n += adap2pinfo(adapter, i)->nqsets;
434 
435 		for (i = 0; i < n; ++i)
436 			free_irq(adapter->msix_info[i + 1].vec,
437 				 &adapter->sge.qs[i]);
438 	} else
439 		free_irq(adapter->pdev->irq, adapter);
440 }
441 
442 static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
443 			      unsigned long n)
444 {
445 	int attempts = 10;
446 
447 	while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
448 		if (!--attempts)
449 			return -ETIMEDOUT;
450 		msleep(10);
451 	}
452 	return 0;
453 }
454 
/*
 * Write every entry of the TP's memories (16 SMT entries, 2048 L2T entries,
 * 2048 routing-table entries, plus one TCB field) through the management
 * queue — the function name suggests this establishes valid parity in those
 * RAMs; confirm against the T3 documentation.  Each stage falls back to the
 * pre-allocated adap->nofail_skb when a fresh skb can't be allocated, and
 * waits for outstanding replies before replenishing it.
 *
 * Returns 0 on success, -ETIMEDOUT if replies don't arrive, -ENOMEM if no
 * skb is available at all.
 */
static int init_tp_parity(struct adapter *adap)
{
	int i;
	struct sk_buff *skb;
	struct cpl_set_tcb_field *greq;
	/* Reply baseline: offload packets seen before we start sending. */
	unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;

	t3_tp_set_offload_mode(adap, 1);

	/* Stage 1: write all 16 SMT entries. */
	for (i = 0; i < 16; i++) {
		struct cpl_smt_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
		if (!skb)
			skb = adap->nofail_skb;
		if (!skb)
			goto alloc_skb_fail;

		req = __skb_put_zero(skb, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
		req->mtu_idx = NMTUS - 1;
		req->iff = i;
		t3_mgmt_tx(adap, skb);
		if (skb == adap->nofail_skb) {
			/* The emergency skb was consumed: wait for its reply
			 * and allocate a replacement before continuing.
			 */
			await_mgmt_replies(adap, cnt, i + 1);
			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
			if (!adap->nofail_skb)
				goto alloc_skb_fail;
		}
	}

	/* Stage 2: write all 2048 L2T entries. */
	for (i = 0; i < 2048; i++) {
		struct cpl_l2t_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
		if (!skb)
			skb = adap->nofail_skb;
		if (!skb)
			goto alloc_skb_fail;

		req = __skb_put_zero(skb, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
		req->params = htonl(V_L2T_W_IDX(i));
		t3_mgmt_tx(adap, skb);
		if (skb == adap->nofail_skb) {
			await_mgmt_replies(adap, cnt, 16 + i + 1);
			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
			if (!adap->nofail_skb)
				goto alloc_skb_fail;
		}
	}

	/* Stage 3: write all 2048 routing-table entries. */
	for (i = 0; i < 2048; i++) {
		struct cpl_rte_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
		if (!skb)
			skb = adap->nofail_skb;
		if (!skb)
			goto alloc_skb_fail;

		req = __skb_put_zero(skb, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
		req->l2t_idx = htonl(V_L2T_W_IDX(i));
		t3_mgmt_tx(adap, skb);
		if (skb == adap->nofail_skb) {
			await_mgmt_replies(adap, cnt, 16 + 2048 + i + 1);
			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
			if (!adap->nofail_skb)
				goto alloc_skb_fail;
		}
	}

	/* Stage 4: one CPL_SET_TCB_FIELD as the final message. */
	skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
	if (!skb)
		skb = adap->nofail_skb;
	if (!skb)
		goto alloc_skb_fail;

	greq = __skb_put_zero(skb, sizeof(*greq));
	greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
	greq->mask = cpu_to_be64(1);
	t3_mgmt_tx(adap, skb);

	/* Wait for every reply: 16 SMT + 2048 L2T + 2048 RTE + 1 TCB. */
	i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
	if (skb == adap->nofail_skb) {
		/* Extra wait gives the emergency skb's reply a second
		 * chance before the skb is replenished.
		 */
		i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
		adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
	}

	t3_tp_set_offload_mode(adap, 0);
	return i;

alloc_skb_fail:
	t3_tp_set_offload_mode(adap, 0);
	return -ENOMEM;
}
556 
557 /**
558  *	setup_rss - configure RSS
559  *	@adap: the adapter
560  *
561  *	Sets up RSS to distribute packets to multiple receive queues.  We
562  *	configure the RSS CPU lookup table to distribute to the number of HW
563  *	receive queues, and the response queue lookup table to narrow that
564  *	down to the response queues actually configured for each port.
565  *	We always configure the RSS mapping for two ports since the mapping
566  *	table has plenty of entries.
567  */
568 static void setup_rss(struct adapter *adap)
569 {
570 	int i;
571 	unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
572 	unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
573 	u8 cpus[SGE_QSETS + 1];
574 	u16 rspq_map[RSS_TABLE_SIZE + 1];
575 
576 	for (i = 0; i < SGE_QSETS; ++i)
577 		cpus[i] = i;
578 	cpus[SGE_QSETS] = 0xff;	/* terminator */
579 
580 	for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
581 		rspq_map[i] = i % nq0;
582 		rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
583 	}
584 	rspq_map[RSS_TABLE_SIZE] = 0xffff; /* terminator */
585 
586 	t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
587 		      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
588 		      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
589 }
590 
591 static void ring_dbs(struct adapter *adap)
592 {
593 	int i, j;
594 
595 	for (i = 0; i < SGE_QSETS; i++) {
596 		struct sge_qset *qs = &adap->sge.qs[i];
597 
598 		if (qs->adap)
599 			for (j = 0; j < SGE_TXQ_PER_SET; j++)
600 				t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX | V_EGRCNTX(qs->txq[j].cntxt_id));
601 	}
602 }
603 
/* Register a NAPI instance for every initialized SGE queue set. */
static void init_napi(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++) {
		struct sge_qset *qs = &adap->sge.qs[i];

		/* qs->adap is only set for queue sets that were actually
		 * allocated; the poll callback was stashed in qs->napi.poll
		 * beforehand (presumably by the SGE setup code — confirm).
		 */
		if (qs->adap)
			netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll);
	}

	/*
	 * netif_napi_add() can be called only once per napi_struct because it
	 * adds each new napi_struct to a list.  Be careful not to call it a
	 * second time, e.g., during EEH recovery, by making a note of it.
	 */
	adap->flags |= NAPI_INIT;
}
622 
623 /*
624  * Wait until all NAPI handlers are descheduled.  This includes the handlers of
625  * both netdevices representing interfaces and the dummy ones for the extra
626  * queues.
627  */
628 static void quiesce_rx(struct adapter *adap)
629 {
630 	int i;
631 
632 	for (i = 0; i < SGE_QSETS; i++)
633 		if (adap->sge.qs[i].adap)
634 			napi_disable(&adap->sge.qs[i].napi);
635 }
636 
637 static void enable_all_napi(struct adapter *adap)
638 {
639 	int i;
640 	for (i = 0; i < SGE_QSETS; i++)
641 		if (adap->sge.qs[i].adap)
642 			napi_enable(&adap->sge.qs[i].napi);
643 }
644 
645 /**
646  *	setup_sge_qsets - configure SGE Tx/Rx/response queues
647  *	@adap: the adapter
648  *
649  *	Determines how many sets of SGE queues to use and initializes them.
650  *	We support multiple queue sets per port if we have MSI-X, otherwise
651  *	just one queue set per port.
652  */
653 static int setup_sge_qsets(struct adapter *adap)
654 {
655 	int i, j, err, irq_idx = 0, qset_idx = 0;
656 	unsigned int ntxq = SGE_TXQ_PER_SET;
657 
658 	if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
659 		irq_idx = -1;
660 
661 	for_each_port(adap, i) {
662 		struct net_device *dev = adap->port[i];
663 		struct port_info *pi = netdev_priv(dev);
664 
665 		pi->qs = &adap->sge.qs[pi->first_qset];
666 		for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
667 			err = t3_sge_alloc_qset(adap, qset_idx, 1,
668 				(adap->flags & USING_MSIX) ? qset_idx + 1 :
669 							     irq_idx,
670 				&adap->params.sge.qset[qset_idx], ntxq, dev,
671 				netdev_get_tx_queue(dev, j));
672 			if (err) {
673 				t3_free_sge_resources(adap);
674 				return err;
675 			}
676 		}
677 	}
678 
679 	return 0;
680 }
681 
682 static ssize_t attr_show(struct device *d, char *buf,
683 			 ssize_t(*format) (struct net_device *, char *))
684 {
685 	ssize_t len;
686 
687 	/* Synchronize with ioctls that may shut down the device */
688 	rtnl_lock();
689 	len = (*format) (to_net_dev(d), buf);
690 	rtnl_unlock();
691 	return len;
692 }
693 
694 static ssize_t attr_store(struct device *d,
695 			  const char *buf, size_t len,
696 			  ssize_t(*set) (struct net_device *, unsigned int),
697 			  unsigned int min_val, unsigned int max_val)
698 {
699 	ssize_t ret;
700 	unsigned int val;
701 
702 	if (!capable(CAP_NET_ADMIN))
703 		return -EPERM;
704 
705 	ret = kstrtouint(buf, 0, &val);
706 	if (ret)
707 		return ret;
708 	if (val < min_val || val > max_val)
709 		return -EINVAL;
710 
711 	rtnl_lock();
712 	ret = (*set) (to_net_dev(d), val);
713 	if (!ret)
714 		ret = len;
715 	rtnl_unlock();
716 	return ret;
717 }
718 
/*
 * CXGB3_SHOW generates a sysfs show-handler pair for a per-adapter value:
 * format_<name>() prints val_expr (with pi and adap in scope) and
 * show_<name>() wraps it with attr_show() for RTNL protection.
 */
#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct net_device *dev, char *buf) \
{ \
	struct port_info *pi = netdev_priv(dev); \
	struct adapter *adap = pi->adapter; \
	return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return attr_show(d, buf, format_##name); \
}
731 
/*
 * sysfs backend: set the number of MC5 (TCAM) filter entries.  Rejected
 * once the adapter is fully initialized, for non-zero values on rev 0
 * parts, or when filters + servers + minimum TIDs would exceed the CAM.
 */
static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val && adap->params.rev == 0)
		return -EINVAL;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
	    min_tids)
		return -EINVAL;
	adap->params.mc5.nfilters = val;
	return 0;
}
748 
/* sysfs store handler for the MC5 filter count (CAP_NET_ADMIN, RTNL). */
static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, buf, len, set_nfilters, 0, ~0);
}
754 
/*
 * sysfs backend: set the number of MC5 server entries.  Rejected once the
 * adapter is fully initialized or when servers + filters + minimum TIDs
 * would exceed the CAM size.
 */
static ssize_t set_nservers(struct net_device *dev, unsigned int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
	    MC5_MIN_TIDS)
		return -EINVAL;
	adap->params.mc5.nservers = val;
	return 0;
}
768 
/* sysfs store handler for the MC5 server count (CAP_NET_ADMIN, RTNL). */
static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, buf, len, set_nservers, 0, ~0);
}
774 
/*
 * CXGB3_ATTR_R/CXGB3_ATTR_RW instantiate a read-only or read-write sysfs
 * device attribute whose show side is generated by CXGB3_SHOW.
 */
#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, 0444, show_##name, NULL)

#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, 0644, show_##name, store_method)

/* CAM size is read-only; filter/server partition sizes are tunable. */
CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

/* Attribute group registered for every cxgb3 net device. */
static struct attribute *cxgb3_attrs[] = {
	&dev_attr_cam_size.attr,
	&dev_attr_nfilters.attr,
	&dev_attr_nservers.attr,
	NULL
};

static const struct attribute_group cxgb3_attr_group = {
	.attrs = cxgb3_attrs,
};
797 
/*
 * Report the rate of TX scheduler @sched via sysfs.  Each TP_TM PIO word
 * holds two schedulers (even in the low 16 bits, odd in the high 16); the
 * low byte is cpt (clocks per tick?) and the next byte bpt (bytes per
 * tick?) — naming inferred, confirm against the T3 TP documentation.  A
 * zero cpt means the scheduler is disabled; otherwise the rate in Kbps is
 * derived from the core clock, bpt and cpt.
 */
static ssize_t tm_attr_show(struct device *d,
			    char *buf, int sched)
{
	struct port_info *pi = netdev_priv(to_net_dev(d));
	struct adapter *adap = pi->adapter;
	unsigned int v, addr, bpt, cpt;
	ssize_t len;

	/* Two schedulers per register word: sched / 2 selects the word. */
	addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
	rtnl_lock();
	t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
	if (sched & 1)
		v >>= 16;	/* odd schedulers live in the upper half */
	bpt = (v >> 8) & 0xff;
	cpt = v & 0xff;
	if (!cpt)
		len = sprintf(buf, "disabled\n");
	else {
		v = (adap->params.vpd.cclk * 1000) / cpt;
		len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
	}
	rtnl_unlock();
	return len;
}
823 
824 static ssize_t tm_attr_store(struct device *d,
825 			     const char *buf, size_t len, int sched)
826 {
827 	struct port_info *pi = netdev_priv(to_net_dev(d));
828 	struct adapter *adap = pi->adapter;
829 	unsigned int val;
830 	ssize_t ret;
831 
832 	if (!capable(CAP_NET_ADMIN))
833 		return -EPERM;
834 
835 	ret = kstrtouint(buf, 0, &val);
836 	if (ret)
837 		return ret;
838 	if (val > 10000000)
839 		return -EINVAL;
840 
841 	rtnl_lock();
842 	ret = t3_config_sched(adap, val, sched);
843 	if (!ret)
844 		ret = len;
845 	rtnl_unlock();
846 	return ret;
847 }
848 
/*
 * TM_ATTR generates the sysfs show/store pair for TX scheduler @sched,
 * delegating to tm_attr_show()/tm_attr_store().
 */
#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return tm_attr_show(d, buf, sched); \
} \
static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
			    const char *buf, size_t len) \
{ \
	return tm_attr_store(d, buf, len, sched); \
} \
static DEVICE_ATTR(name, 0644, show_##name, store_##name)

/* One attribute per hardware TX traffic scheduler (8 in total). */
TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);

/* Scheduler attributes registered only when offload is in use. */
static struct attribute *offload_attrs[] = {
	&dev_attr_sched0.attr,
	&dev_attr_sched1.attr,
	&dev_attr_sched2.attr,
	&dev_attr_sched3.attr,
	&dev_attr_sched4.attr,
	&dev_attr_sched5.attr,
	&dev_attr_sched6.attr,
	&dev_attr_sched7.attr,
	NULL
};

static const struct attribute_group offload_attr_group = {
	.attrs = offload_attrs,
};
886 
887 /*
888  * Sends an sk_buff to an offload queue driver
889  * after dealing with any active network taps.
890  */
891 static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
892 {
893 	int ret;
894 
895 	local_bh_disable();
896 	ret = t3_offload_tx(tdev, skb);
897 	local_bh_enable();
898 	return ret;
899 }
900 
/*
 * Program SMT entry @idx with port @idx's LAN and iSCSI MAC addresses by
 * sending a CPL_SMT_WRITE_REQ through the offload queue.  The skb is
 * consumed by offload_tx().  Returns 0 or -ENOMEM.
 */
static int write_smt_entry(struct adapter *adapter, int idx)
{
	struct cpl_smt_write_req *req;
	struct port_info *pi = netdev_priv(adapter->port[idx]);
	struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

	if (!skb)
		return -ENOMEM;

	req = __skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
	req->mtu_idx = NMTUS - 1;	/* should be 0 but there's a T3 bug */
	req->iff = idx;
	memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
	memcpy(req->src_mac1, pi->iscsic.mac_addr, ETH_ALEN);
	skb->priority = 1;
	offload_tx(&adapter->tdev, skb);
	return 0;
}
921 
922 static int init_smt(struct adapter *adapter)
923 {
924 	int i;
925 
926 	for_each_port(adapter, i)
927 	    write_smt_entry(adapter, i);
928 	return 0;
929 }
930 
931 static void init_port_mtus(struct adapter *adapter)
932 {
933 	unsigned int mtus = adapter->port[0]->mtu;
934 
935 	if (adapter->port[1])
936 		mtus |= adapter->port[1]->mtu << 16;
937 	t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
938 }
939 
/*
 * Send a FW_WROPCODE_MNGT work request configuring packet scheduler @sched
 * for queue @qidx with bounds [@lo, @hi] and binding it to @port (callers
 * pass -1 for lo/hi; presumably "no limit" — confirm against the firmware
 * interface).  Falls back to the pre-allocated adap->nofail_skb when the
 * skb allocation fails, and replenishes it afterwards.  Returns the
 * t3_mgmt_tx() result or -ENOMEM.
 */
static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
			      int hi, int port)
{
	struct sk_buff *skb;
	struct mngt_pktsched_wr *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		skb = adap->nofail_skb;
	if (!skb)
		return -ENOMEM;

	req = skb_put(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
	req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
	req->sched = sched;
	req->idx = qidx;
	req->min = lo;
	req->max = hi;
	req->binding = port;
	ret = t3_mgmt_tx(adap, skb);
	if (skb == adap->nofail_skb) {
		/* The emergency skb was consumed; allocate a replacement. */
		adap->nofail_skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
					     GFP_KERNEL);
		if (!adap->nofail_skb)
			ret = -ENOMEM;
	}

	return ret;
}
971 
972 static int bind_qsets(struct adapter *adap)
973 {
974 	int i, j, err = 0;
975 
976 	for_each_port(adap, i) {
977 		const struct port_info *pi = adap2pinfo(adap, i);
978 
979 		for (j = 0; j < pi->nqsets; ++j) {
980 			int ret = send_pktsched_cmd(adap, 1,
981 						    pi->first_qset + j, -1,
982 						    -1, i);
983 			if (ret)
984 				err = ret;
985 		}
986 	}
987 
988 	return err;
989 }
990 
/* Names of the firmware, TP SRAM and PHY EDC images this driver may load. */
#define FW_VERSION __stringify(FW_VERSION_MAJOR) "."			\
	__stringify(FW_VERSION_MINOR) "." __stringify(FW_VERSION_MICRO)
#define FW_FNAME "cxgb3/t3fw-" FW_VERSION ".bin"
#define TPSRAM_VERSION __stringify(TP_VERSION_MAJOR) "."		\
	__stringify(TP_VERSION_MINOR) "." __stringify(TP_VERSION_MICRO)
#define TPSRAM_NAME "cxgb3/t3%c_psram-" TPSRAM_VERSION ".bin"
#define AEL2005_OPT_EDC_NAME "cxgb3/ael2005_opt_edc.bin"
#define AEL2005_TWX_EDC_NAME "cxgb3/ael2005_twx_edc.bin"
#define AEL2020_TWX_EDC_NAME "cxgb3/ael2020_twx_edc.bin"
MODULE_FIRMWARE(FW_FNAME);
MODULE_FIRMWARE("cxgb3/t3b_psram-" TPSRAM_VERSION ".bin");
MODULE_FIRMWARE("cxgb3/t3c_psram-" TPSRAM_VERSION ".bin");
MODULE_FIRMWARE(AEL2005_OPT_EDC_NAME);
MODULE_FIRMWARE(AEL2005_TWX_EDC_NAME);
MODULE_FIRMWARE(AEL2020_TWX_EDC_NAME);
1006 
1007 static inline const char *get_edc_fw_name(int edc_idx)
1008 {
1009 	const char *fw_name = NULL;
1010 
1011 	switch (edc_idx) {
1012 	case EDC_OPT_AEL2005:
1013 		fw_name = AEL2005_OPT_EDC_NAME;
1014 		break;
1015 	case EDC_TWX_AEL2005:
1016 		fw_name = AEL2005_TWX_EDC_NAME;
1017 		break;
1018 	case EDC_TWX_AEL2020:
1019 		fw_name = AEL2020_TWX_EDC_NAME;
1020 		break;
1021 	}
1022 	return fw_name;
1023 }
1024 
1025 int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size)
1026 {
1027 	struct adapter *adapter = phy->adapter;
1028 	const struct firmware *fw;
1029 	const char *fw_name;
1030 	u32 csum;
1031 	const __be32 *p;
1032 	u16 *cache = phy->phy_cache;
1033 	int i, ret = -EINVAL;
1034 
1035 	fw_name = get_edc_fw_name(edc_idx);
1036 	if (fw_name)
1037 		ret = request_firmware(&fw, fw_name, &adapter->pdev->dev);
1038 	if (ret < 0) {
1039 		dev_err(&adapter->pdev->dev,
1040 			"could not upgrade firmware: unable to load %s\n",
1041 			fw_name);
1042 		return ret;
1043 	}
1044 
1045 	/* check size, take checksum in account */
1046 	if (fw->size > size + 4) {
1047 		CH_ERR(adapter, "firmware image too large %u, expected %d\n",
1048 		       (unsigned int)fw->size, size + 4);
1049 		ret = -EINVAL;
1050 	}
1051 
1052 	/* compute checksum */
1053 	p = (const __be32 *)fw->data;
1054 	for (csum = 0, i = 0; i < fw->size / sizeof(csum); i++)
1055 		csum += ntohl(p[i]);
1056 
1057 	if (csum != 0xffffffff) {
1058 		CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
1059 		       csum);
1060 		ret = -EINVAL;
1061 	}
1062 
1063 	for (i = 0; i < size / 4 ; i++) {
1064 		*cache++ = (be32_to_cpu(p[i]) & 0xffff0000) >> 16;
1065 		*cache++ = be32_to_cpu(p[i]) & 0xffff;
1066 	}
1067 
1068 	release_firmware(fw);
1069 
1070 	return ret;
1071 }
1072 
1073 static int upgrade_fw(struct adapter *adap)
1074 {
1075 	int ret;
1076 	const struct firmware *fw;
1077 	struct device *dev = &adap->pdev->dev;
1078 
1079 	ret = request_firmware(&fw, FW_FNAME, dev);
1080 	if (ret < 0) {
1081 		dev_err(dev, "could not upgrade firmware: unable to load %s\n",
1082 			FW_FNAME);
1083 		return ret;
1084 	}
1085 	ret = t3_load_fw(adap, fw->data, fw->size);
1086 	release_firmware(fw);
1087 
1088 	if (ret == 0)
1089 		dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
1090 			 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
1091 	else
1092 		dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
1093 			FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
1094 
1095 	return ret;
1096 }
1097 
1098 static inline char t3rev2char(struct adapter *adapter)
1099 {
1100 	char rev = 0;
1101 
1102 	switch(adapter->params.rev) {
1103 	case T3_REV_B:
1104 	case T3_REV_B2:
1105 		rev = 'b';
1106 		break;
1107 	case T3_REV_C:
1108 		rev = 'c';
1109 		break;
1110 	}
1111 	return rev;
1112 }
1113 
1114 static int update_tpsram(struct adapter *adap)
1115 {
1116 	const struct firmware *tpsram;
1117 	char buf[64];
1118 	struct device *dev = &adap->pdev->dev;
1119 	int ret;
1120 	char rev;
1121 
1122 	rev = t3rev2char(adap);
1123 	if (!rev)
1124 		return 0;
1125 
1126 	snprintf(buf, sizeof(buf), TPSRAM_NAME, rev);
1127 
1128 	ret = request_firmware(&tpsram, buf, dev);
1129 	if (ret < 0) {
1130 		dev_err(dev, "could not load TP SRAM: unable to load %s\n",
1131 			buf);
1132 		return ret;
1133 	}
1134 
1135 	ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
1136 	if (ret)
1137 		goto release_tpsram;
1138 
1139 	ret = t3_set_proto_sram(adap, tpsram->data);
1140 	if (ret == 0)
1141 		dev_info(dev,
1142 			 "successful update of protocol engine "
1143 			 "to %d.%d.%d\n",
1144 			 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
1145 	else
1146 		dev_err(dev, "failed to update of protocol engine %d.%d.%d\n",
1147 			TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
1148 	if (ret)
1149 		dev_err(dev, "loading protocol SRAM failed\n");
1150 
1151 release_tpsram:
1152 	release_firmware(tpsram);
1153 
1154 	return ret;
1155 }
1156 
1157 /**
1158  * t3_synchronize_rx - wait for current Rx processing on a port to complete
1159  * @adap: the adapter
1160  * @p: the port
1161  *
1162  * Ensures that current Rx processing on any of the queues associated with
1163  * the given port completes before returning.  We do this by acquiring and
1164  * releasing the locks of the response queues associated with the port.
1165  */
1166 static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
1167 {
1168 	int i;
1169 
1170 	for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
1171 		struct sge_rspq *q = &adap->sge.qs[i].rspq;
1172 
1173 		spin_lock_irq(&q->lock);
1174 		spin_unlock_irq(&q->lock);
1175 	}
1176 }
1177 
1178 static void cxgb_vlan_mode(struct net_device *dev, netdev_features_t features)
1179 {
1180 	struct port_info *pi = netdev_priv(dev);
1181 	struct adapter *adapter = pi->adapter;
1182 
1183 	if (adapter->params.rev > 0) {
1184 		t3_set_vlan_accel(adapter, 1 << pi->port_id,
1185 				  features & NETIF_F_HW_VLAN_CTAG_RX);
1186 	} else {
1187 		/* single control for all ports */
1188 		unsigned int i, have_vlans = features & NETIF_F_HW_VLAN_CTAG_RX;
1189 
1190 		for_each_port(adapter, i)
1191 			have_vlans |=
1192 				adapter->port[i]->features &
1193 				NETIF_F_HW_VLAN_CTAG_RX;
1194 
1195 		t3_set_vlan_accel(adapter, 1, have_vlans);
1196 	}
1197 	t3_synchronize_rx(adapter, pi);
1198 }
1199 
1200 /**
1201  *	cxgb_up - enable the adapter
1202  *	@adap: adapter being enabled
1203  *
1204  *	Called when the first port is enabled, this function performs the
1205  *	actions necessary to make an adapter operational, such as completing
1206  *	the initialization of HW modules, and enabling interrupts.
1207  *
1208  *	Must be called with the rtnl lock held.
1209  */
static int cxgb_up(struct adapter *adap)
{
	int i, err;

	if (!(adap->flags & FULL_INIT_DONE)) {
		/*
		 * First bring-up: upgrade stale firmware / TP SRAM if the
		 * version checks ask for it, then do one-time HW and SGE
		 * initialization.
		 */
		err = t3_check_fw_version(adap);
		if (err == -EINVAL) {
			err = upgrade_fw(adap);
			CH_WARN(adap, "FW upgrade to %d.%d.%d %s\n",
				FW_VERSION_MAJOR, FW_VERSION_MINOR,
				FW_VERSION_MICRO, err ? "failed" : "succeeded");
		}

		err = t3_check_tpsram_version(adap);
		if (err == -EINVAL) {
			err = update_tpsram(adap);
			CH_WARN(adap, "TP upgrade to %d.%d.%d %s\n",
				TP_VERSION_MAJOR, TP_VERSION_MINOR,
				TP_VERSION_MICRO, err ? "failed" : "succeeded");
		}

		/*
		 * Clear interrupts now to catch errors if t3_init_hw fails.
		 * We clear them again later as initialization may trigger
		 * conditions that can interrupt.
		 */
		t3_intr_clear(adap);

		err = t3_init_hw(adap, 0);
		if (err)
			goto out;

		t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
		t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

		err = setup_sge_qsets(adap);
		if (err)
			goto out;

		/* apply each port's current VLAN offload settings */
		for_each_port(adap, i)
			cxgb_vlan_mode(adap->port[i], adap->port[i]->features);

		setup_rss(adap);
		if (!(adap->flags & NAPI_INIT))
			init_napi(adap);

		t3_start_sge_timers(adap);
		adap->flags |= FULL_INIT_DONE;
	}

	t3_intr_clear(adap);

	if (adap->flags & USING_MSIX) {
		/* vector 0 handles async/error events, the rest carry data */
		name_msix_vecs(adap);
		err = request_irq(adap->msix_info[0].vec,
				  t3_async_intr_handler, 0,
				  adap->msix_info[0].desc, adap);
		if (err)
			goto irq_err;

		err = request_msix_data_irqs(adap);
		if (err) {
			free_irq(adap->msix_info[0].vec, adap);
			goto irq_err;
		}
	} else {
		/* single interrupt (INTx or MSI) shared by all queues */
		err = request_irq(adap->pdev->irq,
				  t3_intr_handler(adap, adap->sge.qs[0].rspq.polling),
				  (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
				  adap->name, adap);
		if (err)
			goto irq_err;
	}

	enable_all_napi(adap);
	t3_sge_start(adap);
	t3_intr_enable(adap);

	/* rev-C and later offload parts get a one-time TP parity init */
	if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
	    is_offload(adap) && init_tp_parity(adap) == 0)
		adap->flags |= TP_PARITY_INIT;

	if (adap->flags & TP_PARITY_INIT) {
		/* clear stale parity causes, then enable TP interrupts */
		t3_write_reg(adap, A_TP_INT_CAUSE,
			     F_CMCACHEPERR | F_ARPLUTPERR);
		t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
	}

	if (!(adap->flags & QUEUES_BOUND)) {
		int ret = bind_qsets(adap);

		if (ret < 0) {
			/* undo interrupt setup; the adapter stays down */
			CH_ERR(adap, "failed to bind qsets, err %d\n", ret);
			t3_intr_disable(adap);
			free_irq_resources(adap);
			err = ret;
			goto out;
		}
		adap->flags |= QUEUES_BOUND;
	}

out:
	return err;
irq_err:
	CH_ERR(adap, "request_irq failed, err %d\n", err);
	goto out;
}
1317 
1318 /*
1319  * Release resources when all the ports and offloading have been stopped.
1320  */
static void cxgb_down(struct adapter *adapter, int on_wq)
{
	t3_sge_stop(adapter);
	/*
	 * Disable interrupts under work_lock so we cannot race with the
	 * PHY interrupt task re-enabling them.
	 */
	spin_lock_irq(&adapter->work_lock);	/* sync with PHY intr task */
	t3_intr_disable(adapter);
	spin_unlock_irq(&adapter->work_lock);

	free_irq_resources(adapter);
	quiesce_rx(adapter);
	/* stop the SGE again now that Rx has fully quiesced */
	t3_sge_stop(adapter);
	if (!on_wq)
		flush_workqueue(cxgb3_wq);/* wait for external IRQ handler */
}
1334 
1335 static void schedule_chk_task(struct adapter *adap)
1336 {
1337 	unsigned int timeo;
1338 
1339 	timeo = adap->params.linkpoll_period ?
1340 	    (HZ * adap->params.linkpoll_period) / 10 :
1341 	    adap->params.stats_update_period * HZ;
1342 	if (timeo)
1343 		queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
1344 }
1345 
static int offload_open(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct t3cdev *tdev = dev2t3cdev(dev);
	int adap_up = adapter->open_device_map & PORT_MASK;
	int err;

	/* only the first caller to set the offload bit does the work */
	if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;

	/* bring the adapter up first if no port has done so yet */
	if (!adap_up && (err = cxgb_up(adapter)) < 0)
		goto out;

	t3_tp_set_offload_mode(adapter, 1);
	tdev->lldev = adapter->port[0];
	err = cxgb3_offload_activate(adapter);
	if (err)
		goto out;

	init_port_mtus(adapter);
	/* rev-0 parts cap the MTU table at port 0's MTU */
	t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
		     adapter->params.b_wnd,
		     adapter->params.rev == 0 ?
		     adapter->port[0]->mtu : 0xffff);
	init_smt(adapter);

	/* sysfs failure is non-fatal; offload still works */
	if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
		dev_dbg(&dev->dev, "cannot create sysfs group\n");

	/* Call back all registered clients */
	cxgb3_add_clients(tdev);

out:
	/* restore them in case the offload module has changed them */
	if (err) {
		t3_tp_set_offload_mode(adapter, 0);
		clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
		cxgb3_set_dummy_ops(tdev);
	}
	return err;
}
1388 
static int offload_close(struct t3cdev *tdev)
{
	struct adapter *adapter = tdev2adap(tdev);
	struct t3c_data *td = T3C_DATA(tdev);

	/* nothing to do if offload was never opened */
	if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;

	/* Call back all registered clients */
	cxgb3_remove_clients(tdev);

	sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);

	/* Flush work scheduled while releasing TIDs */
	flush_work(&td->tid_release_task);

	tdev->lldev = NULL;
	cxgb3_set_dummy_ops(tdev);
	t3_tp_set_offload_mode(adapter, 0);
	clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);

	/* if no ports remain open either, take the whole adapter down */
	if (!adapter->open_device_map)
		cxgb_down(adapter, 0);

	cxgb3_offload_deactivate(adapter);
	return 0;
}
1416 
static int cxgb_open(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int other_ports = adapter->open_device_map & PORT_MASK;
	int err;

	/* the first port to open brings the whole adapter up */
	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
		return err;

	set_bit(pi->port_id, &adapter->open_device_map);
	if (is_offload(adapter) && !ofld_disable) {
		/* offload failure is non-fatal for the netdev itself */
		err = offload_open(dev);
		if (err)
			pr_warn("Could not initialize offload capabilities\n");
	}

	netif_set_real_num_tx_queues(dev, pi->nqsets);
	/*
	 * NOTE(review): an error return here leaves this port's bit set in
	 * open_device_map -- confirm whether cleanup is needed.
	 */
	err = netif_set_real_num_rx_queues(dev, pi->nqsets);
	if (err)
		return err;
	link_start(dev);
	t3_port_intr_enable(adapter, pi->port_id);
	netif_tx_start_all_queues(dev);
	/* only the first open port (re)arms the periodic check task */
	if (!other_ports)
		schedule_chk_task(adapter);

	cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_UP, pi->port_id);
	return 0;
}
1447 
static int __cxgb_close(struct net_device *dev, int on_wq)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;


	/* nothing to do if no port (or offload) is open */
	if (!adapter->open_device_map)
		return 0;

	/* Stop link fault interrupts */
	t3_xgm_intr_disable(adapter, pi->port_id);
	/* read clears any pending XGMAC interrupt status */
	t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);

	t3_port_intr_disable(adapter, pi->port_id);
	netif_tx_stop_all_queues(dev);
	pi->phy.ops->power_down(&pi->phy, 1);
	netif_carrier_off(dev);
	t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);

	/* clear the port bit under work_lock to sync with the update task */
	spin_lock_irq(&adapter->work_lock);	/* sync with update task */
	clear_bit(pi->port_id, &adapter->open_device_map);
	spin_unlock_irq(&adapter->work_lock);

	/* last port down: stop the periodic check task */
	if (!(adapter->open_device_map & PORT_MASK))
		cancel_delayed_work_sync(&adapter->adap_check_task);

	/* no ports and no offload left: take the adapter down */
	if (!adapter->open_device_map)
		cxgb_down(adapter, on_wq);

	cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_DOWN, pi->port_id);
	return 0;
}
1480 
/* Close a port from process context (not from the cxgb3 workqueue). */
static int cxgb_close(struct net_device *dev)
{
	return __cxgb_close(dev, 0);
}
1485 
1486 static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
1487 {
1488 	struct port_info *pi = netdev_priv(dev);
1489 	struct adapter *adapter = pi->adapter;
1490 	struct net_device_stats *ns = &dev->stats;
1491 	const struct mac_stats *pstats;
1492 
1493 	spin_lock(&adapter->stats_lock);
1494 	pstats = t3_mac_update_stats(&pi->mac);
1495 	spin_unlock(&adapter->stats_lock);
1496 
1497 	ns->tx_bytes = pstats->tx_octets;
1498 	ns->tx_packets = pstats->tx_frames;
1499 	ns->rx_bytes = pstats->rx_octets;
1500 	ns->rx_packets = pstats->rx_frames;
1501 	ns->multicast = pstats->rx_mcast_frames;
1502 
1503 	ns->tx_errors = pstats->tx_underrun;
1504 	ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
1505 	    pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
1506 	    pstats->rx_fifo_ovfl;
1507 
1508 	/* detailed rx_errors */
1509 	ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
1510 	ns->rx_over_errors = 0;
1511 	ns->rx_crc_errors = pstats->rx_fcs_errs;
1512 	ns->rx_frame_errors = pstats->rx_symbol_errs;
1513 	ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
1514 	ns->rx_missed_errors = pstats->rx_cong_drops;
1515 
1516 	/* detailed tx_errors */
1517 	ns->tx_aborted_errors = 0;
1518 	ns->tx_carrier_errors = 0;
1519 	ns->tx_fifo_errors = pstats->tx_underrun;
1520 	ns->tx_heartbeat_errors = 0;
1521 	ns->tx_window_errors = 0;
1522 	return ns;
1523 }
1524 
1525 static u32 get_msglevel(struct net_device *dev)
1526 {
1527 	struct port_info *pi = netdev_priv(dev);
1528 	struct adapter *adapter = pi->adapter;
1529 
1530 	return adapter->msg_enable;
1531 }
1532 
1533 static void set_msglevel(struct net_device *dev, u32 val)
1534 {
1535 	struct port_info *pi = netdev_priv(dev);
1536 	struct adapter *adapter = pi->adapter;
1537 
1538 	adapter->msg_enable = val;
1539 }
1540 
/*
 * Names of the ethtool statistics, in the exact order in which get_stats()
 * emits the values -- keep the two in sync.  Entries are space-padded to a
 * uniform width within ETH_GSTRING_LEN.
 */
static const char stats_strings[][ETH_GSTRING_LEN] = {
	"TxOctetsOK         ",
	"TxFramesOK         ",
	"TxMulticastFramesOK",
	"TxBroadcastFramesOK",
	"TxPauseFrames      ",
	"TxUnderrun         ",
	"TxExtUnderrun      ",

	"TxFrames64         ",
	"TxFrames65To127    ",
	"TxFrames128To255   ",
	"TxFrames256To511   ",
	"TxFrames512To1023  ",
	"TxFrames1024To1518 ",
	"TxFrames1519ToMax  ",

	"RxOctetsOK         ",
	"RxFramesOK         ",
	"RxMulticastFramesOK",
	"RxBroadcastFramesOK",
	"RxPauseFrames      ",
	"RxFCSErrors        ",
	"RxSymbolErrors     ",
	"RxShortErrors      ",
	"RxJabberErrors     ",
	"RxLengthErrors     ",
	"RxFIFOoverflow     ",

	"RxFrames64         ",
	"RxFrames65To127    ",
	"RxFrames128To255   ",
	"RxFrames256To511   ",
	"RxFrames512To1023  ",
	"RxFrames1024To1518 ",
	"RxFrames1519ToMax  ",

	"PhyFIFOErrors      ",
	"TSO                ",
	"VLANextractions    ",
	"VLANinsertions     ",
	"TxCsumOffload      ",
	"RxCsumGood         ",
	"LroAggregated      ",
	"LroFlushed         ",
	"LroNoDesc          ",
	"RxDrops            ",

	"CheckTXEnToggled   ",
	"CheckResets        ",

	"LinkFaults         ",
};
1594 
1595 static int get_sset_count(struct net_device *dev, int sset)
1596 {
1597 	switch (sset) {
1598 	case ETH_SS_STATS:
1599 		return ARRAY_SIZE(stats_strings);
1600 	default:
1601 		return -EOPNOTSUPP;
1602 	}
1603 }
1604 
/* size in bytes of the register dump produced by get_regs() */
#define T3_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
{
	return T3_REGMAP_SIZE;
}
1611 
static int get_eeprom_len(struct net_device *dev)
{
	/* fixed serial EEPROM size for this adapter family */
	return EEPROMSIZE;
}
1616 
static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 fw_vers = 0;
	u32 tp_vers = 0;

	/* read the firmware and TP microcode versions under stats_lock */
	spin_lock(&adapter->stats_lock);
	t3_get_fw_version(adapter, &fw_vers);
	t3_get_tp_version(adapter, &tp_vers);
	spin_unlock(&adapter->stats_lock);

	strscpy(info->driver, DRV_NAME, sizeof(info->driver));
	strscpy(info->bus_info, pci_name(adapter->pdev),
		sizeof(info->bus_info));
	/* fw_version is left empty if the version could not be read */
	if (fw_vers)
		snprintf(info->fw_version, sizeof(info->fw_version),
			 "%s %u.%u.%u TP %u.%u.%u",
			 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
			 G_FW_VERSION_MAJOR(fw_vers),
			 G_FW_VERSION_MINOR(fw_vers),
			 G_FW_VERSION_MICRO(fw_vers),
			 G_TP_VERSION_MAJOR(tp_vers),
			 G_TP_VERSION_MINOR(tp_vers),
			 G_TP_VERSION_MICRO(tp_vers));
}
1643 
1644 static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
1645 {
1646 	if (stringset == ETH_SS_STATS)
1647 		memcpy(data, stats_strings, sizeof(stats_strings));
1648 }
1649 
1650 static unsigned long collect_sge_port_stats(struct adapter *adapter,
1651 					    struct port_info *p, int idx)
1652 {
1653 	int i;
1654 	unsigned long tot = 0;
1655 
1656 	for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
1657 		tot += adapter->sge.qs[i].port_stats[idx];
1658 	return tot;
1659 }
1660 
/*
 * Emit the ethtool statistics values.  The order of the writes below must
 * match stats_strings[] exactly.
 */
static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	const struct mac_stats *s;

	/* refresh the accumulated MAC counters under the stats lock */
	spin_lock(&adapter->stats_lock);
	s = t3_mac_update_stats(&pi->mac);
	spin_unlock(&adapter->stats_lock);

	/* Tx counters */
	*data++ = s->tx_octets;
	*data++ = s->tx_frames;
	*data++ = s->tx_mcast_frames;
	*data++ = s->tx_bcast_frames;
	*data++ = s->tx_pause;
	*data++ = s->tx_underrun;
	*data++ = s->tx_fifo_urun;

	*data++ = s->tx_frames_64;
	*data++ = s->tx_frames_65_127;
	*data++ = s->tx_frames_128_255;
	*data++ = s->tx_frames_256_511;
	*data++ = s->tx_frames_512_1023;
	*data++ = s->tx_frames_1024_1518;
	*data++ = s->tx_frames_1519_max;

	/* Rx counters */
	*data++ = s->rx_octets;
	*data++ = s->rx_frames;
	*data++ = s->rx_mcast_frames;
	*data++ = s->rx_bcast_frames;
	*data++ = s->rx_pause;
	*data++ = s->rx_fcs_errs;
	*data++ = s->rx_symbol_errs;
	*data++ = s->rx_short;
	*data++ = s->rx_jabber;
	*data++ = s->rx_too_long;
	*data++ = s->rx_fifo_ovfl;

	*data++ = s->rx_frames_64;
	*data++ = s->rx_frames_65_127;
	*data++ = s->rx_frames_128_255;
	*data++ = s->rx_frames_256_511;
	*data++ = s->rx_frames_512_1023;
	*data++ = s->rx_frames_1024_1518;
	*data++ = s->rx_frames_1519_max;

	*data++ = pi->phy.fifo_errors;

	/* per-queue SGE counters, summed over the port's queue sets */
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
	*data++ = 0;	/* LroAggregated: always reported as zero */
	*data++ = 0;	/* LroFlushed: always reported as zero */
	*data++ = 0;	/* LroNoDesc: always reported as zero */
	*data++ = s->rx_cong_drops;

	*data++ = s->num_toggled;
	*data++ = s->num_resets;

	*data++ = s->link_faults;
}
1725 
1726 static inline void reg_block_dump(struct adapter *ap, void *buf,
1727 				  unsigned int start, unsigned int end)
1728 {
1729 	u32 *p = buf + start;
1730 
1731 	for (; start <= end; start += sizeof(u32))
1732 		*p++ = t3_read_reg(ap, start);
1733 }
1734 
static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
		     void *buf)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *ap = pi->adapter;

	/*
	 * Version scheme:
	 * bits 0..9: chip version
	 * bits 10..15: chip revision
	 * bit 31: set for PCIe cards
	 */
	regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);

	/*
	 * We skip the MAC statistics registers because they are clear-on-read.
	 * Also reading multi-register stats would need to synchronize with the
	 * periodic mac stats accumulation.  Hard to justify the complexity.
	 */
	memset(buf, 0, T3_REGMAP_SIZE);
	/* skipped ranges stay zero from the memset above */
	reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
	reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
	reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
	reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
	reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
	reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
		       XGM_REG(A_XGM_SERDES_STAT3, 1));
	reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
		       XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
}
1765 
1766 static int restart_autoneg(struct net_device *dev)
1767 {
1768 	struct port_info *p = netdev_priv(dev);
1769 
1770 	if (!netif_running(dev))
1771 		return -EAGAIN;
1772 	if (p->link_config.autoneg != AUTONEG_ENABLE)
1773 		return -EINVAL;
1774 	p->phy.ops->autoneg_restart(&p->phy);
1775 	return 0;
1776 }
1777 
1778 static int set_phys_id(struct net_device *dev,
1779 		       enum ethtool_phys_id_state state)
1780 {
1781 	struct port_info *pi = netdev_priv(dev);
1782 	struct adapter *adapter = pi->adapter;
1783 
1784 	switch (state) {
1785 	case ETHTOOL_ID_ACTIVE:
1786 		return 1;	/* cycle on/off once per second */
1787 
1788 	case ETHTOOL_ID_OFF:
1789 		t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL, 0);
1790 		break;
1791 
1792 	case ETHTOOL_ID_ON:
1793 	case ETHTOOL_ID_INACTIVE:
1794 		t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1795 			 F_GPIO0_OUT_VAL);
1796 	}
1797 
1798 	return 0;
1799 }
1800 
static int get_link_ksettings(struct net_device *dev,
			      struct ethtool_link_ksettings *cmd)
{
	struct port_info *p = netdev_priv(dev);
	u32 supported;

	/* convert the driver's legacy u32 masks to link-mode bitmaps */
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						p->link_config.supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						p->link_config.advertising);

	/* speed/duplex are only meaningful while the link is up */
	if (netif_carrier_ok(dev)) {
		cmd->base.speed = p->link_config.speed;
		cmd->base.duplex = p->link_config.duplex;
	} else {
		cmd->base.speed = SPEED_UNKNOWN;
		cmd->base.duplex = DUPLEX_UNKNOWN;
	}

	ethtool_convert_link_mode_to_legacy_u32(&supported,
						cmd->link_modes.supported);

	/* derive the port type from the supported media mask */
	cmd->base.port = (supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
	cmd->base.phy_address = p->phy.mdio.prtad;
	cmd->base.autoneg = p->link_config.autoneg;
	return 0;
}
1828 
/*
 * Translate a (speed, duplex) pair into the corresponding ethtool
 * SUPPORTED_* capability bit.  Returns 0 for unsupported combinations
 * (10G half duplex, unknown speeds).
 */
static int speed_duplex_to_caps(int speed, int duplex)
{
	int full = (duplex == DUPLEX_FULL);

	switch (speed) {
	case SPEED_10:
		return full ? SUPPORTED_10baseT_Full : SUPPORTED_10baseT_Half;
	case SPEED_100:
		return full ? SUPPORTED_100baseT_Full :
			      SUPPORTED_100baseT_Half;
	case SPEED_1000:
		return full ? SUPPORTED_1000baseT_Full :
			      SUPPORTED_1000baseT_Half;
	case SPEED_10000:
		return full ? SUPPORTED_10000baseT_Full : 0;
	default:
		return 0;
	}
}
1858 
1859 #define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1860 		      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1861 		      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1862 		      ADVERTISED_10000baseT_Full)
1863 
static int set_link_ksettings(struct net_device *dev,
			      const struct ethtool_link_ksettings *cmd)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_config;
	u32 advertising;

	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);

	if (!(lc->supported & SUPPORTED_Autoneg)) {
		/*
		 * PHY offers a single speed/duplex.  See if that's what's
		 * being requested.
		 */
		if (cmd->base.autoneg == AUTONEG_DISABLE) {
			u32 speed = cmd->base.speed;
			int cap = speed_duplex_to_caps(speed, cmd->base.duplex);
			if (lc->supported & cap)
				return 0;
		}
		return -EINVAL;
	}

	if (cmd->base.autoneg == AUTONEG_DISABLE) {
		u32 speed = cmd->base.speed;
		int cap = speed_duplex_to_caps(speed, cmd->base.duplex);

		/* 1Gb/s cannot be forced; it requires autonegotiation */
		if (!(lc->supported & cap) || (speed == SPEED_1000))
			return -EINVAL;
		lc->requested_speed = speed;
		lc->requested_duplex = cmd->base.duplex;
		lc->advertising = 0;
	} else {
		/* restrict the advertised modes to what the PHY supports */
		advertising &= ADVERTISED_MASK;
		advertising &= lc->supported;
		if (!advertising)
			return -EINVAL;
		lc->requested_speed = SPEED_INVALID;
		lc->requested_duplex = DUPLEX_INVALID;
		lc->advertising = advertising | ADVERTISED_Autoneg;
	}
	lc->autoneg = cmd->base.autoneg;
	/* apply immediately if the interface is up */
	if (netif_running(dev))
		t3_link_start(&p->phy, &p->mac, lc);
	return 0;
}
1911 
1912 static void get_pauseparam(struct net_device *dev,
1913 			   struct ethtool_pauseparam *epause)
1914 {
1915 	struct port_info *p = netdev_priv(dev);
1916 
1917 	epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1918 	epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1919 	epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
1920 }
1921 
static int set_pauseparam(struct net_device *dev,
			  struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_config;

	/* pause autoneg is only valid if the PHY supports autonegotiation */
	if (epause->autoneg == AUTONEG_DISABLE)
		lc->requested_fc = 0;
	else if (lc->supported & SUPPORTED_Autoneg)
		lc->requested_fc = PAUSE_AUTONEG;
	else
		return -EINVAL;

	if (epause->rx_pause)
		lc->requested_fc |= PAUSE_RX;
	if (epause->tx_pause)
		lc->requested_fc |= PAUSE_TX;
	if (lc->autoneg == AUTONEG_ENABLE) {
		/* renegotiate so the link partner learns the new setting */
		if (netif_running(dev))
			t3_link_start(&p->phy, &p->mac, lc);
	} else {
		/* apply flow control to the MAC without renegotiating */
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
		if (netif_running(dev))
			t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
	}
	return 0;
}
1949 
1950 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e,
1951 			  struct kernel_ethtool_ringparam *kernel_e,
1952 			  struct netlink_ext_ack *extack)
1953 {
1954 	struct port_info *pi = netdev_priv(dev);
1955 	struct adapter *adapter = pi->adapter;
1956 	const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
1957 
1958 	e->rx_max_pending = MAX_RX_BUFFERS;
1959 	e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1960 	e->tx_max_pending = MAX_TXQ_ENTRIES;
1961 
1962 	e->rx_pending = q->fl_size;
1963 	e->rx_mini_pending = q->rspq_size;
1964 	e->rx_jumbo_pending = q->jumbo_size;
1965 	e->tx_pending = q->txq_size[0];
1966 }
1967 
1968 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e,
1969 			 struct kernel_ethtool_ringparam *kernel_e,
1970 			 struct netlink_ext_ack *extack)
1971 {
1972 	struct port_info *pi = netdev_priv(dev);
1973 	struct adapter *adapter = pi->adapter;
1974 	struct qset_params *q;
1975 	int i;
1976 
1977 	if (e->rx_pending > MAX_RX_BUFFERS ||
1978 	    e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
1979 	    e->tx_pending > MAX_TXQ_ENTRIES ||
1980 	    e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1981 	    e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1982 	    e->rx_pending < MIN_FL_ENTRIES ||
1983 	    e->rx_jumbo_pending < MIN_FL_ENTRIES ||
1984 	    e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
1985 		return -EINVAL;
1986 
1987 	if (adapter->flags & FULL_INIT_DONE)
1988 		return -EBUSY;
1989 
1990 	q = &adapter->params.sge.qset[pi->first_qset];
1991 	for (i = 0; i < pi->nqsets; ++i, ++q) {
1992 		q->rspq_size = e->rx_mini_pending;
1993 		q->fl_size = e->rx_pending;
1994 		q->jumbo_size = e->rx_jumbo_pending;
1995 		q->txq_size[0] = e->tx_pending;
1996 		q->txq_size[1] = e->tx_pending;
1997 		q->txq_size[2] = e->tx_pending;
1998 	}
1999 	return 0;
2000 }
2001 
2002 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c,
2003 			struct kernel_ethtool_coalesce *kernel_coal,
2004 			struct netlink_ext_ack *extack)
2005 {
2006 	struct port_info *pi = netdev_priv(dev);
2007 	struct adapter *adapter = pi->adapter;
2008 	struct qset_params *qsp;
2009 	struct sge_qset *qs;
2010 	int i;
2011 
2012 	if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
2013 		return -EINVAL;
2014 
2015 	for (i = 0; i < pi->nqsets; i++) {
2016 		qsp = &adapter->params.sge.qset[i];
2017 		qs = &adapter->sge.qs[i];
2018 		qsp->coalesce_usecs = c->rx_coalesce_usecs;
2019 		t3_update_qset_coalesce(qs, qsp);
2020 	}
2021 
2022 	return 0;
2023 }
2024 
2025 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c,
2026 			struct kernel_ethtool_coalesce *kernel_coal,
2027 			struct netlink_ext_ack *extack)
2028 {
2029 	struct port_info *pi = netdev_priv(dev);
2030 	struct adapter *adapter = pi->adapter;
2031 	struct qset_params *q = adapter->params.sge.qset;
2032 
2033 	c->rx_coalesce_usecs = q->coalesce_usecs;
2034 	return 0;
2035 }
2036 
2037 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
2038 		      u8 * data)
2039 {
2040 	struct port_info *pi = netdev_priv(dev);
2041 	struct adapter *adapter = pi->adapter;
2042 	int cnt;
2043 
2044 	e->magic = EEPROM_MAGIC;
2045 	cnt = pci_read_vpd(adapter->pdev, e->offset, e->len, data);
2046 	if (cnt < 0)
2047 		return cnt;
2048 
2049 	e->len = cnt;
2050 
2051 	return 0;
2052 }
2053 
static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		      u8 * data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 aligned_offset, aligned_len;
	u8 *buf;
	int err;

	if (eeprom->magic != EEPROM_MAGIC)
		return -EINVAL;

	/* VPD accesses are 4-byte aligned; widen the window if needed */
	aligned_offset = eeprom->offset & ~3;
	aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;

	if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
		/* read-modify-write the surrounding aligned region */
		buf = kmalloc(aligned_len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		err = pci_read_vpd(adapter->pdev, aligned_offset, aligned_len,
				   buf);
		if (err < 0)
			goto out;
		memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
	} else
		buf = data;

	/* disable write protection for the duration of the update */
	err = t3_seeprom_wp(adapter, 0);
	if (err)
		goto out;

	err = pci_write_vpd(adapter->pdev, aligned_offset, aligned_len, buf);
	if (err >= 0)
		err = t3_seeprom_wp(adapter, 1);	/* re-protect */
out:
	if (buf != data)
		kfree(buf);
	return err < 0 ? err : 0;
}
2093 
2094 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2095 {
2096 	wol->supported = 0;
2097 	wol->wolopts = 0;
2098 	memset(&wol->sopass, 0, sizeof(wol->sopass));
2099 }
2100 
/* ethtool entry points; only the Rx usecs coalescing parameter is tunable */
static const struct ethtool_ops cxgb_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS,
	.get_drvinfo = get_drvinfo,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
	.get_ringparam = get_sge_param,
	.set_ringparam = set_sge_param,
	.get_coalesce = get_coalesce,
	.set_coalesce = set_coalesce,
	.get_eeprom_len = get_eeprom_len,
	.get_eeprom = get_eeprom,
	.set_eeprom = set_eeprom,
	.get_pauseparam = get_pauseparam,
	.set_pauseparam = set_pauseparam,
	.get_link = ethtool_op_get_link,
	.get_strings = get_strings,
	.set_phys_id = set_phys_id,
	.nway_reset = restart_autoneg,
	.get_sset_count = get_sset_count,
	.get_ethtool_stats = get_stats,
	.get_regs_len = get_regs_len,
	.get_regs = get_regs,
	.get_wol = get_wol,
	.get_link_ksettings = get_link_ksettings,
	.set_link_ksettings = set_link_ksettings,
};
2127 
/*
 * Range check for the private ioctl parameters: a negative value means
 * "parameter not supplied" and is always accepted; otherwise the value
 * must lie within [lo, hi].
 */
static int in_range(int val, int lo, int hi)
{
	if (val < 0)
		return 1;
	return lo <= val && val <= hi;
}
2132 
/*
 * Handler for the driver-private SIOCCHIOCTL ioctl.  The sub-command is
 * the first word of the user buffer; each sub-command validates its own
 * permissions and arguments.  Most require CAP_NET_ADMIN and several
 * are only permitted before the adapter is fully initialized.
 */
static int cxgb_siocdevprivate(struct net_device *dev,
			       struct ifreq *ifreq,
			       void __user *useraddr,
			       int cmd)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret;

	if (cmd != SIOCCHIOCTL)
		return -EOPNOTSUPP;

	/* Fetch the Chelsio sub-command from the start of the buffer. */
	if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
		return -EFAULT;

	switch (cmd) {
	/* Configure one queue set's ring sizes / coalescing / polling. */
	case CHELSIO_SET_QSET_PARAMS:{
		int i;
		struct qset_params *q;
		struct ch_qset_params t;
		int q1 = pi->first_qset;
		int nqsets = pi->nqsets;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		if (t.cmd != CHELSIO_SET_QSET_PARAMS)
			return -EINVAL;
		/* NOTE(review): unlike the GET path below, t.qset_idx is
		 * used without array_index_nospec() — consider adding it. */
		if (t.qset_idx >= SGE_QSETS)
			return -EINVAL;
		/* Negative values mean "leave unchanged" (see in_range()). */
		if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
		    !in_range(t.cong_thres, 0, 255) ||
		    !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
			      MAX_TXQ_ENTRIES) ||
		    !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
			      MAX_TXQ_ENTRIES) ||
		    !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
			      MAX_CTRL_TXQ_ENTRIES) ||
		    !in_range(t.fl_size[0], MIN_FL_ENTRIES,
			      MAX_RX_BUFFERS) ||
		    !in_range(t.fl_size[1], MIN_FL_ENTRIES,
			      MAX_RX_JUMBO_BUFFERS) ||
		    !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
			      MAX_RSPQ_ENTRIES))
			return -EINVAL;

		/* Ring sizes and polling mode can't change once the
		 * adapter is fully initialized. */
		if ((adapter->flags & FULL_INIT_DONE) &&
			(t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
			t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
			t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
			t.polling >= 0 || t.cong_thres >= 0))
			return -EBUSY;

		/* Allow setting of any available qset when offload enabled */
		if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
			q1 = 0;
			for_each_port(adapter, i) {
				pi = adap2pinfo(adapter, i);
				nqsets += pi->first_qset + pi->nqsets;
			}
		}

		if (t.qset_idx < q1)
			return -EINVAL;
		if (t.qset_idx > q1 + nqsets - 1)
			return -EINVAL;

		q = &adapter->params.sge.qset[t.qset_idx];

		if (t.rspq_size >= 0)
			q->rspq_size = t.rspq_size;
		if (t.fl_size[0] >= 0)
			q->fl_size = t.fl_size[0];
		if (t.fl_size[1] >= 0)
			q->jumbo_size = t.fl_size[1];
		if (t.txq_size[0] >= 0)
			q->txq_size[0] = t.txq_size[0];
		if (t.txq_size[1] >= 0)
			q->txq_size[1] = t.txq_size[1];
		if (t.txq_size[2] >= 0)
			q->txq_size[2] = t.txq_size[2];
		if (t.cong_thres >= 0)
			q->cong_thres = t.cong_thres;
		if (t.intr_lat >= 0) {
			struct sge_qset *qs =
				&adapter->sge.qs[t.qset_idx];

			/* Coalescing can change at runtime; push it to HW. */
			q->coalesce_usecs = t.intr_lat;
			t3_update_qset_coalesce(qs, q);
		}
		if (t.polling >= 0) {
			if (adapter->flags & USING_MSIX)
				q->polling = t.polling;
			else {
				/* No polling with INTx for T3A */
				if (adapter->params.rev == 0 &&
					!(adapter->flags & USING_MSI))
					t.polling = 0;

				/* With a shared interrupt, polling mode must
				 * be uniform across all queue sets. */
				for (i = 0; i < SGE_QSETS; i++) {
					q = &adapter->params.sge.
						qset[i];
					q->polling = t.polling;
				}
			}
		}

		/* "lro" is mapped onto the GRO feature flag. */
		if (t.lro >= 0) {
			if (t.lro)
				dev->wanted_features |= NETIF_F_GRO;
			else
				dev->wanted_features &= ~NETIF_F_GRO;
			netdev_update_features(dev);
		}

		break;
	}
	/* Read back one queue set's parameters. */
	case CHELSIO_GET_QSET_PARAMS:{
		struct qset_params *q;
		struct ch_qset_params t;
		int q1 = pi->first_qset;
		int nqsets = pi->nqsets;
		int i;

		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;

		if (t.cmd != CHELSIO_GET_QSET_PARAMS)
			return -EINVAL;

		/* Display qsets for all ports when offload enabled */
		if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
			q1 = 0;
			for_each_port(adapter, i) {
				pi = adap2pinfo(adapter, i);
				nqsets = pi->first_qset + pi->nqsets;
			}
		}

		if (t.qset_idx >= nqsets)
			return -EINVAL;
		/* Spectre v1 hardening: clamp the user-supplied index. */
		t.qset_idx = array_index_nospec(t.qset_idx, nqsets);

		q = &adapter->params.sge.qset[q1 + t.qset_idx];
		t.rspq_size = q->rspq_size;
		t.txq_size[0] = q->txq_size[0];
		t.txq_size[1] = q->txq_size[1];
		t.txq_size[2] = q->txq_size[2];
		t.fl_size[0] = q->fl_size;
		t.fl_size[1] = q->jumbo_size;
		t.polling = q->polling;
		t.lro = !!(dev->features & NETIF_F_GRO);
		t.intr_lat = q->coalesce_usecs;
		t.cong_thres = q->cong_thres;
		t.qnum = q1;

		/* MSI-X vector 0 is the slow path, qsets start at 1. */
		if (adapter->flags & USING_MSIX)
			t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
		else
			t.vector = adapter->pdev->irq;

		if (copy_to_user(useraddr, &t, sizeof(t)))
			return -EFAULT;
		break;
	}
	/* Set the number of queue sets for this port and repack the
	 * first_qset assignments of all ports. */
	case CHELSIO_SET_QSET_NUM:{
		struct ch_reg edata;
		unsigned int i, first_qset = 0, other_qsets = 0;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (adapter->flags & FULL_INIT_DONE)
			return -EBUSY;
		if (copy_from_user(&edata, useraddr, sizeof(edata)))
			return -EFAULT;
		if (edata.cmd != CHELSIO_SET_QSET_NUM)
			return -EINVAL;
		/* More than one qset per port requires MSI-X. */
		if (edata.val < 1 ||
			(edata.val > 1 && !(adapter->flags & USING_MSIX)))
			return -EINVAL;

		for_each_port(adapter, i)
			if (adapter->port[i] && adapter->port[i] != dev)
				other_qsets += adap2pinfo(adapter, i)->nqsets;

		if (edata.val + other_qsets > SGE_QSETS)
			return -EINVAL;

		pi->nqsets = edata.val;

		for_each_port(adapter, i)
			if (adapter->port[i]) {
				pi = adap2pinfo(adapter, i);
				pi->first_qset = first_qset;
				first_qset += pi->nqsets;
			}
		break;
	}
	case CHELSIO_GET_QSET_NUM:{
		struct ch_reg edata;

		memset(&edata, 0, sizeof(struct ch_reg));

		edata.cmd = CHELSIO_GET_QSET_NUM;
		edata.val = pi->nqsets;
		if (copy_to_user(useraddr, &edata, sizeof(edata)))
			return -EFAULT;
		break;
	}
	/* Load a firmware image supplied inline after the header. */
	case CHELSIO_LOAD_FW:{
		u8 *fw_data;
		struct ch_mem_range t;

		if (!capable(CAP_SYS_RAWIO))
			return -EPERM;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		if (t.cmd != CHELSIO_LOAD_FW)
			return -EINVAL;
		/* Check t.len sanity ? */
		fw_data = memdup_user(useraddr + sizeof(t), t.len);
		if (IS_ERR(fw_data))
			return PTR_ERR(fw_data);

		ret = t3_load_fw(adapter, fw_data, t.len);
		kfree(fw_data);
		if (ret)
			return ret;
		break;
	}
	/* Replace the TP MTU table (offload only, adapter quiesced). */
	case CHELSIO_SETMTUTAB:{
		struct ch_mtus m;
		int i;

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (offload_running(adapter))
			return -EBUSY;
		if (copy_from_user(&m, useraddr, sizeof(m)))
			return -EFAULT;
		if (m.cmd != CHELSIO_SETMTUTAB)
			return -EINVAL;
		if (m.nmtus != NMTUS)
			return -EINVAL;
		if (m.mtus[0] < 81)	/* accommodate SACK */
			return -EINVAL;

		/* MTUs must be in ascending order */
		for (i = 1; i < NMTUS; ++i)
			if (m.mtus[i] < m.mtus[i - 1])
				return -EINVAL;

		memcpy(adapter->params.mtus, m.mtus,
			sizeof(adapter->params.mtus));
		break;
	}
	/* Report the payload-memory (PM) layout. */
	case CHELSIO_GET_PM:{
		struct tp_params *p = &adapter->params.tp;
		struct ch_pm m = {.cmd = CHELSIO_GET_PM };

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		m.tx_pg_sz = p->tx_pg_size;
		m.tx_num_pg = p->tx_num_pgs;
		m.rx_pg_sz = p->rx_pg_size;
		m.rx_num_pg = p->rx_num_pgs;
		m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
		if (copy_to_user(useraddr, &m, sizeof(m)))
			return -EFAULT;
		break;
	}
	/* Configure the payload-memory page sizes/counts. */
	case CHELSIO_SET_PM:{
		struct ch_pm m;
		struct tp_params *p = &adapter->params.tp;

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (adapter->flags & FULL_INIT_DONE)
			return -EBUSY;
		if (copy_from_user(&m, useraddr, sizeof(m)))
			return -EFAULT;
		if (m.cmd != CHELSIO_SET_PM)
			return -EINVAL;
		if (!is_power_of_2(m.rx_pg_sz) ||
			!is_power_of_2(m.tx_pg_sz))
			return -EINVAL;	/* not power of 2 */
		if (!(m.rx_pg_sz & 0x14000))
			return -EINVAL;	/* not 16KB or 64KB */
		/* tx page size mask: allowed power-of-2 sizes per the
		 * hardware (bits set in 0x1554000). */
		if (!(m.tx_pg_sz & 0x1554000))
			return -EINVAL;
		/* -1 means "keep the current value". */
		if (m.tx_num_pg == -1)
			m.tx_num_pg = p->tx_num_pgs;
		if (m.rx_num_pg == -1)
			m.rx_num_pg = p->rx_num_pgs;
		if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
			return -EINVAL;
		if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
			m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
			return -EINVAL;
		p->rx_pg_size = m.rx_pg_sz;
		p->tx_pg_size = m.tx_pg_sz;
		p->rx_num_pgs = m.rx_num_pg;
		p->tx_num_pgs = m.tx_num_pg;
		break;
	}
	/* Dump a range of one of the MC7 memories to user space. */
	case CHELSIO_GET_MEM:{
		struct ch_mem_range t;
		struct mc7 *mem;
		u64 buf[32];

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (!(adapter->flags & FULL_INIT_DONE))
			return -EIO;	/* need the memory controllers */
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		if (t.cmd != CHELSIO_GET_MEM)
			return -EINVAL;
		/* MC7 is read 8 bytes at a time. */
		if ((t.addr & 7) || (t.len & 7))
			return -EINVAL;
		if (t.mem_id == MEM_CM)
			mem = &adapter->cm;
		else if (t.mem_id == MEM_PMRX)
			mem = &adapter->pmrx;
		else if (t.mem_id == MEM_PMTX)
			mem = &adapter->pmtx;
		else
			return -EINVAL;

		/*
		 * Version scheme:
		 * bits 0..9: chip version
		 * bits 10..15: chip revision
		 */
		t.version = 3 | (adapter->params.rev << 10);
		if (copy_to_user(useraddr, &t, sizeof(t)))
			return -EFAULT;

		/*
		 * Read 256 bytes at a time as len can be large and we don't
		 * want to use huge intermediate buffers.
		 */
		useraddr += sizeof(t);	/* advance to start of buffer */
		while (t.len) {
			unsigned int chunk =
				min_t(unsigned int, t.len, sizeof(buf));

			ret =
				t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
						buf);
			if (ret)
				return ret;
			if (copy_to_user(useraddr, buf, chunk))
				return -EFAULT;
			useraddr += chunk;
			t.addr += chunk;
			t.len -= chunk;
		}
		break;
	}
	/* Install TX/RX packet trace filters. */
	case CHELSIO_SET_TRACE_FILTER:{
		struct ch_trace t;
		const struct trace_params *tp;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (!offload_running(adapter))
			return -EAGAIN;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		if (t.cmd != CHELSIO_SET_TRACE_FILTER)
			return -EINVAL;

		/* The trace parameters start at the sip member of ch_trace;
		 * the layouts are expected to match from there on. */
		tp = (const struct trace_params *)&t.sip;
		if (t.config_tx)
			t3_config_trace_filter(adapter, tp, 0,
						t.invert_match,
						t.trace_tx);
		if (t.config_rx)
			t3_config_trace_filter(adapter, tp, 1,
						t.invert_match,
						t.trace_rx);
		break;
	}
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
2529 
/*
 * Standard MII ioctls, forwarded to the mdio layer.  On 10G adapters a
 * phy_id still in the legacy packed PRTAD/DEVAD format is converted to
 * the clause-45 encoding first.
 */
static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	struct mii_ioctl_data *data = if_mii(req);
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	switch (cmd) {
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		/* Convert phy_id from older PRTAD/DEVAD format */
		if (is_10G(adapter) &&
		    !mdio_phy_id_is_c45(data->phy_id) &&
		    (data->phy_id & 0x1f00) &&
		    !(data->phy_id & 0xe0e0))
			data->phy_id = mdio_phy_id_c45(data->phy_id >> 8,
						       data->phy_id & 0x1f);
		fallthrough;
	case SIOCGMIIPHY:
		return mdio_mii_ioctl(&pi->phy.mdio, data, cmd);
	default:
		return -EOPNOTSUPP;
	}
}
2553 
2554 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2555 {
2556 	struct port_info *pi = netdev_priv(dev);
2557 	struct adapter *adapter = pi->adapter;
2558 	int ret;
2559 
2560 	if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
2561 		return ret;
2562 	dev->mtu = new_mtu;
2563 	init_port_mtus(adapter);
2564 	if (adapter->params.rev == 0 && offload_running(adapter))
2565 		t3_load_mtus(adapter, adapter->params.mtus,
2566 			     adapter->params.a_wnd, adapter->params.b_wnd,
2567 			     adapter->port[0]->mtu);
2568 	return 0;
2569 }
2570 
2571 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2572 {
2573 	struct port_info *pi = netdev_priv(dev);
2574 	struct adapter *adapter = pi->adapter;
2575 	struct sockaddr *addr = p;
2576 
2577 	if (!is_valid_ether_addr(addr->sa_data))
2578 		return -EADDRNOTAVAIL;
2579 
2580 	eth_hw_addr_set(dev, addr->sa_data);
2581 	t3_mac_set_address(&pi->mac, LAN_MAC_IDX, dev->dev_addr);
2582 	if (offload_running(adapter))
2583 		write_smt_entry(adapter, pi->port_id);
2584 	return 0;
2585 }
2586 
2587 static netdev_features_t cxgb_fix_features(struct net_device *dev,
2588 	netdev_features_t features)
2589 {
2590 	/*
2591 	 * Since there is no support for separate rx/tx vlan accel
2592 	 * enable/disable make sure tx flag is always in same state as rx.
2593 	 */
2594 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
2595 		features |= NETIF_F_HW_VLAN_CTAG_TX;
2596 	else
2597 		features &= ~NETIF_F_HW_VLAN_CTAG_TX;
2598 
2599 	return features;
2600 }
2601 
2602 static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
2603 {
2604 	netdev_features_t changed = dev->features ^ features;
2605 
2606 	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
2607 		cxgb_vlan_mode(dev, features);
2608 
2609 	return 0;
2610 }
2611 
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Poll every queue set owned by this port, simulating its interrupt. */
static void cxgb_netpoll(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int qidx;

	for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
		struct sge_qset *qs = &adapter->sge.qs[qidx];
		/* With MSI-X each qset has its own vector and the handler
		 * expects the qset; otherwise it expects the adapter. */
		void *source = (adapter->flags & USING_MSIX) ? (void *)qs
							     : (void *)adapter;

		t3_intr_handler(adapter, qs->rspq.polling) (0, source);
	}
}
#endif
2632 
2633 /*
2634  * Periodic accumulation of MAC statistics.
2635  */
2636 static void mac_stats_update(struct adapter *adapter)
2637 {
2638 	int i;
2639 
2640 	for_each_port(adapter, i) {
2641 		struct net_device *dev = adapter->port[i];
2642 		struct port_info *p = netdev_priv(dev);
2643 
2644 		if (netif_running(dev)) {
2645 			spin_lock(&adapter->stats_lock);
2646 			t3_mac_update_stats(&p->mac);
2647 			spin_unlock(&adapter->stats_lock);
2648 		}
2649 	}
2650 }
2651 
/*
 * Poll link state for all ports.  Link faults recorded by the interrupt
 * path are serviced first; ports whose PHY cannot raise interrupts
 * (no SUPPORTED_IRQ) are then polled explicitly for link changes.
 */
static void check_link_status(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);
		int link_fault;

		/* Sample the flag set by t3_os_link_fault_handler(). */
		spin_lock_irq(&adapter->work_lock);
		link_fault = p->link_fault;
		spin_unlock_irq(&adapter->work_lock);

		if (link_fault) {
			t3_link_fault(adapter, i);
			continue;
		}

		if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev)) {
			/* Mask XGMAC interrupts and read the latched status
			 * before polling (presumably clear-on-read, so the
			 * poll and the IRQ path don't double-report). */
			t3_xgm_intr_disable(adapter, i);
			t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);

			t3_link_changed(adapter, i);
			t3_xgm_intr_enable(adapter, i);
		}
	}
}
2679 
2680 static void check_t3b2_mac(struct adapter *adapter)
2681 {
2682 	int i;
2683 
2684 	if (!rtnl_trylock())	/* synchronize with ifdown */
2685 		return;
2686 
2687 	for_each_port(adapter, i) {
2688 		struct net_device *dev = adapter->port[i];
2689 		struct port_info *p = netdev_priv(dev);
2690 		int status;
2691 
2692 		if (!netif_running(dev))
2693 			continue;
2694 
2695 		status = 0;
2696 		if (netif_running(dev) && netif_carrier_ok(dev))
2697 			status = t3b2_mac_watchdog_task(&p->mac);
2698 		if (status == 1)
2699 			p->mac.stats.num_toggled++;
2700 		else if (status == 2) {
2701 			struct cmac *mac = &p->mac;
2702 
2703 			t3_mac_set_mtu(mac, dev->mtu);
2704 			t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
2705 			cxgb_set_rxmode(dev);
2706 			t3_link_start(&p->phy, mac, &p->link_config);
2707 			t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2708 			t3_port_intr_enable(adapter, p->port_id);
2709 			p->mac.stats.num_resets++;
2710 		}
2711 	}
2712 	rtnl_unlock();
2713 }
2714 
2715 
/*
 * Periodic adapter maintenance task: polls link state, accumulates MAC
 * statistics, runs the T3B2 MAC watchdog, and polls/clears interrupt
 * conditions (XGMAC RX FIFO overflow, SGE free-list empty) that would
 * otherwise flood the system with interrupts.  Reschedules itself while
 * any port is open.
 */
static void t3_adap_check_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       adap_check_task.work);
	const struct adapter_params *p = &adapter->params;
	int port;
	unsigned int v, status, reset;

	adapter->check_task_cnt++;

	check_link_status(adapter);

	/* Accumulate MAC stats if needed */
	if (!p->linkpoll_period ||
	    (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
	    p->stats_update_period) {
		mac_stats_update(adapter);
		adapter->check_task_cnt = 0;
	}

	if (p->rev == T3_REV_B2)
		check_t3b2_mac(adapter);

	/*
	 * Scan the XGMAC's to check for various conditions which we want to
	 * monitor in a periodic polling manner rather than via an interrupt
	 * condition.  This is used for conditions which would otherwise flood
	 * the system with interrupts and we only really need to know that the
	 * conditions are "happening" ...  For each condition we count the
	 * detection of the condition and reset it for the next polling loop.
	 */
	for_each_port(adapter, port) {
		struct cmac *mac =  &adap2pinfo(adapter, port)->mac;
		u32 cause;

		cause = t3_read_reg(adapter, A_XGM_INT_CAUSE + mac->offset);
		reset = 0;
		if (cause & F_RXFIFO_OVERFLOW) {
			mac->stats.rx_fifo_ovfl++;
			reset |= F_RXFIFO_OVERFLOW;
		}

		/* Write back only the bits we handled to clear them. */
		t3_write_reg(adapter, A_XGM_INT_CAUSE + mac->offset, reset);
	}

	/*
	 * We do the same as above for FL_EMPTY interrupts.
	 */
	status = t3_read_reg(adapter, A_SG_INT_CAUSE);
	reset = 0;

	if (status & F_FLEMPTY) {
		struct sge_qset *qs = &adapter->sge.qs[0];
		int i = 0;

		reset |= F_FLEMPTY;

		v = (t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS) >> S_FL0EMPTY) &
		    0xffff;

		/* Walk the per-free-list empty bits: two free lists per
		 * qset, so advance to the next qset every other bit. */
		while (v) {
			qs->fl[i].empty += (v & 1);
			if (i)
				qs++;
			i ^= 1;
			v >>= 1;
		}
	}

	t3_write_reg(adapter, A_SG_INT_CAUSE, reset);

	/* Schedule the next check update if any port is active. */
	spin_lock_irq(&adapter->work_lock);
	if (adapter->open_device_map & PORT_MASK)
		schedule_chk_task(adapter);
	spin_unlock_irq(&adapter->work_lock);
}
2793 
/* Notify the offload driver that the doorbell FIFO has become full. */
static void db_full_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       db_full_task);

	cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_FULL, 0);
}
2801 
/* Notify the offload driver that the doorbell FIFO has drained. */
static void db_empty_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       db_empty_task);

	cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_EMPTY, 0);
}
2809 
/*
 * Handle a dropped doorbell: notify the offload driver, then after a
 * short randomized delay re-ring the driver queue-set doorbells.
 */
static void db_drop_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       db_drop_task);
	unsigned long delay = 1000;
	unsigned short r;

	cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_DROP, 0);

	/*
	 * Sleep a while before ringing the driver qset dbs.
	 * The delay is between 1000-2023 usecs.
	 */
	get_random_bytes(&r, 2);
	delay += r & 1023;
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(usecs_to_jiffies(delay));
	ring_dbs(adapter);
}
2829 
2830 /*
2831  * Processes external (PHY) interrupts in process context.
2832  */
2833 static void ext_intr_task(struct work_struct *work)
2834 {
2835 	struct adapter *adapter = container_of(work, struct adapter,
2836 					       ext_intr_handler_task);
2837 	int i;
2838 
2839 	/* Disable link fault interrupts */
2840 	for_each_port(adapter, i) {
2841 		struct net_device *dev = adapter->port[i];
2842 		struct port_info *p = netdev_priv(dev);
2843 
2844 		t3_xgm_intr_disable(adapter, i);
2845 		t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2846 	}
2847 
2848 	/* Re-enable link fault interrupts */
2849 	t3_phy_intr_handler(adapter);
2850 
2851 	for_each_port(adapter, i)
2852 		t3_xgm_intr_enable(adapter, i);
2853 
2854 	/* Now reenable external interrupts */
2855 	spin_lock_irq(&adapter->work_lock);
2856 	if (adapter->slow_intr_mask) {
2857 		adapter->slow_intr_mask |= F_T3DBG;
2858 		t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2859 		t3_write_reg(adapter, A_PL_INT_ENABLE0,
2860 			     adapter->slow_intr_mask);
2861 	}
2862 	spin_unlock_irq(&adapter->work_lock);
2863 }
2864 
2865 /*
2866  * Interrupt-context handler for external (PHY) interrupts.
2867  */
2868 void t3_os_ext_intr_handler(struct adapter *adapter)
2869 {
2870 	/*
2871 	 * Schedule a task to handle external interrupts as they may be slow
2872 	 * and we use a mutex to protect MDIO registers.  We disable PHY
2873 	 * interrupts in the meantime and let the task reenable them when
2874 	 * it's done.
2875 	 */
2876 	spin_lock(&adapter->work_lock);
2877 	if (adapter->slow_intr_mask) {
2878 		adapter->slow_intr_mask &= ~F_T3DBG;
2879 		t3_write_reg(adapter, A_PL_INT_ENABLE0,
2880 			     adapter->slow_intr_mask);
2881 		queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2882 	}
2883 	spin_unlock(&adapter->work_lock);
2884 }
2885 
/*
 * Record a link fault for a port.  The flag is set under the work lock
 * and consumed by check_link_status(), which performs the recovery.
 */
void t3_os_link_fault_handler(struct adapter *adapter, int port_id)
{
	struct net_device *netdev = adapter->port[port_id];
	struct port_info *pi = netdev_priv(netdev);

	spin_lock(&adapter->work_lock);
	pi->link_fault = 1;
	spin_unlock(&adapter->work_lock);
}
2895 
/*
 * Quiesce the adapter after a fatal error or PCI channel failure:
 * notify and close the offload side, stop all running ports and the
 * SGE timers, optionally reset the chip, and disable the PCI device.
 * @reset: non-zero to issue a chip reset
 * @on_wq: propagated to __cxgb_close() to indicate workqueue context
 */
static int t3_adapter_error(struct adapter *adapter, int reset, int on_wq)
{
	int i, ret = 0;

	if (is_offload(adapter) &&
	    test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
		cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0);
		offload_close(&adapter->tdev);
	}

	/* Stop all ports */
	for_each_port(adapter, i) {
		struct net_device *netdev = adapter->port[i];

		if (netif_running(netdev))
			__cxgb_close(netdev, on_wq);
	}

	/* Stop SGE timers */
	t3_stop_sge_timers(adapter);

	adapter->flags &= ~FULL_INIT_DONE;

	if (reset)
		ret = t3_reset_adapter(adapter);

	pci_disable_device(adapter->pdev);

	return ret;
}
2926 
2927 static int t3_reenable_adapter(struct adapter *adapter)
2928 {
2929 	if (pci_enable_device(adapter->pdev)) {
2930 		dev_err(&adapter->pdev->dev,
2931 			"Cannot re-enable PCI device after reset.\n");
2932 		goto err;
2933 	}
2934 	pci_set_master(adapter->pdev);
2935 	pci_restore_state(adapter->pdev);
2936 	pci_save_state(adapter->pdev);
2937 
2938 	/* Free sge resources */
2939 	t3_free_sge_resources(adapter);
2940 
2941 	if (t3_replay_prep_adapter(adapter))
2942 		goto err;
2943 
2944 	return 0;
2945 err:
2946 	return -1;
2947 }
2948 
2949 static void t3_resume_ports(struct adapter *adapter)
2950 {
2951 	int i;
2952 
2953 	/* Restart the ports */
2954 	for_each_port(adapter, i) {
2955 		struct net_device *netdev = adapter->port[i];
2956 
2957 		if (netif_running(netdev)) {
2958 			if (cxgb_open(netdev)) {
2959 				dev_err(&adapter->pdev->dev,
2960 					"can't bring device back up"
2961 					" after reset\n");
2962 				continue;
2963 			}
2964 		}
2965 	}
2966 
2967 	if (is_offload(adapter) && !ofld_disable)
2968 		cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0);
2969 }
2970 
2971 /*
2972  * processes a fatal error.
2973  * Bring the ports down, reset the chip, bring the ports back up.
2974  */
2975 static void fatal_error_task(struct work_struct *work)
2976 {
2977 	struct adapter *adapter = container_of(work, struct adapter,
2978 					       fatal_error_handler_task);
2979 	int err = 0;
2980 
2981 	rtnl_lock();
2982 	err = t3_adapter_error(adapter, 1, 1);
2983 	if (!err)
2984 		err = t3_reenable_adapter(adapter);
2985 	if (!err)
2986 		t3_resume_ports(adapter);
2987 
2988 	CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
2989 	rtnl_unlock();
2990 }
2991 
/*
 * Handle a fatal adapter error: stop DMA and the XGMACs, mask all
 * interrupts, and hand recovery off to fatal_error_task().  Also logs
 * the firmware status words read from the CIM control block.
 */
void t3_fatal_err(struct adapter *adapter)
{
	unsigned int fw_status[4];

	if (adapter->flags & FULL_INIT_DONE) {
		t3_sge_stop_dma(adapter);
		/* Silence both ports' TX and RX paths. */
		t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
		t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
		t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
		t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);

		spin_lock(&adapter->work_lock);
		t3_intr_disable(adapter);
		queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
		spin_unlock(&adapter->work_lock);
	}
	CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
	if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
		CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
			 fw_status[0], fw_status[1],
			 fw_status[2], fw_status[3]);
}
3014 
3015 /**
3016  * t3_io_error_detected - called when PCI error is detected
3017  * @pdev: Pointer to PCI device
3018  * @state: The current pci connection state
3019  *
3020  * This function is called after a PCI bus error affecting
3021  * this device has been detected.
3022  */
static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
					     pci_channel_state_t state)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	/* Channel is permanently dead: no recovery possible. */
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	/* Quiesce without a chip reset (reset = 0); the subsequent slot
	 * reset callback re-initializes the adapter. */
	t3_adapter_error(adapter, 0, 0);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
3036 
3037 /**
3038  * t3_io_slot_reset - called after the pci bus has been reset.
3039  * @pdev: Pointer to PCI device
3040  *
3041  * Restart the card from scratch, as if from a cold-boot.
3042  */
3043 static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
3044 {
3045 	struct adapter *adapter = pci_get_drvdata(pdev);
3046 
3047 	if (!t3_reenable_adapter(adapter))
3048 		return PCI_ERS_RESULT_RECOVERED;
3049 
3050 	return PCI_ERS_RESULT_DISCONNECT;
3051 }
3052 
3053 /**
3054  * t3_io_resume - called when traffic can start flowing again.
3055  * @pdev: Pointer to PCI device
3056  *
3057  * This callback is called when the error recovery driver tells us that
3058  * its OK to resume normal operation.
3059  */
static void t3_io_resume(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	/* Log the PCIe error register for post-mortem analysis. */
	CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n",
		 t3_read_reg(adapter, A_PCIE_PEX_ERR));

	/* Reopen the ports that were running; needs RTNL like ifup. */
	rtnl_lock();
	t3_resume_ports(adapter);
	rtnl_unlock();
}
3071 
/* PCI error recovery callbacks (AER): detect, slot reset, resume. */
static const struct pci_error_handlers t3_err_handler = {
	.error_detected = t3_io_error_detected,
	.slot_reset = t3_io_slot_reset,
	.resume = t3_io_resume,
};
3077 
3078 /*
3079  * Set the number of qsets based on the number of CPUs and the number of ports,
3080  * not to exceed the number of available qsets, assuming there are enough qsets
3081  * per port in HW.
3082  */
static void set_nqsets(struct adapter *adap)
{
	int i, j = 0;
	int num_cpus = netif_get_num_default_rss_queues();
	int hwports = adap->params.nports;
	/* One MSI-X vector is reserved for the slow path. */
	int nqsets = adap->msix_nvectors - 1;

	if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
		/* Split the vectors between the two ports when needed. */
		if (hwports == 2 &&
		    (hwports * nqsets > SGE_QSETS ||
		     num_cpus >= nqsets / hwports))
			nqsets /= hwports;
		if (nqsets > num_cpus)
			nqsets = num_cpus;
		if (nqsets < 1 || hwports == 4)
			nqsets = 1;
	} else {
		/* Rev-0 parts or non-MSI-X: one qset per port. */
		nqsets = 1;
	}

	/* Assign each port a contiguous run of qsets starting at j. */
	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = j;
		pi->nqsets = nqsets;
		j = pi->first_qset + nqsets;

		dev_info(&adap->pdev->dev,
			 "Port %d using %d queue sets.\n", i, nqsets);
	}
}
3114 
3115 static int cxgb_enable_msix(struct adapter *adap)
3116 {
3117 	struct msix_entry entries[SGE_QSETS + 1];
3118 	int vectors;
3119 	int i;
3120 
3121 	vectors = ARRAY_SIZE(entries);
3122 	for (i = 0; i < vectors; ++i)
3123 		entries[i].entry = i;
3124 
3125 	vectors = pci_enable_msix_range(adap->pdev, entries,
3126 					adap->params.nports + 1, vectors);
3127 	if (vectors < 0)
3128 		return vectors;
3129 
3130 	for (i = 0; i < vectors; ++i)
3131 		adap->msix_info[i].vec = entries[i].vector;
3132 	adap->msix_nvectors = vectors;
3133 
3134 	return 0;
3135 }
3136 
/*
 * Log a one-line summary for each registered port (adapter type, PHY,
 * bus variant/speed/width, interrupt mode), plus the memory sizes and
 * serial number once for the adapter as a whole.
 */
static void print_port_info(struct adapter *adap, const struct adapter_info *ai)
{
	static const char *pci_variant[] = {
		"PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
	};

	int i;
	char buf[80];

	if (is_pcie(adap))
		snprintf(buf, sizeof(buf), "%s x%d",
			 pci_variant[adap->params.pci.variant],
			 adap->params.pci.width);
	else
		snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
			 pci_variant[adap->params.pci.variant],
			 adap->params.pci.speed, adap->params.pci.width);

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		const struct port_info *pi = netdev_priv(dev);

		if (!test_bit(i, &adap->registered_device_map))
			continue;
		netdev_info(dev, "%s %s %sNIC (rev %d) %s%s\n",
			    ai->desc, pi->phy.desc,
			    is_offload(adap) ? "R" : "", adap->params.rev, buf,
			    (adap->flags & USING_MSIX) ? " MSI-X" :
			    (adap->flags & USING_MSI) ? " MSI" : "");
		/* Adapter-wide info is printed only for the primary port
		 * (the one sharing the adapter's name). */
		if (adap->name == dev->name && adap->params.vpd.mclk)
			pr_info("%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
			       adap->name, t3_mc7_size(&adap->cm) >> 20,
			       t3_mc7_size(&adap->pmtx) >> 20,
			       t3_mc7_size(&adap->pmrx) >> 20,
			       adap->params.vpd.sn);
	}
}
3174 
/* Net device callbacks shared by all ports of a T3 adapter. */
static const struct net_device_ops cxgb_netdev_ops = {
	.ndo_open		= cxgb_open,
	.ndo_stop		= cxgb_close,
	.ndo_start_xmit		= t3_eth_xmit,
	.ndo_get_stats		= cxgb_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= cxgb_set_rxmode,
	.ndo_eth_ioctl		= cxgb_ioctl,
	.ndo_siocdevprivate	= cxgb_siocdevprivate,
	.ndo_change_mtu		= cxgb_change_mtu,
	.ndo_set_mac_address	= cxgb_set_mac_addr,
	.ndo_fix_features	= cxgb_fix_features,
	.ndo_set_features	= cxgb_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cxgb_netpoll,
#endif
};
3192 
3193 static void cxgb3_init_iscsi_mac(struct net_device *dev)
3194 {
3195 	struct port_info *pi = netdev_priv(dev);
3196 
3197 	memcpy(pi->iscsic.mac_addr, dev->dev_addr, ETH_ALEN);
3198 	pi->iscsic.mac_addr[3] |= 0x80;
3199 }
3200 
/* TSO offload feature bits and the subset of features propagated to
 * VLAN devices stacked on top of a port. */
#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
			NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
/*
 * init_one - PCI probe routine for a T3 adapter.
 *
 * Enables the PCI device, maps BAR0, allocates the adapter structure and
 * one net device per port, prepares the hardware, registers the net
 * devices, selects the interrupt mode (MSI-X > MSI > INTx) and distributes
 * the queue sets.  Per-port registration failures are tolerated as long as
 * at least one net device registers successfully.
 *
 * Returns 0 on success or a negative errno; all resources acquired up to
 * the point of failure are released via the goto-cleanup chain below.
 */
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int i, err;
	resource_size_t mmio_start, mmio_len;
	const struct adapter_info *ai;
	struct adapter *adapter = NULL;
	struct port_info *pi;

	/* Shared single-threaded workqueue, created lazily on first probe
	 * and torn down in cxgb3_cleanup_module(). */
	if (!cxgb3_wq) {
		cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
		if (!cxgb3_wq) {
			pr_err("cannot initialize work queue\n");
			return -ENOMEM;
		}
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		goto out_disable_device;
	}

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		dev_err(&pdev->dev, "no usable DMA configuration\n");
		goto out_release_regions;
	}

	pci_set_master(pdev);
	/* Saved state is restored by the EEH/error-recovery paths. */
	pci_save_state(pdev);

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);
	ai = t3_get_adapter_info(ent->driver_data);

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_release_regions;
	}

	/* Pre-allocated skb so control-plane sends cannot fail under
	 * memory pressure. */
	adapter->nofail_skb =
		alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_KERNEL);
	if (!adapter->nofail_skb) {
		dev_err(&pdev->dev, "cannot allocate nofail buffer\n");
		err = -ENOMEM;
		goto out_free_adapter;
	}

	adapter->regs = ioremap(mmio_start, mmio_len);
	if (!adapter->regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_free_adapter_nofail;
	}

	adapter->pdev = pdev;
	adapter->name = pci_name(pdev);
	adapter->msg_enable = dflt_msg_enable;
	adapter->mmio_len = mmio_len;

	mutex_init(&adapter->mdio_lock);
	spin_lock_init(&adapter->work_lock);
	spin_lock_init(&adapter->stats_lock);

	INIT_LIST_HEAD(&adapter->adapter_list);
	INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
	INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);

	INIT_WORK(&adapter->db_full_task, db_full_task);
	INIT_WORK(&adapter->db_empty_task, db_empty_task);
	INIT_WORK(&adapter->db_drop_task, db_drop_task);

	INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);

	/* Allocate one multiqueue net device per physical port. */
	for (i = 0; i < ai->nports0 + ai->nports1; ++i) {
		struct net_device *netdev;

		netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS);
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->port_id = i;
		/* Carrier stays off until the link comes up. */
		netif_carrier_off(netdev);
		netdev->irq = pdev->irq;
		netdev->mem_start = mmio_start;
		netdev->mem_end = mmio_start + mmio_len - 1;
		netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
			NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX;
		netdev->features |= netdev->hw_features |
				    NETIF_F_HW_VLAN_CTAG_TX;
		netdev->vlan_features |= netdev->features & VLAN_FEAT;

		netdev->features |= NETIF_F_HIGHDMA;

		netdev->netdev_ops = &cxgb_netdev_ops;
		netdev->ethtool_ops = &cxgb_ethtool_ops;
		/* Minimum MTU the hardware supports. */
		netdev->min_mtu = 81;
		netdev->max_mtu = ETH_MAX_MTU;
		netdev->dev_port = pi->port_id;
	}

	pci_set_drvdata(pdev, adapter);
	if (t3_prep_adapter(adapter, ai, 1) < 0) {
		err = -ENODEV;
		goto out_free_dev;
	}

	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		err = register_netdev(adapter->port[i]);
		if (err)
			dev_warn(&pdev->dev,
				 "cannot register net device %s, skipping\n",
				 adapter->port[i]->name);
		else {
			/*
			 * Change the name we use for messages to the name of
			 * the first successfully registered interface.
			 */
			if (!adapter->registered_device_map)
				adapter->name = adapter->port[i]->name;

			__set_bit(i, &adapter->registered_device_map);
		}
	}
	if (!adapter->registered_device_map) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		err = -ENODEV;
		goto out_free_dev;
	}

	for_each_port(adapter, i)
		cxgb3_init_iscsi_mac(adapter->port[i]);

	/* Driver's ready. Reflect it on LEDs */
	t3_led_ready(adapter);

	if (is_offload(adapter)) {
		__set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
		cxgb3_adapter_ofld(adapter);
	}

	/* See what interrupts we'll be using */
	if (msi > 1 && cxgb_enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0)
		adapter->flags |= USING_MSI;

	set_nqsets(adapter);

	err = sysfs_create_group(&adapter->port[0]->dev.kobj,
				 &cxgb3_attr_group);
	if (err) {
		dev_err(&pdev->dev, "cannot create sysfs group\n");
		goto out_close_led;
	}

	print_port_info(adapter, ai);
	return 0;

	/*
	 * NOTE(review): the out_close_led path falls through to
	 * out_free_dev, which frees net devices that register_netdev()
	 * succeeded on above without calling unregister_netdev() first —
	 * confirm whether a sysfs_create_group() failure can leak
	 * registered devices here.
	 */
out_close_led:
	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL, 0);

out_free_dev:
	iounmap(adapter->regs);
	for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i)
		if (adapter->port[i])
			free_netdev(adapter->port[i]);

out_free_adapter_nofail:
	kfree_skb(adapter->nofail_skb);

out_free_adapter:
	kfree(adapter);

out_release_regions:
	pci_release_regions(pdev);
out_disable_device:
	pci_disable_device(pdev);
out:
	return err;
}
3406 
/*
 * remove_one - PCI remove routine; undoes init_one().
 *
 * Teardown order matters: stop the SGE first so no new traffic flows,
 * detach the offload side, unregister the net devices, then release SGE
 * resources, interrupts, mappings and finally the PCI device itself.
 */
static void remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	if (adapter) {
		int i;

		t3_sge_stop(adapter);
		sysfs_remove_group(&adapter->port[0]->dev.kobj,
				   &cxgb3_attr_group);

		if (is_offload(adapter)) {
			cxgb3_adapter_unofld(adapter);
			/* Only close the offload device if it was opened. */
			if (test_bit(OFFLOAD_DEVMAP_BIT,
				     &adapter->open_device_map))
				offload_close(&adapter->tdev);
		}

		/* Unregister only the ports init_one() registered. */
		for_each_port(adapter, i)
		    if (test_bit(i, &adapter->registered_device_map))
			unregister_netdev(adapter->port[i]);

		t3_stop_sge_timers(adapter);
		t3_free_sge_resources(adapter);
		cxgb_disable_msi(adapter);

		for_each_port(adapter, i)
			if (adapter->port[i])
				free_netdev(adapter->port[i]);

		iounmap(adapter->regs);
		kfree_skb(adapter->nofail_skb);
		kfree(adapter);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
	}
}
3444 
/* PCI driver glue: probe/remove entry points and AER error handlers. */
static struct pci_driver driver = {
	.name = DRV_NAME,
	.id_table = cxgb3_pci_tbl,
	.probe = init_one,
	.remove = remove_one,
	.err_handler = &t3_err_handler,
};
3452 
3453 static int __init cxgb3_init_module(void)
3454 {
3455 	int ret;
3456 
3457 	cxgb3_offload_init();
3458 
3459 	ret = pci_register_driver(&driver);
3460 	return ret;
3461 }
3462 
3463 static void __exit cxgb3_cleanup_module(void)
3464 {
3465 	pci_unregister_driver(&driver);
3466 	if (cxgb3_wq)
3467 		destroy_workqueue(cxgb3_wq);
3468 }
3469 
/* Hook the init/exit routines into the module loader. */
module_init(cxgb3_init_module);
module_exit(cxgb3_cleanup_module);
3472