1 /*
2  * This file is part of the Chelsio T4 Ethernet driver for Linux.
3  *
4  * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  *     Redistribution and use in source and binary forms, with or
13  *     without modification, are permitted provided that the following
14  *     conditions are met:
15  *
16  *      - Redistributions of source code must retain the above
17  *        copyright notice, this list of conditions and the following
18  *        disclaimer.
19  *
20  *      - Redistributions in binary form must reproduce the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer in the documentation and/or other materials
23  *        provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  */
34 
35 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
36 
37 #include <linux/bitmap.h>
38 #include <linux/crc32.h>
39 #include <linux/ctype.h>
40 #include <linux/debugfs.h>
41 #include <linux/err.h>
42 #include <linux/etherdevice.h>
43 #include <linux/firmware.h>
44 #include <linux/if.h>
45 #include <linux/if_vlan.h>
46 #include <linux/init.h>
47 #include <linux/log2.h>
48 #include <linux/mdio.h>
49 #include <linux/module.h>
50 #include <linux/moduleparam.h>
51 #include <linux/mutex.h>
52 #include <linux/netdevice.h>
53 #include <linux/pci.h>
54 #include <linux/aer.h>
55 #include <linux/rtnetlink.h>
56 #include <linux/sched.h>
57 #include <linux/seq_file.h>
58 #include <linux/sockios.h>
59 #include <linux/vmalloc.h>
60 #include <linux/workqueue.h>
61 #include <net/neighbour.h>
62 #include <net/netevent.h>
63 #include <net/addrconf.h>
64 #include <asm/uaccess.h>
65 
66 #include "cxgb4.h"
67 #include "t4_regs.h"
68 #include "t4_msg.h"
69 #include "t4fw_api.h"
70 #include "l2t.h"
71 
72 #include <../drivers/net/bonding/bonding.h>
73 
74 #ifdef DRV_VERSION
75 #undef DRV_VERSION
76 #endif
77 #define DRV_VERSION "2.0.0-ko"
78 #define DRV_DESC "Chelsio T4/T5 Network Driver"
79 
80 /*
81  * Max interrupt hold-off timer value in us.  Queues fall back to this value
82  * under extreme memory pressure so it's largish to give the system time to
83  * recover.
84  */
85 #define MAX_SGE_TIMERVAL 200U
86 
87 enum {
88 	/*
89 	 * Physical Function provisioning constants.
90 	 */
91 	PFRES_NVI = 4,			/* # of Virtual Interfaces */
92 	PFRES_NETHCTRL = 128,		/* # of EQs used for ETH or CTRL Qs */
93 	PFRES_NIQFLINT = 128,		/* # of ingress Qs/w Free List(s)/intr
94 					 */
95 	PFRES_NEQ = 256,		/* # of egress queues */
96 	PFRES_NIQ = 0,			/* # of ingress queues */
97 	PFRES_TC = 0,			/* PCI-E traffic class */
98 	PFRES_NEXACTF = 128,		/* # of exact MPS filters */
99 
100 	PFRES_R_CAPS = FW_CMD_CAP_PF,
101 	PFRES_WX_CAPS = FW_CMD_CAP_PF,
102 
103 #ifdef CONFIG_PCI_IOV
104 	/*
105 	 * Virtual Function provisioning constants.  We need two extra Ingress
106 	 * Queues with Interrupt capability to serve as the VF's Firmware
107 	 * Event Queue and Forwarded Interrupt Queue (when using MSI mode) --
108 	 * neither will have Free Lists associated with them.  For each
109 	 * Ethernet/Control Egress Queue and for each Free List, we need an
110 	 * Egress Context.
111 	 */
112 	VFRES_NPORTS = 1,		/* # of "ports" per VF */
113 	VFRES_NQSETS = 2,		/* # of "Queue Sets" per VF */
114 
115 	VFRES_NVI = VFRES_NPORTS,	/* # of Virtual Interfaces */
116 	VFRES_NETHCTRL = VFRES_NQSETS,	/* # of EQs used for ETH or CTRL Qs */
117 	VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */
118 	VFRES_NEQ = VFRES_NQSETS*2,	/* # of egress queues */
119 	VFRES_NIQ = 0,			/* # of non-fl/int ingress queues */
120 	VFRES_TC = 0,			/* PCI-E traffic class */
121 	VFRES_NEXACTF = 16,		/* # of exact MPS filters */
122 
123 	VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT,
124 	VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF,
125 #endif
126 };
127 
128 /*
129  * Provide a Port Access Rights Mask for the specified PF/VF.  This is very
130  * static and likely not to be useful in the long run.  We really need to
131  * implement some form of persistent configuration which the firmware
132  * controls.
133  */
134 static unsigned int pfvfres_pmask(struct adapter *adapter,
135 				  unsigned int pf, unsigned int vf)
136 {
137 	unsigned int portn, portvec;
138 
139 	/*
140 	 * Give PF's access to all of the ports.
141 	 */
142 	if (vf == 0)
143 		return FW_PFVF_CMD_PMASK_MASK;
144 
145 	/*
146 	 * For VFs, we'll assign them access to the ports based purely on the
147 	 * PF.  We assign active ports in order, wrapping around if there are
148 	 * fewer active ports than PFs: e.g. active port[pf % nports].
149 	 * Unfortunately the adapter's port_info structs haven't been
150 	 * initialized yet so we have to compute this.
151 	 */
152 	if (adapter->params.nports == 0)
153 		return 0;
154 
155 	portn = pf % adapter->params.nports;
156 	portvec = adapter->params.portvec;
157 	for (;;) {
158 		/*
159 		 * Isolate the lowest set bit in the port vector.  If we're at
160 		 * the port number that we want, return that as the pmask;
161 		 * otherwise mask that bit out of the port vector and
162 		 * decrement our port number ...
163 		 */
164 		unsigned int pmask = portvec ^ (portvec & (portvec-1));
165 		if (portn == 0)
166 			return pmask;
167 		portn--;
168 		portvec &= ~pmask;
169 	}
170 	/*NOTREACHED*/
171 }
172 
173 enum {
174 	MAX_TXQ_ENTRIES      = 16384,
175 	MAX_CTRL_TXQ_ENTRIES = 1024,
176 	MAX_RSPQ_ENTRIES     = 16384,
177 	MAX_RX_BUFFERS       = 16384,
178 	MIN_TXQ_ENTRIES      = 32,
179 	MIN_CTRL_TXQ_ENTRIES = 32,
180 	MIN_RSPQ_ENTRIES     = 128,
181 	MIN_FL_ENTRIES       = 16
182 };
183 
184 /* Host shadow copy of ingress filter entry.  This is in host native format
185  * and doesn't match the ordering or bit order, etc. of the hardware or the
186  * firmware command.  The use of bit-field structure elements is purely to
187  * remind ourselves of the field size limitations and save memory in the case
188  * where the filter table is large.
189  */
190 struct filter_entry {
191 	/* Administrative fields for filter.
192 	 */
193 	u32 valid:1;            /* filter allocated and valid */
194 	u32 locked:1;           /* filter is administratively locked */
195 
196 	u32 pending:1;          /* filter action is pending firmware reply */
197 	u32 smtidx:8;           /* Source MAC Table index for smac */
198 	struct l2t_entry *l2t;  /* Layer Two Table entry for dmac */
199 
200 	/* The filter itself.  Most of this is a straight copy of information
201 	 * provided by the extended ioctl().  Some fields are translated to
202 	 * internal forms -- for instance the Ingress Queue ID passed in from
203 	 * the ioctl() is translated into the Absolute Ingress Queue ID.
204 	 */
205 	struct ch_filter_specification fs;
206 };
207 
208 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
209 			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
210 			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
211 
212 #define CH_DEVICE(devid, data) { PCI_VDEVICE(CHELSIO, devid), (data) }
213 
214 static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
215 	CH_DEVICE(0xa000, 0),  /* PE10K */
216 	CH_DEVICE(0x4001, -1),
217 	CH_DEVICE(0x4002, -1),
218 	CH_DEVICE(0x4003, -1),
219 	CH_DEVICE(0x4004, -1),
220 	CH_DEVICE(0x4005, -1),
221 	CH_DEVICE(0x4006, -1),
222 	CH_DEVICE(0x4007, -1),
223 	CH_DEVICE(0x4008, -1),
224 	CH_DEVICE(0x4009, -1),
225 	CH_DEVICE(0x400a, -1),
226 	CH_DEVICE(0x4401, 4),
227 	CH_DEVICE(0x4402, 4),
228 	CH_DEVICE(0x4403, 4),
229 	CH_DEVICE(0x4404, 4),
230 	CH_DEVICE(0x4405, 4),
231 	CH_DEVICE(0x4406, 4),
232 	CH_DEVICE(0x4407, 4),
233 	CH_DEVICE(0x4408, 4),
234 	CH_DEVICE(0x4409, 4),
235 	CH_DEVICE(0x440a, 4),
236 	CH_DEVICE(0x440d, 4),
237 	CH_DEVICE(0x440e, 4),
238 	CH_DEVICE(0x5001, 4),
239 	CH_DEVICE(0x5002, 4),
240 	CH_DEVICE(0x5003, 4),
241 	CH_DEVICE(0x5004, 4),
242 	CH_DEVICE(0x5005, 4),
243 	CH_DEVICE(0x5006, 4),
244 	CH_DEVICE(0x5007, 4),
245 	CH_DEVICE(0x5008, 4),
246 	CH_DEVICE(0x5009, 4),
247 	CH_DEVICE(0x500A, 4),
248 	CH_DEVICE(0x500B, 4),
249 	CH_DEVICE(0x500C, 4),
250 	CH_DEVICE(0x500D, 4),
251 	CH_DEVICE(0x500E, 4),
252 	CH_DEVICE(0x500F, 4),
253 	CH_DEVICE(0x5010, 4),
254 	CH_DEVICE(0x5011, 4),
255 	CH_DEVICE(0x5012, 4),
256 	CH_DEVICE(0x5013, 4),
257 	CH_DEVICE(0x5401, 4),
258 	CH_DEVICE(0x5402, 4),
259 	CH_DEVICE(0x5403, 4),
260 	CH_DEVICE(0x5404, 4),
261 	CH_DEVICE(0x5405, 4),
262 	CH_DEVICE(0x5406, 4),
263 	CH_DEVICE(0x5407, 4),
264 	CH_DEVICE(0x5408, 4),
265 	CH_DEVICE(0x5409, 4),
266 	CH_DEVICE(0x540A, 4),
267 	CH_DEVICE(0x540B, 4),
268 	CH_DEVICE(0x540C, 4),
269 	CH_DEVICE(0x540D, 4),
270 	CH_DEVICE(0x540E, 4),
271 	CH_DEVICE(0x540F, 4),
272 	CH_DEVICE(0x5410, 4),
273 	CH_DEVICE(0x5411, 4),
274 	CH_DEVICE(0x5412, 4),
275 	CH_DEVICE(0x5413, 4),
276 	{ 0, }
277 };
278 
279 #define FW4_FNAME "cxgb4/t4fw.bin"
280 #define FW5_FNAME "cxgb4/t5fw.bin"
281 #define FW4_CFNAME "cxgb4/t4-config.txt"
282 #define FW5_CFNAME "cxgb4/t5-config.txt"
283 
284 MODULE_DESCRIPTION(DRV_DESC);
285 MODULE_AUTHOR("Chelsio Communications");
286 MODULE_LICENSE("Dual BSD/GPL");
287 MODULE_VERSION(DRV_VERSION);
288 MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
289 MODULE_FIRMWARE(FW4_FNAME);
290 MODULE_FIRMWARE(FW5_FNAME);
291 
292 /*
293  * Normally we're willing to become the firmware's Master PF but will be happy
294  * if another PF has already become the Master and initialized the adapter.
295  * Setting "force_init" will cause this driver to forcibly establish itself as
296  * the Master PF and initialize the adapter.
297  */
298 static uint force_init;
299 
300 module_param(force_init, uint, 0644);
301 MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter");
302 
303 /*
304  * Normally if the firmware we connect to has Configuration File support, we
305  * use that and only fall back to the old Driver-based initialization if the
306  * Configuration File fails for some reason.  If force_old_init is set, then
307  * we'll always use the old Driver-based initialization sequence.
308  */
309 static uint force_old_init;
310 
311 module_param(force_old_init, uint, 0644);
312 MODULE_PARM_DESC(force_old_init, "Force old initialization sequence");
313 
314 static int dflt_msg_enable = DFLT_MSG_ENABLE;
315 
316 module_param(dflt_msg_enable, int, 0644);
317 MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");
318 
319 /*
320  * The driver uses the best interrupt scheme available on a platform in the
321  * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
322  * of these schemes the driver may consider as follows:
323  *
324  * msi = 2: choose from among all three options
325  * msi = 1: only consider MSI and INTx interrupts
326  * msi = 0: force INTx interrupts
327  */
328 static int msi = 2;
329 
330 module_param(msi, int, 0644);
331 MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
332 
333 /*
334  * Queue interrupt hold-off timer values.  Queues default to the first of these
335  * upon creation.
336  */
337 static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };
338 
339 module_param_array(intr_holdoff, uint, NULL, 0644);
340 MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
341 		 "0..4 in microseconds");
342 
343 static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };
344 
345 module_param_array(intr_cnt, uint, NULL, 0644);
346 MODULE_PARM_DESC(intr_cnt,
347 		 "thresholds 1..3 for queue interrupt packet counters");
348 
349 /*
350  * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
351  * offset by 2 bytes in order to have the IP headers line up on 4-byte
352  * boundaries.  This is a requirement for many architectures which will throw
353  * a machine check fault if an attempt is made to access one of the 4-byte IP
354  * header fields on a non-4-byte boundary.  And it's a major performance issue
355  * even on some architectures which allow it, like some implementations of the
356  * x86 ISA.  However, some architectures don't mind this and for some very
357  * edge-case performance sensitive applications (like forwarding large volumes
358  * of small packets), setting this DMA offset to 0 will decrease the number of
359  * PCI-E Bus transfers enough to measurably affect performance.
360  */
361 static int rx_dma_offset = 2;
362 
363 static bool vf_acls;
364 
365 #ifdef CONFIG_PCI_IOV
366 module_param(vf_acls, bool, 0644);
367 MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");
368 
369 /* Configure the number of PCI-E Virtual Functions which are to be instantiated
370  * on SR-IOV Capable Physical Functions.
371  */
372 static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV];
373 
374 module_param_array(num_vf, uint, NULL, 0644);
375 MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
376 #endif
377 
378 /*
379  * The filter TCAM has a fixed portion and a variable portion.  The fixed
380  * portion can match on source/destination IP IPv4/IPv6 addresses and TCP/UDP
381  * ports.  The variable portion is 36 bits which can include things like Exact
382  * Match MAC Index (9 bits), Ether Type (16 bits), IP Protocol (8 bits),
383  * [Inner] VLAN Tag (17 bits), etc. which, if all were somehow selected, would
384  * far exceed the 36-bit budget for this "compressed" header portion of the
385  * filter.  Thus, we have a scarce resource which must be carefully managed.
386  *
387  * By default we set this up to mostly match the set of filter matching
388  * capabilities of T3 but with accommodations for some of T4's more
389  * interesting features:
390  *
391  *   { IP Fragment (1), MPS Match Type (3), IP Protocol (8),
392  *     [Inner] VLAN (17), Port (3), FCoE (1) }
393  */
394 enum {
395 	TP_VLAN_PRI_MAP_DEFAULT = HW_TPL_FR_MT_PR_IV_P_FC,
396 	TP_VLAN_PRI_MAP_FIRST = FCOE_SHIFT,
397 	TP_VLAN_PRI_MAP_LAST = FRAGMENTATION_SHIFT,
398 };
399 
400 static unsigned int tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
401 
402 module_param(tp_vlan_pri_map, uint, 0644);
403 MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration");
404 
405 static struct dentry *cxgb4_debugfs_root;
406 
407 static LIST_HEAD(adapter_list);
408 static DEFINE_MUTEX(uld_mutex);
409 /* Adapter list to be accessed from atomic context */
410 static LIST_HEAD(adap_rcu_list);
411 static DEFINE_SPINLOCK(adap_rcu_lock);
412 static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
413 static const char *uld_str[] = { "RDMA", "iSCSI" };
414 
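/*
 * Log a port's current link state: "link down", or the negotiated speed,
 * duplex and PAUSE (flow control) settings when the link is up.
 */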
415 static void link_report(struct net_device *dev)
416 {
417 	if (!netif_carrier_ok(dev))
418 		netdev_info(dev, "link down\n");
419 	else {
420 		static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };
421 
422 		const char *s = "10Mbps";
423 		const struct port_info *p = netdev_priv(dev);
424 
425 		switch (p->link_cfg.speed) {
426 		case SPEED_10000:
427 			s = "10Gbps";
428 			break;
429 		case SPEED_1000:
430 			s = "1000Mbps";
431 			break;
432 		case SPEED_100:
433 			s = "100Mbps";
434 			break;
435 		}
436 
437 		netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
438 			    fc[p->link_cfg.fc]);
439 	}
440 }
441 
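/*
 * OS callback invoked when the firmware reports a link state change for a
 * port.  Updates the netdev carrier state and logs the new status for ports
 * that are up.
 */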
442 void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
443 {
444 	struct net_device *dev = adapter->port[port_id];
445 
446 	/* Skip changes from disabled ports. */
447 	if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
448 		if (link_stat)
449 			netif_carrier_on(dev);
450 		else
451 			netif_carrier_off(dev);
452 
453 		link_report(dev);
454 	}
455 }
456 
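/*
 * OS callback invoked when a port's transceiver module changes.  Logs the
 * type of the newly inserted module or notes that the module was unplugged.
 */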
457 void t4_os_portmod_changed(const struct adapter *adap, int port_id)
458 {
459 	static const char *mod_str[] = {
460 		NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
461 	};
462 
463 	const struct net_device *dev = adap->port[port_id];
464 	const struct port_info *pi = netdev_priv(dev);
465 
466 	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
467 		netdev_info(dev, "port module unplugged\n");
468 	else if (pi->mod_type < ARRAY_SIZE(mod_str))
469 		netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
470 }
471 
472 /*
473  * Configure the exact and hash address filters to handle a port's multicast
474  * and secondary unicast MAC addresses.
475  */
476 static int set_addr_filters(const struct net_device *dev, bool sleep)
477 {
478 	u64 mhash = 0;
479 	u64 uhash = 0;
480 	bool free = true;
481 	u16 filt_idx[7];
482 	const u8 *addr[7];
483 	int ret, naddr = 0;
484 	const struct netdev_hw_addr *ha;
485 	int uc_cnt = netdev_uc_count(dev);
486 	int mc_cnt = netdev_mc_count(dev);
487 	const struct port_info *pi = netdev_priv(dev);
488 	unsigned int mb = pi->adapter->fn;
489 
490 	/* first do the secondary unicast addresses */
491 	netdev_for_each_uc_addr(ha, dev) {
492 		addr[naddr++] = ha->addr;
493 		if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
494 			ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
495 					naddr, addr, filt_idx, &uhash, sleep);
496 			if (ret < 0)
497 				return ret;
498 
499 			free = false;
500 			naddr = 0;
501 		}
502 	}
503 
504 	/* next set up the multicast addresses */
505 	netdev_for_each_mc_addr(ha, dev) {
506 		addr[naddr++] = ha->addr;
507 		if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
508 			ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
509 					naddr, addr, filt_idx, &mhash, sleep);
510 			if (ret < 0)
511 				return ret;
512 
513 			free = false;
514 			naddr = 0;
515 		}
516 	}
517 
518 	return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
519 				uhash | mhash, sleep);
520 }
521 
522 int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
523 module_param(dbfifo_int_thresh, int, 0644);
524 MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");
525 
526 /*
527  * usecs to sleep while draining the dbfifo
528  */
529 static int dbfifo_drain_delay = 1000;
530 module_param(dbfifo_drain_delay, int, 0644);
531 MODULE_PARM_DESC(dbfifo_drain_delay,
532 		 "usecs to sleep while draining the dbfifo");
533 
534 /*
535  * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
536  * If @mtu is -1 it is left unchanged.
537  */
538 static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
539 {
540 	int ret;
541 	struct port_info *pi = netdev_priv(dev);
542 
543 	ret = set_addr_filters(dev, sleep_ok);
544 	if (ret == 0)
545 		ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu,
546 				    (dev->flags & IFF_PROMISC) ? 1 : 0,
547 				    (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
548 				    sleep_ok);
549 	return ret;
550 }
551 
552 static struct workqueue_struct *workq;
553 
554 /**
555  *	link_start - enable a port
556  *	@dev: the port to enable
557  *
558  *	Performs the MAC and PHY actions needed to enable a port.
559  */
560 static int link_start(struct net_device *dev)
561 {
562 	int ret;
563 	struct port_info *pi = netdev_priv(dev);
564 	unsigned int mb = pi->adapter->fn;
565 
566 	/*
567 	 * We do not set address filters and promiscuity here, the stack does
568 	 * that step explicitly.
569 	 */
570 	ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
571 			    !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
572 	if (ret == 0) {
573 		ret = t4_change_mac(pi->adapter, mb, pi->viid,
574 				    pi->xact_addr_filt, dev->dev_addr, true,
575 				    true);
576 		if (ret >= 0) {
577 			pi->xact_addr_filt = ret;
578 			ret = 0;
579 		}
580 	}
581 	if (ret == 0)
582 		ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
583 				    &pi->link_cfg);
584 	if (ret == 0)
585 		ret = t4_enable_vi(pi->adapter, mb, pi->viid, true, true);
586 	return ret;
587 }
588 
589 /* Clear a filter and release any of its resources that we own.  This also
590  * clears the filter's "pending" status.
591  */
592 static void clear_filter(struct adapter *adap, struct filter_entry *f)
593 {
594 	/* If the new or old filter has loopback rewriting rules then we'll
595 	 * need to free any existing Layer Two Table (L2T) entries of the old
596 	 * filter rule.  The firmware will handle freeing up any Source MAC
597 	 * Table (SMT) entries used for rewriting Source MAC Addresses in
598 	 * loopback rules.
599 	 */
600 	if (f->l2t)
601 		cxgb4_l2t_release(f->l2t);
602 
603 	/* The zeroing of the filter rule below clears the filter valid,
604 	 * pending, locked flags, l2t pointer, etc. so it's all we need for
605 	 * this operation.
606 	 */
607 	memset(f, 0, sizeof(*f));
608 }
609 
610 /* Handle a filter write/deletion reply.
611  */
612 static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
613 {
614 	unsigned int idx = GET_TID(rpl);
615 	unsigned int nidx = idx - adap->tids.ftid_base;
616 	unsigned int ret;
617 	struct filter_entry *f;
618 
619 	if (idx >= adap->tids.ftid_base &&
620 	    nidx < (adap->tids.nftids + adap->tids.nsftids)) {
621 		idx = nidx;
622 		ret = GET_TCB_COOKIE(rpl->cookie);
623 		f = &adap->tids.ftid_tab[idx];
624 
625 		if (ret == FW_FILTER_WR_FLT_DELETED) {
626 			/* Clear the filter when we get confirmation from the
627 			 * hardware that the filter has been deleted.
628 			 */
629 			clear_filter(adap, f);
630 		} else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
631 			dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
632 				idx);
633 			clear_filter(adap, f);
634 		} else if (ret == FW_FILTER_WR_FLT_ADDED) {
635 			f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
636 			f->pending = 0;  /* asynchronous setup completed */
637 			f->valid = 1;
638 		} else {
639 			/* Something went wrong.  Issue a warning about the
640 			 * problem and clear everything out.
641 			 */
642 			dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
643 				idx, ret);
644 			clear_filter(adap, f);
645 		}
646 	}
647 }
648 
649 /* Response queue handler for the FW event queue.
650  */
651 static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
652 			  const struct pkt_gl *gl)
653 {
654 	u8 opcode = ((const struct rss_header *)rsp)->opcode;
655 
656 	rsp++;                                          /* skip RSS header */
657 
658 	/* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
659 	 */
660 	if (unlikely(opcode == CPL_FW4_MSG &&
661 	   ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
662 		rsp++;
663 		opcode = ((const struct rss_header *)rsp)->opcode;
664 		rsp++;
665 		if (opcode != CPL_SGE_EGR_UPDATE) {
666 			dev_err(q->adap->pdev_dev,
667 				"unexpected FW4/CPL %#x on FW event queue\n", opcode);
668 			goto out;
669 		}
670 	}
671 
672 	if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
673 		const struct cpl_sge_egr_update *p = (void *)rsp;
674 		unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
675 		struct sge_txq *txq;
676 
677 		txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
678 		txq->restarts++;
679 		if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
680 			struct sge_eth_txq *eq;
681 
682 			eq = container_of(txq, struct sge_eth_txq, q);
683 			netif_tx_wake_queue(eq->txq);
684 		} else {
685 			struct sge_ofld_txq *oq;
686 
687 			oq = container_of(txq, struct sge_ofld_txq, q);
688 			tasklet_schedule(&oq->qresume_tsk);
689 		}
690 	} else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
691 		const struct cpl_fw6_msg *p = (void *)rsp;
692 
693 		if (p->type == 0)
694 			t4_handle_fw_rpl(q->adap, p->data);
695 	} else if (opcode == CPL_L2T_WRITE_RPL) {
696 		const struct cpl_l2t_write_rpl *p = (void *)rsp;
697 
698 		do_l2t_write_rpl(q->adap, p);
699 	} else if (opcode == CPL_SET_TCB_RPL) {
700 		const struct cpl_set_tcb_rpl *p = (void *)rsp;
701 
702 		filter_rpl(q->adap, p);
703 	} else
704 		dev_err(q->adap->pdev_dev,
705 			"unexpected CPL %#x on FW event queue\n", opcode);
706 out:
707 	return 0;
708 }
709 
710 /**
711  *	uldrx_handler - response queue handler for ULD queues
712  *	@q: the response queue that received the packet
713  *	@rsp: the response queue descriptor holding the offload message
714  *	@gl: the gather list of packet fragments
715  *
716  *	Deliver an ingress offload packet to a ULD.  All processing is done by
717  *	the ULD, we just maintain statistics.
718  */
719 static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
720 			 const struct pkt_gl *gl)
721 {
722 	struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
723 
724 	/* FW can send CPLs encapsulated in a CPL_FW4_MSG.
725 	 */
726 	if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
727 	    ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
728 		rsp += 2;
729 
730 	if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
731 		rxq->stats.nomem++;
732 		return -1;
733 	}
734 	if (gl == NULL)
735 		rxq->stats.imm++;
736 	else if (gl == CXGB4_MSG_AN)
737 		rxq->stats.an++;
738 	else
739 		rxq->stats.pkts++;
740 	return 0;
741 }
742 
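/*
 * Release whichever of MSI-X or MSI the adapter is currently using and clear
 * the corresponding interrupt-mode flag.
 */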
743 static void disable_msi(struct adapter *adapter)
744 {
745 	if (adapter->flags & USING_MSIX) {
746 		pci_disable_msix(adapter->pdev);
747 		adapter->flags &= ~USING_MSIX;
748 	} else if (adapter->flags & USING_MSI) {
749 		pci_disable_msi(adapter->pdev);
750 		adapter->flags &= ~USING_MSI;
751 	}
752 }
753 
754 /*
755  * Interrupt handler for non-data events used with MSI-X.
756  */
757 static irqreturn_t t4_nondata_intr(int irq, void *cookie)
758 {
759 	struct adapter *adap = cookie;
760 
761 	u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE));
762 	if (v & PFSW) {
763 		adap->swintr = 1;
764 		t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v);
765 	}
766 	t4_slow_intr_handler(adap);
767 	return IRQ_HANDLED;
768 }
769 
770 /*
771  * Name the MSI-X interrupts.
772  */
773 static void name_msix_vecs(struct adapter *adap)
774 {
775 	int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);
776 
777 	/* non-data interrupts */
778 	snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);
779 
780 	/* FW events */
781 	snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
782 		 adap->port[0]->name);
783 
784 	/* Ethernet queues */
785 	for_each_port(adap, j) {
786 		struct net_device *d = adap->port[j];
787 		const struct port_info *pi = netdev_priv(d);
788 
789 		for (i = 0; i < pi->nqsets; i++, msi_idx++)
790 			snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
791 				 d->name, i);
792 	}
793 
794 	/* offload queues */
795 	for_each_ofldrxq(&adap->sge, i)
796 		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d",
797 			 adap->port[0]->name, i);
798 
799 	for_each_rdmarxq(&adap->sge, i)
800 		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
801 			 adap->port[0]->name, i);
802 }
803 
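/*
 * Request the MSI-X vectors for the firmware event queue and for every
 * Ethernet, offload and RDMA Rx queue, releasing any vectors already
 * acquired if a request fails part way through.
 */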
804 static int request_msix_queue_irqs(struct adapter *adap)
805 {
806 	struct sge *s = &adap->sge;
807 	int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi_index = 2;
808 
809 	err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
810 			  adap->msix_info[1].desc, &s->fw_evtq);
811 	if (err)
812 		return err;
813 
814 	for_each_ethrxq(s, ethqidx) {
815 		err = request_irq(adap->msix_info[msi_index].vec,
816 				  t4_sge_intr_msix, 0,
817 				  adap->msix_info[msi_index].desc,
818 				  &s->ethrxq[ethqidx].rspq);
819 		if (err)
820 			goto unwind;
821 		msi_index++;
822 	}
823 	for_each_ofldrxq(s, ofldqidx) {
824 		err = request_irq(adap->msix_info[msi_index].vec,
825 				  t4_sge_intr_msix, 0,
826 				  adap->msix_info[msi_index].desc,
827 				  &s->ofldrxq[ofldqidx].rspq);
828 		if (err)
829 			goto unwind;
830 		msi_index++;
831 	}
832 	for_each_rdmarxq(s, rdmaqidx) {
833 		err = request_irq(adap->msix_info[msi_index].vec,
834 				  t4_sge_intr_msix, 0,
835 				  adap->msix_info[msi_index].desc,
836 				  &s->rdmarxq[rdmaqidx].rspq);
837 		if (err)
838 			goto unwind;
839 		msi_index++;
840 	}
841 	return 0;
842 
843 unwind:
844 	while (--rdmaqidx >= 0)
845 		free_irq(adap->msix_info[--msi_index].vec,
846 			 &s->rdmarxq[rdmaqidx].rspq);
847 	while (--ofldqidx >= 0)
848 		free_irq(adap->msix_info[--msi_index].vec,
849 			 &s->ofldrxq[ofldqidx].rspq);
850 	while (--ethqidx >= 0)
851 		free_irq(adap->msix_info[--msi_index].vec,
852 			 &s->ethrxq[ethqidx].rspq);
853 	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
854 	return err;
855 }
856 
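/*
 * Release the MSI-X vectors acquired by request_msix_queue_irqs().
 */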
857 static void free_msix_queue_irqs(struct adapter *adap)
858 {
859 	int i, msi_index = 2;
860 	struct sge *s = &adap->sge;
861 
862 	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
863 	for_each_ethrxq(s, i)
864 		free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
865 	for_each_ofldrxq(s, i)
866 		free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq);
867 	for_each_rdmarxq(s, i)
868 		free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
869 }
870 
871 /**
872  *	write_rss - write the RSS table for a given port
873  *	@pi: the port
874  *	@queues: array of queue indices for RSS
875  *
876  *	Sets up the portion of the HW RSS table for the port's VI to distribute
877  *	packets to the Rx queues in @queues.
878  */
879 static int write_rss(const struct port_info *pi, const u16 *queues)
880 {
881 	u16 *rss;
882 	int i, err;
883 	const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset];
884 
885 	rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
886 	if (!rss)
887 		return -ENOMEM;
888 
889 	/* map the queue indices to queue ids */
890 	for (i = 0; i < pi->rss_size; i++, queues++)
891 		rss[i] = q[*queues].rspq.abs_id;
892 
893 	err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0,
894 				  pi->rss_size, rss, pi->rss_size);
895 	kfree(rss);
896 	return err;
897 }
898 
899 /**
900  *	setup_rss - configure RSS
901  *	@adap: the adapter
902  *
903  *	Sets up RSS for each port.
904  */
905 static int setup_rss(struct adapter *adap)
906 {
907 	int i, err;
908 
909 	for_each_port(adap, i) {
910 		const struct port_info *pi = adap2pinfo(adap, i);
911 
912 		err = write_rss(pi, pi->rss);
913 		if (err)
914 			return err;
915 	}
916 	return 0;
917 }
918 
919 /*
920  * Return the channel of the ingress queue with the given qid.
921  */
922 static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
923 {
924 	qid -= p->ingr_start;
925 	return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
926 }
927 
928 /*
929  * Wait until all NAPI handlers are descheduled.
930  */
931 static void quiesce_rx(struct adapter *adap)
932 {
933 	int i;
934 
935 	for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
936 		struct sge_rspq *q = adap->sge.ingr_map[i];
937 
938 		if (q && q->handler)
939 			napi_disable(&q->napi);
940 	}
941 }
942 
943 /*
944  * Enable NAPI scheduling and interrupt generation for all Rx queues.
945  */
946 static void enable_rx(struct adapter *adap)
947 {
948 	int i;
949 
950 	for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
951 		struct sge_rspq *q = adap->sge.ingr_map[i];
952 
953 		if (!q)
954 			continue;
955 		if (q->handler)
956 			napi_enable(&q->napi);
957 		/* 0-increment GTS to start the timer and enable interrupts */
958 		t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
959 			     SEINTARM(q->intr_params) |
960 			     INGRESSQID(q->cntxt_id));
961 	}
962 }
963 
964 /**
965  *	setup_sge_queues - configure SGE Tx/Rx/response queues
966  *	@adap: the adapter
967  *
968  *	Determines how many sets of SGE queues to use and initializes them.
969  *	We support multiple queue sets per port if we have MSI-X, otherwise
970  *	just one queue set per port.
971  */
972 static int setup_sge_queues(struct adapter *adap)
973 {
974 	int err, msi_idx, i, j;
975 	struct sge *s = &adap->sge;
976 
977 	bitmap_zero(s->starving_fl, MAX_EGRQ);
978 	bitmap_zero(s->txq_maperr, MAX_EGRQ);
979 
980 	if (adap->flags & USING_MSIX)
981 		msi_idx = 1;         /* vector 0 is for non-queue interrupts */
982 	else {
983 		err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
984 				       NULL, NULL);
985 		if (err)
986 			return err;
987 		msi_idx = -((int)s->intrq.abs_id + 1);
988 	}
989 
990 	err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
991 			       msi_idx, NULL, fwevtq_handler);
992 	if (err) {
993 freeout:	t4_free_sge_resources(adap);
994 		return err;
995 	}
996 
997 	for_each_port(adap, i) {
998 		struct net_device *dev = adap->port[i];
999 		struct port_info *pi = netdev_priv(dev);
1000 		struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
1001 		struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];
1002 
1003 		for (j = 0; j < pi->nqsets; j++, q++) {
1004 			if (msi_idx > 0)
1005 				msi_idx++;
1006 			err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
1007 					       msi_idx, &q->fl,
1008 					       t4_ethrx_handler);
1009 			if (err)
1010 				goto freeout;
1011 			q->rspq.idx = j;
1012 			memset(&q->stats, 0, sizeof(q->stats));
1013 		}
1014 		for (j = 0; j < pi->nqsets; j++, t++) {
1015 			err = t4_sge_alloc_eth_txq(adap, t, dev,
1016 					netdev_get_tx_queue(dev, j),
1017 					s->fw_evtq.cntxt_id);
1018 			if (err)
1019 				goto freeout;
1020 		}
1021 	}
1022 
1023 	j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
1024 	for_each_ofldrxq(s, i) {
1025 		struct sge_ofld_rxq *q = &s->ofldrxq[i];
1026 		struct net_device *dev = adap->port[i / j];
1027 
1028 		if (msi_idx > 0)
1029 			msi_idx++;
1030 		err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
1031 				       &q->fl, uldrx_handler);
1032 		if (err)
1033 			goto freeout;
1034 		memset(&q->stats, 0, sizeof(q->stats));
1035 		s->ofld_rxq[i] = q->rspq.abs_id;
1036 		err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev,
1037 					    s->fw_evtq.cntxt_id);
1038 		if (err)
1039 			goto freeout;
1040 	}
1041 
1042 	for_each_rdmarxq(s, i) {
1043 		struct sge_ofld_rxq *q = &s->rdmarxq[i];
1044 
1045 		if (msi_idx > 0)
1046 			msi_idx++;
1047 		err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
1048 				       msi_idx, &q->fl, uldrx_handler);
1049 		if (err)
1050 			goto freeout;
1051 		memset(&q->stats, 0, sizeof(q->stats));
1052 		s->rdma_rxq[i] = q->rspq.abs_id;
1053 	}
1054 
1055 	for_each_port(adap, i) {
1056 		/*
1057 		 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
1058 		 * have RDMA queues, and that's the right value.
1059 		 */
1060 		err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
1061 					    s->fw_evtq.cntxt_id,
1062 					    s->rdmarxq[i].rspq.cntxt_id);
1063 		if (err)
1064 			goto freeout;
1065 	}
1066 
1067 	t4_write_reg(adap, MPS_TRC_RSS_CONTROL,
1068 		     RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
1069 		     QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
1070 	return 0;
1071 }
1072 
1073 /*
1074  * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
1075  * The allocated memory is cleared.
1076  */
1077 void *t4_alloc_mem(size_t size)
1078 {
1079 	void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
1080 
1081 	if (!p)
1082 		p = vzalloc(size);
1083 	return p;
1084 }
1085 
1086 /*
1087  * Free memory allocated through t4_alloc_mem().
1088  */
1089 static void t4_free_mem(void *addr)
1090 {
1091 	if (is_vmalloc_addr(addr))
1092 		vfree(addr);
1093 	else
1094 		kfree(addr);
1095 }
1096 
1097 /* Send a Work Request to write the filter at a specified index.  We construct
1098  * a Firmware Filter Work Request to have the work done and put the indicated
1099  * filter into "pending" mode which will prevent any further actions against
1100  * it till we get a reply from the firmware on the completion status of the
1101  * request.
1102  */
1103 static int set_filter_wr(struct adapter *adapter, int fidx)
1104 {
1105 	struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
1106 	struct sk_buff *skb;
1107 	struct fw_filter_wr *fwr;
1108 	unsigned int ftid;
1109 
1110 	/* If the new filter requires loopback Destination MAC and/or VLAN
1111 	 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
1112 	 * the filter.
1113 	 */
1114 	if (f->fs.newdmac || f->fs.newvlan) {
1115 		/* allocate L2T entry for new filter */
1116 		f->l2t = t4_l2t_alloc_switching(adapter->l2t);
1117 		if (f->l2t == NULL)
1118 			return -EAGAIN;
1119 		if (t4_l2t_set_switching(adapter, f->l2t, f->fs.vlan,
1120 					f->fs.eport, f->fs.dmac)) {
1121 			cxgb4_l2t_release(f->l2t);
1122 			f->l2t = NULL;
1123 			return -ENOMEM;
1124 		}
1125 	}
1126 
1127 	ftid = adapter->tids.ftid_base + fidx;
1128 
1129 	skb = alloc_skb(sizeof(*fwr), GFP_KERNEL | __GFP_NOFAIL);
1130 	fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
1131 	memset(fwr, 0, sizeof(*fwr));
1132 
1133 	/* It would be nice to put most of the following in t4_hw.c but most
1134 	 * of the work is translating the cxgbtool ch_filter_specification
1135 	 * into the Work Request and the definition of that structure is
1136 	 * currently in cxgbtool.h which isn't appropriate to pull into the
1137 	 * common code.  We may eventually try to come up with a more neutral
1138 	 * filter specification structure but for now it's easiest to simply
1139 	 * put this fairly direct code in line ...
1140 	 */
1141 	fwr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR));
1142 	fwr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*fwr)/16));
1143 	fwr->tid_to_iq =
1144 		htonl(V_FW_FILTER_WR_TID(ftid) |
1145 		      V_FW_FILTER_WR_RQTYPE(f->fs.type) |
1146 		      V_FW_FILTER_WR_NOREPLY(0) |
1147 		      V_FW_FILTER_WR_IQ(f->fs.iq));
1148 	fwr->del_filter_to_l2tix =
1149 		htonl(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
1150 		      V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
1151 		      V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
1152 		      V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
1153 		      V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
1154 		      V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
1155 		      V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
1156 		      V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
1157 		      V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
1158 					     f->fs.newvlan == VLAN_REWRITE) |
1159 		      V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
1160 					    f->fs.newvlan == VLAN_REWRITE) |
1161 		      V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
1162 		      V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
1163 		      V_FW_FILTER_WR_PRIO(f->fs.prio) |
1164 		      V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
1165 	fwr->ethtype = htons(f->fs.val.ethtype);
1166 	fwr->ethtypem = htons(f->fs.mask.ethtype);
1167 	fwr->frag_to_ovlan_vldm =
1168 		(V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
1169 		 V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
1170 		 V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.ivlan_vld) |
1171 		 V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.ovlan_vld) |
1172 		 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.ivlan_vld) |
1173 		 V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.ovlan_vld));
1174 	fwr->smac_sel = 0;
1175 	fwr->rx_chan_rx_rpl_iq =
1176 		htons(V_FW_FILTER_WR_RX_CHAN(0) |
1177 		      V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id));
1178 	fwr->maci_to_matchtypem =
1179 		htonl(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
1180 		      V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
1181 		      V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
1182 		      V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
1183 		      V_FW_FILTER_WR_PORT(f->fs.val.iport) |
1184 		      V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
1185 		      V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
1186 		      V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
1187 	fwr->ptcl = f->fs.val.proto;
1188 	fwr->ptclm = f->fs.mask.proto;
1189 	fwr->ttyp = f->fs.val.tos;
1190 	fwr->ttypm = f->fs.mask.tos;
1191 	fwr->ivlan = htons(f->fs.val.ivlan);
1192 	fwr->ivlanm = htons(f->fs.mask.ivlan);
1193 	fwr->ovlan = htons(f->fs.val.ovlan);
1194 	fwr->ovlanm = htons(f->fs.mask.ovlan);
1195 	memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
1196 	memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
1197 	memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
1198 	memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
1199 	fwr->lp = htons(f->fs.val.lport);
1200 	fwr->lpm = htons(f->fs.mask.lport);
1201 	fwr->fp = htons(f->fs.val.fport);
1202 	fwr->fpm = htons(f->fs.mask.fport);
1203 	if (f->fs.newsmac)
1204 		memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));
1205 
1206 	/* Mark the filter as "pending" and ship off the Filter Work Request.
1207 	 * When we get the Work Request Reply we'll clear the pending status.
1208 	 */
1209 	f->pending = 1;
1210 	set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
1211 	t4_ofld_send(adapter, skb);
1212 	return 0;
1213 }
1214 
1215 /* Delete the filter at a specified index.
1216  */
1217 static int del_filter_wr(struct adapter *adapter, int fidx)
1218 {
1219 	struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
1220 	struct sk_buff *skb;
1221 	struct fw_filter_wr *fwr;
1222 	unsigned int len, ftid;
1223 
1224 	len = sizeof(*fwr);
1225 	ftid = adapter->tids.ftid_base + fidx;
1226 
1227 	skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
1228 	fwr = (struct fw_filter_wr *)__skb_put(skb, len);
1229 	t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id);
1230 
1231 	/* Mark the filter as "pending" and ship off the Filter Work Request.
1232 	 * When we get the Work Request Reply we'll clear the pending status.
1233 	 */
1234 	f->pending = 1;
1235 	t4_mgmt_tx(adapter, skb);
1236 	return 0;
1237 }
1238 
1239 static inline int is_offload(const struct adapter *adap)
1240 {
1241 	return adap->params.offload;
1242 }
1243 
1244 /*
1245  * Implementation of ethtool operations.
1246  */
1247 
1248 static u32 get_msglevel(struct net_device *dev)
1249 {
1250 	return netdev2adap(dev)->msg_enable;
1251 }
1252 
1253 static void set_msglevel(struct net_device *dev, u32 val)
1254 {
1255 	netdev2adap(dev)->msg_enable = val;
1256 }
1257 
1258 static char stats_strings[][ETH_GSTRING_LEN] = {
1259 	"TxOctetsOK         ",
1260 	"TxFramesOK         ",
1261 	"TxBroadcastFrames  ",
1262 	"TxMulticastFrames  ",
1263 	"TxUnicastFrames    ",
1264 	"TxErrorFrames      ",
1265 
1266 	"TxFrames64         ",
1267 	"TxFrames65To127    ",
1268 	"TxFrames128To255   ",
1269 	"TxFrames256To511   ",
1270 	"TxFrames512To1023  ",
1271 	"TxFrames1024To1518 ",
1272 	"TxFrames1519ToMax  ",
1273 
1274 	"TxFramesDropped    ",
1275 	"TxPauseFrames      ",
1276 	"TxPPP0Frames       ",
1277 	"TxPPP1Frames       ",
1278 	"TxPPP2Frames       ",
1279 	"TxPPP3Frames       ",
1280 	"TxPPP4Frames       ",
1281 	"TxPPP5Frames       ",
1282 	"TxPPP6Frames       ",
1283 	"TxPPP7Frames       ",
1284 
1285 	"RxOctetsOK         ",
1286 	"RxFramesOK         ",
1287 	"RxBroadcastFrames  ",
1288 	"RxMulticastFrames  ",
1289 	"RxUnicastFrames    ",
1290 
1291 	"RxFramesTooLong    ",
1292 	"RxJabberErrors     ",
1293 	"RxFCSErrors        ",
1294 	"RxLengthErrors     ",
1295 	"RxSymbolErrors     ",
1296 	"RxRuntFrames       ",
1297 
1298 	"RxFrames64         ",
1299 	"RxFrames65To127    ",
1300 	"RxFrames128To255   ",
1301 	"RxFrames256To511   ",
1302 	"RxFrames512To1023  ",
1303 	"RxFrames1024To1518 ",
1304 	"RxFrames1519ToMax  ",
1305 
1306 	"RxPauseFrames      ",
1307 	"RxPPP0Frames       ",
1308 	"RxPPP1Frames       ",
1309 	"RxPPP2Frames       ",
1310 	"RxPPP3Frames       ",
1311 	"RxPPP4Frames       ",
1312 	"RxPPP5Frames       ",
1313 	"RxPPP6Frames       ",
1314 	"RxPPP7Frames       ",
1315 
1316 	"RxBG0FramesDropped ",
1317 	"RxBG1FramesDropped ",
1318 	"RxBG2FramesDropped ",
1319 	"RxBG3FramesDropped ",
1320 	"RxBG0FramesTrunc   ",
1321 	"RxBG1FramesTrunc   ",
1322 	"RxBG2FramesTrunc   ",
1323 	"RxBG3FramesTrunc   ",
1324 
1325 	"TSO                ",
1326 	"TxCsumOffload      ",
1327 	"RxCsumGood         ",
1328 	"VLANextractions    ",
1329 	"VLANinsertions     ",
1330 	"GROpackets         ",
1331 	"GROmerged          ",
1332 	"WriteCoalSuccess   ",
1333 	"WriteCoalFail      ",
1334 };
1335 
1336 static int get_sset_count(struct net_device *dev, int sset)
1337 {
1338 	switch (sset) {
1339 	case ETH_SS_STATS:
1340 		return ARRAY_SIZE(stats_strings);
1341 	default:
1342 		return -EOPNOTSUPP;
1343 	}
1344 }
1345 
1346 #define T4_REGMAP_SIZE (160 * 1024)
1347 #define T5_REGMAP_SIZE (332 * 1024)
1348 
1349 static int get_regs_len(struct net_device *dev)
1350 {
1351 	struct adapter *adap = netdev2adap(dev);
1352 	if (is_t4(adap->params.chip))
1353 		return T4_REGMAP_SIZE;
1354 	else
1355 		return T5_REGMAP_SIZE;
1356 }
1357 
1358 static int get_eeprom_len(struct net_device *dev)
1359 {
1360 	return EEPROMSIZE;
1361 }
1362 
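/*
 * ethtool driver-info handler.  Reports the driver name and version, the PCI
 * bus address, and the firmware/TP microcode versions when available.
 */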
1363 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1364 {
1365 	struct adapter *adapter = netdev2adap(dev);
1366 
1367 	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
1368 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1369 	strlcpy(info->bus_info, pci_name(adapter->pdev),
1370 		sizeof(info->bus_info));
1371 
1372 	if (adapter->params.fw_vers)
1373 		snprintf(info->fw_version, sizeof(info->fw_version),
1374 			"%u.%u.%u.%u, TP %u.%u.%u.%u",
1375 			FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers),
1376 			FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers),
1377 			FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers),
1378 			FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers),
1379 			FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers),
1380 			FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers),
1381 			FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers),
1382 			FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers));
1383 }
1384 
1385 static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
1386 {
1387 	if (stringset == ETH_SS_STATS)
1388 		memcpy(data, stats_strings, sizeof(stats_strings));
1389 }
1390 
1391 /*
1392  * port stats maintained per queue of the port.  They should be in the same
1393  * order as in stats_strings above.
1394  */
1395 struct queue_port_stats {
1396 	u64 tso;
1397 	u64 tx_csum;
1398 	u64 rx_csum;
1399 	u64 vlan_ex;
1400 	u64 vlan_ins;
1401 	u64 gro_pkts;
1402 	u64 gro_merged;
1403 };
1404 
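/*
 * Accumulate the software per-queue statistics of a port's Tx/Rx queue sets
 * into a single queue_port_stats structure.
 */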
1405 static void collect_sge_port_stats(const struct adapter *adap,
1406 		const struct port_info *p, struct queue_port_stats *s)
1407 {
1408 	int i;
1409 	const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
1410 	const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];
1411 
1412 	memset(s, 0, sizeof(*s));
1413 	for (i = 0; i < p->nqsets; i++, rx++, tx++) {
1414 		s->tso += tx->tso;
1415 		s->tx_csum += tx->tx_cso;
1416 		s->rx_csum += rx->stats.rx_cso;
1417 		s->vlan_ex += rx->stats.vlan_ex;
1418 		s->vlan_ins += tx->vlan_ins;
1419 		s->gro_pkts += rx->stats.lro_pkts;
1420 		s->gro_merged += rx->stats.lro_merged;
1421 	}
1422 }
1423 
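/*
 * ethtool statistics handler.  Returns the hardware MAC statistics for the
 * port followed by the software per-queue statistics and, on T5, the SGE
 * write-coalescing counters.
 */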
1424 static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1425 		      u64 *data)
1426 {
1427 	struct port_info *pi = netdev_priv(dev);
1428 	struct adapter *adapter = pi->adapter;
1429 	u32 val1, val2;
1430 
1431 	t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);
1432 
1433 	data += sizeof(struct port_stats) / sizeof(u64);
1434 	collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
1435 	data += sizeof(struct queue_port_stats) / sizeof(u64);
1436 	if (!is_t4(adapter->params.chip)) {
1437 		t4_write_reg(adapter, SGE_STAT_CFG, STATSOURCE_T5(7));
1438 		val1 = t4_read_reg(adapter, SGE_STAT_TOTAL);
1439 		val2 = t4_read_reg(adapter, SGE_STAT_MATCH);
1440 		*data = val1 - val2;
1441 		data++;
1442 		*data = val2;
1443 		data++;
1444 	} else {
1445 		memset(data, 0, 2 * sizeof(u64));
1446 		data += 2;
1447 	}
1448 }
1449 
1450 /*
1451  * Return a version number to identify the type of adapter.  The scheme is:
1452  * - bits 0..9: chip version
1453  * - bits 10..15: chip revision
1454  * - bits 16..23: register dump version
1455  */
1456 static inline unsigned int mk_adap_vers(const struct adapter *ap)
1457 {
1458 	return CHELSIO_CHIP_VERSION(ap->params.chip) |
1459 		(CHELSIO_CHIP_RELEASE(ap->params.chip) << 10) | (1 << 16);
1460 }
1461 
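/*
 * Read the contiguous register range [start, end] from the adapter into buf
 * at the offset corresponding to the starting register address.
 */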
1462 static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
1463 			   unsigned int end)
1464 {
1465 	u32 *p = buf + start;
1466 
1467 	for ( ; start <= end; start += sizeof(u32))
1468 		*p++ = t4_read_reg(ap, start);
1469 }
1470 
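/*
 * ethtool register-dump handler.  The tables below list the register address
 * ranges captured for T4 and T5 adapters respectively.
 */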
1471 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1472 		     void *buf)
1473 {
1474 	static const unsigned int t4_reg_ranges[] = {
1475 		0x1008, 0x1108,
1476 		0x1180, 0x11b4,
1477 		0x11fc, 0x123c,
1478 		0x1300, 0x173c,
1479 		0x1800, 0x18fc,
1480 		0x3000, 0x30d8,
1481 		0x30e0, 0x5924,
1482 		0x5960, 0x59d4,
1483 		0x5a00, 0x5af8,
1484 		0x6000, 0x6098,
1485 		0x6100, 0x6150,
1486 		0x6200, 0x6208,
1487 		0x6240, 0x6248,
1488 		0x6280, 0x6338,
1489 		0x6370, 0x638c,
1490 		0x6400, 0x643c,
1491 		0x6500, 0x6524,
1492 		0x6a00, 0x6a38,
1493 		0x6a60, 0x6a78,
1494 		0x6b00, 0x6b84,
1495 		0x6bf0, 0x6c84,
1496 		0x6cf0, 0x6d84,
1497 		0x6df0, 0x6e84,
1498 		0x6ef0, 0x6f84,
1499 		0x6ff0, 0x7084,
1500 		0x70f0, 0x7184,
1501 		0x71f0, 0x7284,
1502 		0x72f0, 0x7384,
1503 		0x73f0, 0x7450,
1504 		0x7500, 0x7530,
1505 		0x7600, 0x761c,
1506 		0x7680, 0x76cc,
1507 		0x7700, 0x7798,
1508 		0x77c0, 0x77fc,
1509 		0x7900, 0x79fc,
1510 		0x7b00, 0x7c38,
1511 		0x7d00, 0x7efc,
1512 		0x8dc0, 0x8e1c,
1513 		0x8e30, 0x8e78,
1514 		0x8ea0, 0x8f6c,
1515 		0x8fc0, 0x9074,
1516 		0x90fc, 0x90fc,
1517 		0x9400, 0x9458,
1518 		0x9600, 0x96bc,
1519 		0x9800, 0x9808,
1520 		0x9820, 0x983c,
1521 		0x9850, 0x9864,
1522 		0x9c00, 0x9c6c,
1523 		0x9c80, 0x9cec,
1524 		0x9d00, 0x9d6c,
1525 		0x9d80, 0x9dec,
1526 		0x9e00, 0x9e6c,
1527 		0x9e80, 0x9eec,
1528 		0x9f00, 0x9f6c,
1529 		0x9f80, 0x9fec,
1530 		0xd004, 0xd03c,
1531 		0xdfc0, 0xdfe0,
1532 		0xe000, 0xea7c,
1533 		0xf000, 0x11190,
1534 		0x19040, 0x1906c,
1535 		0x19078, 0x19080,
1536 		0x1908c, 0x19124,
1537 		0x19150, 0x191b0,
1538 		0x191d0, 0x191e8,
1539 		0x19238, 0x1924c,
1540 		0x193f8, 0x19474,
1541 		0x19490, 0x194f8,
1542 		0x19800, 0x19f30,
1543 		0x1a000, 0x1a06c,
1544 		0x1a0b0, 0x1a120,
1545 		0x1a128, 0x1a138,
1546 		0x1a190, 0x1a1c4,
1547 		0x1a1fc, 0x1a1fc,
1548 		0x1e040, 0x1e04c,
1549 		0x1e284, 0x1e28c,
1550 		0x1e2c0, 0x1e2c0,
1551 		0x1e2e0, 0x1e2e0,
1552 		0x1e300, 0x1e384,
1553 		0x1e3c0, 0x1e3c8,
1554 		0x1e440, 0x1e44c,
1555 		0x1e684, 0x1e68c,
1556 		0x1e6c0, 0x1e6c0,
1557 		0x1e6e0, 0x1e6e0,
1558 		0x1e700, 0x1e784,
1559 		0x1e7c0, 0x1e7c8,
1560 		0x1e840, 0x1e84c,
1561 		0x1ea84, 0x1ea8c,
1562 		0x1eac0, 0x1eac0,
1563 		0x1eae0, 0x1eae0,
1564 		0x1eb00, 0x1eb84,
1565 		0x1ebc0, 0x1ebc8,
1566 		0x1ec40, 0x1ec4c,
1567 		0x1ee84, 0x1ee8c,
1568 		0x1eec0, 0x1eec0,
1569 		0x1eee0, 0x1eee0,
1570 		0x1ef00, 0x1ef84,
1571 		0x1efc0, 0x1efc8,
1572 		0x1f040, 0x1f04c,
1573 		0x1f284, 0x1f28c,
1574 		0x1f2c0, 0x1f2c0,
1575 		0x1f2e0, 0x1f2e0,
1576 		0x1f300, 0x1f384,
1577 		0x1f3c0, 0x1f3c8,
1578 		0x1f440, 0x1f44c,
1579 		0x1f684, 0x1f68c,
1580 		0x1f6c0, 0x1f6c0,
1581 		0x1f6e0, 0x1f6e0,
1582 		0x1f700, 0x1f784,
1583 		0x1f7c0, 0x1f7c8,
1584 		0x1f840, 0x1f84c,
1585 		0x1fa84, 0x1fa8c,
1586 		0x1fac0, 0x1fac0,
1587 		0x1fae0, 0x1fae0,
1588 		0x1fb00, 0x1fb84,
1589 		0x1fbc0, 0x1fbc8,
1590 		0x1fc40, 0x1fc4c,
1591 		0x1fe84, 0x1fe8c,
1592 		0x1fec0, 0x1fec0,
1593 		0x1fee0, 0x1fee0,
1594 		0x1ff00, 0x1ff84,
1595 		0x1ffc0, 0x1ffc8,
1596 		0x20000, 0x2002c,
1597 		0x20100, 0x2013c,
1598 		0x20190, 0x201c8,
1599 		0x20200, 0x20318,
1600 		0x20400, 0x20528,
1601 		0x20540, 0x20614,
1602 		0x21000, 0x21040,
1603 		0x2104c, 0x21060,
1604 		0x210c0, 0x210ec,
1605 		0x21200, 0x21268,
1606 		0x21270, 0x21284,
1607 		0x212fc, 0x21388,
1608 		0x21400, 0x21404,
1609 		0x21500, 0x21518,
1610 		0x2152c, 0x2153c,
1611 		0x21550, 0x21554,
1612 		0x21600, 0x21600,
1613 		0x21608, 0x21628,
1614 		0x21630, 0x2163c,
1615 		0x21700, 0x2171c,
1616 		0x21780, 0x2178c,
1617 		0x21800, 0x21c38,
1618 		0x21c80, 0x21d7c,
1619 		0x21e00, 0x21e04,
1620 		0x22000, 0x2202c,
1621 		0x22100, 0x2213c,
1622 		0x22190, 0x221c8,
1623 		0x22200, 0x22318,
1624 		0x22400, 0x22528,
1625 		0x22540, 0x22614,
1626 		0x23000, 0x23040,
1627 		0x2304c, 0x23060,
1628 		0x230c0, 0x230ec,
1629 		0x23200, 0x23268,
1630 		0x23270, 0x23284,
1631 		0x232fc, 0x23388,
1632 		0x23400, 0x23404,
1633 		0x23500, 0x23518,
1634 		0x2352c, 0x2353c,
1635 		0x23550, 0x23554,
1636 		0x23600, 0x23600,
1637 		0x23608, 0x23628,
1638 		0x23630, 0x2363c,
1639 		0x23700, 0x2371c,
1640 		0x23780, 0x2378c,
1641 		0x23800, 0x23c38,
1642 		0x23c80, 0x23d7c,
1643 		0x23e00, 0x23e04,
1644 		0x24000, 0x2402c,
1645 		0x24100, 0x2413c,
1646 		0x24190, 0x241c8,
1647 		0x24200, 0x24318,
1648 		0x24400, 0x24528,
1649 		0x24540, 0x24614,
1650 		0x25000, 0x25040,
1651 		0x2504c, 0x25060,
1652 		0x250c0, 0x250ec,
1653 		0x25200, 0x25268,
1654 		0x25270, 0x25284,
1655 		0x252fc, 0x25388,
1656 		0x25400, 0x25404,
1657 		0x25500, 0x25518,
1658 		0x2552c, 0x2553c,
1659 		0x25550, 0x25554,
1660 		0x25600, 0x25600,
1661 		0x25608, 0x25628,
1662 		0x25630, 0x2563c,
1663 		0x25700, 0x2571c,
1664 		0x25780, 0x2578c,
1665 		0x25800, 0x25c38,
1666 		0x25c80, 0x25d7c,
1667 		0x25e00, 0x25e04,
1668 		0x26000, 0x2602c,
1669 		0x26100, 0x2613c,
1670 		0x26190, 0x261c8,
1671 		0x26200, 0x26318,
1672 		0x26400, 0x26528,
1673 		0x26540, 0x26614,
1674 		0x27000, 0x27040,
1675 		0x2704c, 0x27060,
1676 		0x270c0, 0x270ec,
1677 		0x27200, 0x27268,
1678 		0x27270, 0x27284,
1679 		0x272fc, 0x27388,
1680 		0x27400, 0x27404,
1681 		0x27500, 0x27518,
1682 		0x2752c, 0x2753c,
1683 		0x27550, 0x27554,
1684 		0x27600, 0x27600,
1685 		0x27608, 0x27628,
1686 		0x27630, 0x2763c,
1687 		0x27700, 0x2771c,
1688 		0x27780, 0x2778c,
1689 		0x27800, 0x27c38,
1690 		0x27c80, 0x27d7c,
1691 		0x27e00, 0x27e04
1692 	};
1693 
1694 	static const unsigned int t5_reg_ranges[] = {
1695 		0x1008, 0x1148,
1696 		0x1180, 0x11b4,
1697 		0x11fc, 0x123c,
1698 		0x1280, 0x173c,
1699 		0x1800, 0x18fc,
1700 		0x3000, 0x3028,
1701 		0x3060, 0x30d8,
1702 		0x30e0, 0x30fc,
1703 		0x3140, 0x357c,
1704 		0x35a8, 0x35cc,
1705 		0x35ec, 0x35ec,
1706 		0x3600, 0x5624,
1707 		0x56cc, 0x575c,
1708 		0x580c, 0x5814,
1709 		0x5890, 0x58bc,
1710 		0x5940, 0x59dc,
1711 		0x59fc, 0x5a18,
1712 		0x5a60, 0x5a9c,
1713 		0x5b9c, 0x5bfc,
1714 		0x6000, 0x6040,
1715 		0x6058, 0x614c,
1716 		0x7700, 0x7798,
1717 		0x77c0, 0x78fc,
1718 		0x7b00, 0x7c54,
1719 		0x7d00, 0x7efc,
1720 		0x8dc0, 0x8de0,
1721 		0x8df8, 0x8e84,
1722 		0x8ea0, 0x8f84,
1723 		0x8fc0, 0x90f8,
1724 		0x9400, 0x9470,
1725 		0x9600, 0x96f4,
1726 		0x9800, 0x9808,
1727 		0x9820, 0x983c,
1728 		0x9850, 0x9864,
1729 		0x9c00, 0x9c6c,
1730 		0x9c80, 0x9cec,
1731 		0x9d00, 0x9d6c,
1732 		0x9d80, 0x9dec,
1733 		0x9e00, 0x9e6c,
1734 		0x9e80, 0x9eec,
1735 		0x9f00, 0x9f6c,
1736 		0x9f80, 0xa020,
1737 		0xd004, 0xd03c,
1738 		0xdfc0, 0xdfe0,
1739 		0xe000, 0x11088,
1740 		0x1109c, 0x1117c,
1741 		0x11190, 0x11204,
1742 		0x19040, 0x1906c,
1743 		0x19078, 0x19080,
1744 		0x1908c, 0x19124,
1745 		0x19150, 0x191b0,
1746 		0x191d0, 0x191e8,
1747 		0x19238, 0x19290,
1748 		0x193f8, 0x19474,
1749 		0x19490, 0x194cc,
1750 		0x194f0, 0x194f8,
1751 		0x19c00, 0x19c60,
1752 		0x19c94, 0x19e10,
1753 		0x19e50, 0x19f34,
1754 		0x19f40, 0x19f50,
1755 		0x19f90, 0x19fe4,
1756 		0x1a000, 0x1a06c,
1757 		0x1a0b0, 0x1a120,
1758 		0x1a128, 0x1a138,
1759 		0x1a190, 0x1a1c4,
1760 		0x1a1fc, 0x1a1fc,
1761 		0x1e008, 0x1e00c,
1762 		0x1e040, 0x1e04c,
1763 		0x1e284, 0x1e290,
1764 		0x1e2c0, 0x1e2c0,
1765 		0x1e2e0, 0x1e2e0,
1766 		0x1e300, 0x1e384,
1767 		0x1e3c0, 0x1e3c8,
1768 		0x1e408, 0x1e40c,
1769 		0x1e440, 0x1e44c,
1770 		0x1e684, 0x1e690,
1771 		0x1e6c0, 0x1e6c0,
1772 		0x1e6e0, 0x1e6e0,
1773 		0x1e700, 0x1e784,
1774 		0x1e7c0, 0x1e7c8,
1775 		0x1e808, 0x1e80c,
1776 		0x1e840, 0x1e84c,
1777 		0x1ea84, 0x1ea90,
1778 		0x1eac0, 0x1eac0,
1779 		0x1eae0, 0x1eae0,
1780 		0x1eb00, 0x1eb84,
1781 		0x1ebc0, 0x1ebc8,
1782 		0x1ec08, 0x1ec0c,
1783 		0x1ec40, 0x1ec4c,
1784 		0x1ee84, 0x1ee90,
1785 		0x1eec0, 0x1eec0,
1786 		0x1eee0, 0x1eee0,
1787 		0x1ef00, 0x1ef84,
1788 		0x1efc0, 0x1efc8,
1789 		0x1f008, 0x1f00c,
1790 		0x1f040, 0x1f04c,
1791 		0x1f284, 0x1f290,
1792 		0x1f2c0, 0x1f2c0,
1793 		0x1f2e0, 0x1f2e0,
1794 		0x1f300, 0x1f384,
1795 		0x1f3c0, 0x1f3c8,
1796 		0x1f408, 0x1f40c,
1797 		0x1f440, 0x1f44c,
1798 		0x1f684, 0x1f690,
1799 		0x1f6c0, 0x1f6c0,
1800 		0x1f6e0, 0x1f6e0,
1801 		0x1f700, 0x1f784,
1802 		0x1f7c0, 0x1f7c8,
1803 		0x1f808, 0x1f80c,
1804 		0x1f840, 0x1f84c,
1805 		0x1fa84, 0x1fa90,
1806 		0x1fac0, 0x1fac0,
1807 		0x1fae0, 0x1fae0,
1808 		0x1fb00, 0x1fb84,
1809 		0x1fbc0, 0x1fbc8,
1810 		0x1fc08, 0x1fc0c,
1811 		0x1fc40, 0x1fc4c,
1812 		0x1fe84, 0x1fe90,
1813 		0x1fec0, 0x1fec0,
1814 		0x1fee0, 0x1fee0,
1815 		0x1ff00, 0x1ff84,
1816 		0x1ffc0, 0x1ffc8,
1817 		0x30000, 0x30030,
1818 		0x30100, 0x30144,
1819 		0x30190, 0x301d0,
1820 		0x30200, 0x30318,
1821 		0x30400, 0x3052c,
1822 		0x30540, 0x3061c,
1823 		0x30800, 0x30834,
1824 		0x308c0, 0x30908,
1825 		0x30910, 0x309ac,
1826 		0x30a00, 0x30a04,
1827 		0x30a0c, 0x30a2c,
1828 		0x30a44, 0x30a50,
1829 		0x30a74, 0x30c24,
1830 		0x30d08, 0x30d14,
1831 		0x30d1c, 0x30d20,
1832 		0x30d3c, 0x30d50,
1833 		0x31200, 0x3120c,
1834 		0x31220, 0x31220,
1835 		0x31240, 0x31240,
1836 		0x31600, 0x31600,
1837 		0x31608, 0x3160c,
1838 		0x31a00, 0x31a1c,
1839 		0x31e04, 0x31e20,
1840 		0x31e38, 0x31e3c,
1841 		0x31e80, 0x31e80,
1842 		0x31e88, 0x31ea8,
1843 		0x31eb0, 0x31eb4,
1844 		0x31ec8, 0x31ed4,
1845 		0x31fb8, 0x32004,
1846 		0x32208, 0x3223c,
1847 		0x32600, 0x32630,
1848 		0x32a00, 0x32abc,
1849 		0x32b00, 0x32b70,
1850 		0x33000, 0x33048,
1851 		0x33060, 0x3309c,
1852 		0x330f0, 0x33148,
1853 		0x33160, 0x3319c,
1854 		0x331f0, 0x332e4,
1855 		0x332f8, 0x333e4,
1856 		0x333f8, 0x33448,
1857 		0x33460, 0x3349c,
1858 		0x334f0, 0x33548,
1859 		0x33560, 0x3359c,
1860 		0x335f0, 0x336e4,
1861 		0x336f8, 0x337e4,
1862 		0x337f8, 0x337fc,
1863 		0x33814, 0x33814,
1864 		0x3382c, 0x3382c,
1865 		0x33880, 0x3388c,
1866 		0x338e8, 0x338ec,
1867 		0x33900, 0x33948,
1868 		0x33960, 0x3399c,
1869 		0x339f0, 0x33ae4,
1870 		0x33af8, 0x33b10,
1871 		0x33b28, 0x33b28,
1872 		0x33b3c, 0x33b50,
1873 		0x33bf0, 0x33c10,
1874 		0x33c28, 0x33c28,
1875 		0x33c3c, 0x33c50,
1876 		0x33cf0, 0x33cfc,
1877 		0x34000, 0x34030,
1878 		0x34100, 0x34144,
1879 		0x34190, 0x341d0,
1880 		0x34200, 0x34318,
1881 		0x34400, 0x3452c,
1882 		0x34540, 0x3461c,
1883 		0x34800, 0x34834,
1884 		0x348c0, 0x34908,
1885 		0x34910, 0x349ac,
1886 		0x34a00, 0x34a04,
1887 		0x34a0c, 0x34a2c,
1888 		0x34a44, 0x34a50,
1889 		0x34a74, 0x34c24,
1890 		0x34d08, 0x34d14,
1891 		0x34d1c, 0x34d20,
1892 		0x34d3c, 0x34d50,
1893 		0x35200, 0x3520c,
1894 		0x35220, 0x35220,
1895 		0x35240, 0x35240,
1896 		0x35600, 0x35600,
1897 		0x35608, 0x3560c,
1898 		0x35a00, 0x35a1c,
1899 		0x35e04, 0x35e20,
1900 		0x35e38, 0x35e3c,
1901 		0x35e80, 0x35e80,
1902 		0x35e88, 0x35ea8,
1903 		0x35eb0, 0x35eb4,
1904 		0x35ec8, 0x35ed4,
1905 		0x35fb8, 0x36004,
1906 		0x36208, 0x3623c,
1907 		0x36600, 0x36630,
1908 		0x36a00, 0x36abc,
1909 		0x36b00, 0x36b70,
1910 		0x37000, 0x37048,
1911 		0x37060, 0x3709c,
1912 		0x370f0, 0x37148,
1913 		0x37160, 0x3719c,
1914 		0x371f0, 0x372e4,
1915 		0x372f8, 0x373e4,
1916 		0x373f8, 0x37448,
1917 		0x37460, 0x3749c,
1918 		0x374f0, 0x37548,
1919 		0x37560, 0x3759c,
1920 		0x375f0, 0x376e4,
1921 		0x376f8, 0x377e4,
1922 		0x377f8, 0x377fc,
1923 		0x37814, 0x37814,
1924 		0x3782c, 0x3782c,
1925 		0x37880, 0x3788c,
1926 		0x378e8, 0x378ec,
1927 		0x37900, 0x37948,
1928 		0x37960, 0x3799c,
1929 		0x379f0, 0x37ae4,
1930 		0x37af8, 0x37b10,
1931 		0x37b28, 0x37b28,
1932 		0x37b3c, 0x37b50,
1933 		0x37bf0, 0x37c10,
1934 		0x37c28, 0x37c28,
1935 		0x37c3c, 0x37c50,
1936 		0x37cf0, 0x37cfc,
1937 		0x38000, 0x38030,
1938 		0x38100, 0x38144,
1939 		0x38190, 0x381d0,
1940 		0x38200, 0x38318,
1941 		0x38400, 0x3852c,
1942 		0x38540, 0x3861c,
1943 		0x38800, 0x38834,
1944 		0x388c0, 0x38908,
1945 		0x38910, 0x389ac,
1946 		0x38a00, 0x38a04,
1947 		0x38a0c, 0x38a2c,
1948 		0x38a44, 0x38a50,
1949 		0x38a74, 0x38c24,
1950 		0x38d08, 0x38d14,
1951 		0x38d1c, 0x38d20,
1952 		0x38d3c, 0x38d50,
1953 		0x39200, 0x3920c,
1954 		0x39220, 0x39220,
1955 		0x39240, 0x39240,
1956 		0x39600, 0x39600,
1957 		0x39608, 0x3960c,
1958 		0x39a00, 0x39a1c,
1959 		0x39e04, 0x39e20,
1960 		0x39e38, 0x39e3c,
1961 		0x39e80, 0x39e80,
1962 		0x39e88, 0x39ea8,
1963 		0x39eb0, 0x39eb4,
1964 		0x39ec8, 0x39ed4,
1965 		0x39fb8, 0x3a004,
1966 		0x3a208, 0x3a23c,
1967 		0x3a600, 0x3a630,
1968 		0x3aa00, 0x3aabc,
1969 		0x3ab00, 0x3ab70,
1970 		0x3b000, 0x3b048,
1971 		0x3b060, 0x3b09c,
1972 		0x3b0f0, 0x3b148,
1973 		0x3b160, 0x3b19c,
1974 		0x3b1f0, 0x3b2e4,
1975 		0x3b2f8, 0x3b3e4,
1976 		0x3b3f8, 0x3b448,
1977 		0x3b460, 0x3b49c,
1978 		0x3b4f0, 0x3b548,
1979 		0x3b560, 0x3b59c,
1980 		0x3b5f0, 0x3b6e4,
1981 		0x3b6f8, 0x3b7e4,
1982 		0x3b7f8, 0x3b7fc,
1983 		0x3b814, 0x3b814,
1984 		0x3b82c, 0x3b82c,
1985 		0x3b880, 0x3b88c,
1986 		0x3b8e8, 0x3b8ec,
1987 		0x3b900, 0x3b948,
1988 		0x3b960, 0x3b99c,
1989 		0x3b9f0, 0x3bae4,
1990 		0x3baf8, 0x3bb10,
1991 		0x3bb28, 0x3bb28,
1992 		0x3bb3c, 0x3bb50,
1993 		0x3bbf0, 0x3bc10,
1994 		0x3bc28, 0x3bc28,
1995 		0x3bc3c, 0x3bc50,
1996 		0x3bcf0, 0x3bcfc,
1997 		0x3c000, 0x3c030,
1998 		0x3c100, 0x3c144,
1999 		0x3c190, 0x3c1d0,
2000 		0x3c200, 0x3c318,
2001 		0x3c400, 0x3c52c,
2002 		0x3c540, 0x3c61c,
2003 		0x3c800, 0x3c834,
2004 		0x3c8c0, 0x3c908,
2005 		0x3c910, 0x3c9ac,
2006 		0x3ca00, 0x3ca04,
2007 		0x3ca0c, 0x3ca2c,
2008 		0x3ca44, 0x3ca50,
2009 		0x3ca74, 0x3cc24,
2010 		0x3cd08, 0x3cd14,
2011 		0x3cd1c, 0x3cd20,
2012 		0x3cd3c, 0x3cd50,
2013 		0x3d200, 0x3d20c,
2014 		0x3d220, 0x3d220,
2015 		0x3d240, 0x3d240,
2016 		0x3d600, 0x3d600,
2017 		0x3d608, 0x3d60c,
2018 		0x3da00, 0x3da1c,
2019 		0x3de04, 0x3de20,
2020 		0x3de38, 0x3de3c,
2021 		0x3de80, 0x3de80,
2022 		0x3de88, 0x3dea8,
2023 		0x3deb0, 0x3deb4,
2024 		0x3dec8, 0x3ded4,
2025 		0x3dfb8, 0x3e004,
2026 		0x3e208, 0x3e23c,
2027 		0x3e600, 0x3e630,
2028 		0x3ea00, 0x3eabc,
2029 		0x3eb00, 0x3eb70,
2030 		0x3f000, 0x3f048,
2031 		0x3f060, 0x3f09c,
2032 		0x3f0f0, 0x3f148,
2033 		0x3f160, 0x3f19c,
2034 		0x3f1f0, 0x3f2e4,
2035 		0x3f2f8, 0x3f3e4,
2036 		0x3f3f8, 0x3f448,
2037 		0x3f460, 0x3f49c,
2038 		0x3f4f0, 0x3f548,
2039 		0x3f560, 0x3f59c,
2040 		0x3f5f0, 0x3f6e4,
2041 		0x3f6f8, 0x3f7e4,
2042 		0x3f7f8, 0x3f7fc,
2043 		0x3f814, 0x3f814,
2044 		0x3f82c, 0x3f82c,
2045 		0x3f880, 0x3f88c,
2046 		0x3f8e8, 0x3f8ec,
2047 		0x3f900, 0x3f948,
2048 		0x3f960, 0x3f99c,
2049 		0x3f9f0, 0x3fae4,
2050 		0x3faf8, 0x3fb10,
2051 		0x3fb28, 0x3fb28,
2052 		0x3fb3c, 0x3fb50,
2053 		0x3fbf0, 0x3fc10,
2054 		0x3fc28, 0x3fc28,
2055 		0x3fc3c, 0x3fc50,
2056 		0x3fcf0, 0x3fcfc,
2057 		0x40000, 0x4000c,
2058 		0x40040, 0x40068,
2059 		0x40080, 0x40144,
2060 		0x40180, 0x4018c,
2061 		0x40200, 0x40298,
2062 		0x402ac, 0x4033c,
2063 		0x403f8, 0x403fc,
2064 		0x41300, 0x413c4,
2065 		0x41400, 0x4141c,
2066 		0x41480, 0x414d0,
2067 		0x44000, 0x44078,
2068 		0x440c0, 0x44278,
2069 		0x442c0, 0x44478,
2070 		0x444c0, 0x44678,
2071 		0x446c0, 0x44878,
2072 		0x448c0, 0x449fc,
2073 		0x45000, 0x45068,
2074 		0x45080, 0x45084,
2075 		0x450a0, 0x450b0,
2076 		0x45200, 0x45268,
2077 		0x45280, 0x45284,
2078 		0x452a0, 0x452b0,
2079 		0x460c0, 0x460e4,
2080 		0x47000, 0x4708c,
2081 		0x47200, 0x47250,
2082 		0x47400, 0x47420,
2083 		0x47600, 0x47618,
2084 		0x47800, 0x47814,
2085 		0x48000, 0x4800c,
2086 		0x48040, 0x48068,
2087 		0x48080, 0x48144,
2088 		0x48180, 0x4818c,
2089 		0x48200, 0x48298,
2090 		0x482ac, 0x4833c,
2091 		0x483f8, 0x483fc,
2092 		0x49300, 0x493c4,
2093 		0x49400, 0x4941c,
2094 		0x49480, 0x494d0,
2095 		0x4c000, 0x4c078,
2096 		0x4c0c0, 0x4c278,
2097 		0x4c2c0, 0x4c478,
2098 		0x4c4c0, 0x4c678,
2099 		0x4c6c0, 0x4c878,
2100 		0x4c8c0, 0x4c9fc,
2101 		0x4d000, 0x4d068,
2102 		0x4d080, 0x4d084,
2103 		0x4d0a0, 0x4d0b0,
2104 		0x4d200, 0x4d268,
2105 		0x4d280, 0x4d284,
2106 		0x4d2a0, 0x4d2b0,
2107 		0x4e0c0, 0x4e0e4,
2108 		0x4f000, 0x4f08c,
2109 		0x4f200, 0x4f250,
2110 		0x4f400, 0x4f420,
2111 		0x4f600, 0x4f618,
2112 		0x4f800, 0x4f814,
2113 		0x50000, 0x500cc,
2114 		0x50400, 0x50400,
2115 		0x50800, 0x508cc,
2116 		0x50c00, 0x50c00,
2117 		0x51000, 0x5101c,
2118 		0x51300, 0x51308,
2119 	};
2120 
2121 	int i;
2122 	struct adapter *ap = netdev2adap(dev);
2123 	const unsigned int *reg_ranges;
2124 	int arr_size = 0, buf_size = 0;
2125 
2126 	if (is_t4(ap->params.chip)) {
2127 		reg_ranges = &t4_reg_ranges[0];
2128 		arr_size = ARRAY_SIZE(t4_reg_ranges);
2129 		buf_size = T4_REGMAP_SIZE;
2130 	} else {
2131 		reg_ranges = &t5_reg_ranges[0];
2132 		arr_size = ARRAY_SIZE(t5_reg_ranges);
2133 		buf_size = T5_REGMAP_SIZE;
2134 	}
2135 
2136 	regs->version = mk_adap_vers(ap);
2137 
2138 	memset(buf, 0, buf_size);
2139 	for (i = 0; i < arr_size; i += 2)
2140 		reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
2141 }
2142 
2143 static int restart_autoneg(struct net_device *dev)
2144 {
2145 	struct port_info *p = netdev_priv(dev);
2146 
2147 	if (!netif_running(dev))
2148 		return -EAGAIN;
2149 	if (p->link_cfg.autoneg != AUTONEG_ENABLE)
2150 		return -EINVAL;
2151 	t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
2152 	return 0;
2153 }
2154 
2155 static int identify_port(struct net_device *dev,
2156 			 enum ethtool_phys_id_state state)
2157 {
2158 	unsigned int val;
2159 	struct adapter *adap = netdev2adap(dev);
2160 
2161 	if (state == ETHTOOL_ID_ACTIVE)
2162 		val = 0xffff;
2163 	else if (state == ETHTOOL_ID_INACTIVE)
2164 		val = 0;
2165 	else
2166 		return -EINVAL;
2167 
2168 	return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
2169 }
2170 
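/*
 * Translate a firmware port type and capability word into the corresponding
 * ethtool SUPPORTED_* link mode bits.
 */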
2171 static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
2172 {
2173 	unsigned int v = 0;
2174 
2175 	if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
2176 	    type == FW_PORT_TYPE_BT_XAUI) {
2177 		v |= SUPPORTED_TP;
2178 		if (caps & FW_PORT_CAP_SPEED_100M)
2179 			v |= SUPPORTED_100baseT_Full;
2180 		if (caps & FW_PORT_CAP_SPEED_1G)
2181 			v |= SUPPORTED_1000baseT_Full;
2182 		if (caps & FW_PORT_CAP_SPEED_10G)
2183 			v |= SUPPORTED_10000baseT_Full;
2184 	} else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
2185 		v |= SUPPORTED_Backplane;
2186 		if (caps & FW_PORT_CAP_SPEED_1G)
2187 			v |= SUPPORTED_1000baseKX_Full;
2188 		if (caps & FW_PORT_CAP_SPEED_10G)
2189 			v |= SUPPORTED_10000baseKX4_Full;
2190 	} else if (type == FW_PORT_TYPE_KR)
2191 		v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
2192 	else if (type == FW_PORT_TYPE_BP_AP)
2193 		v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
2194 		     SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
2195 	else if (type == FW_PORT_TYPE_BP4_AP)
2196 		v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
2197 		     SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
2198 		     SUPPORTED_10000baseKX4_Full;
2199 	else if (type == FW_PORT_TYPE_FIBER_XFI ||
2200 		 type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
2201 		v |= SUPPORTED_FIBRE;
2202 
2203 	if (caps & FW_PORT_CAP_ANEG)
2204 		v |= SUPPORTED_Autoneg;
2205 	return v;
2206 }
2207 
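/*
 * Translate ethtool ADVERTISED_* speed bits into firmware port capabilities.
 */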
2208 static unsigned int to_fw_linkcaps(unsigned int caps)
2209 {
2210 	unsigned int v = 0;
2211 
2212 	if (caps & ADVERTISED_100baseT_Full)
2213 		v |= FW_PORT_CAP_SPEED_100M;
2214 	if (caps & ADVERTISED_1000baseT_Full)
2215 		v |= FW_PORT_CAP_SPEED_1G;
2216 	if (caps & ADVERTISED_10000baseT_Full)
2217 		v |= FW_PORT_CAP_SPEED_10G;
2218 	return v;
2219 }
2220 
2221 static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2222 {
2223 	const struct port_info *p = netdev_priv(dev);
2224 
2225 	if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
2226 	    p->port_type == FW_PORT_TYPE_BT_XFI ||
2227 	    p->port_type == FW_PORT_TYPE_BT_XAUI)
2228 		cmd->port = PORT_TP;
2229 	else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
2230 		 p->port_type == FW_PORT_TYPE_FIBER_XAUI)
2231 		cmd->port = PORT_FIBRE;
2232 	else if (p->port_type == FW_PORT_TYPE_SFP) {
2233 		if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
2234 		    p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
2235 			cmd->port = PORT_DA;
2236 		else
2237 			cmd->port = PORT_FIBRE;
2238 	} else
2239 		cmd->port = PORT_OTHER;
2240 
2241 	if (p->mdio_addr >= 0) {
2242 		cmd->phy_address = p->mdio_addr;
2243 		cmd->transceiver = XCVR_EXTERNAL;
2244 		cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
2245 			MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
2246 	} else {
2247 		cmd->phy_address = 0;  /* not really, but no better option */
2248 		cmd->transceiver = XCVR_INTERNAL;
2249 		cmd->mdio_support = 0;
2250 	}
2251 
2252 	cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
2253 	cmd->advertising = from_fw_linkcaps(p->port_type,
2254 					    p->link_cfg.advertising);
2255 	ethtool_cmd_speed_set(cmd,
2256 			      netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
2257 	cmd->duplex = DUPLEX_FULL;
2258 	cmd->autoneg = p->link_cfg.autoneg;
2259 	cmd->maxtxpkt = 0;
2260 	cmd->maxrxpkt = 0;
2261 	return 0;
2262 }
2263 
2264 static unsigned int speed_to_caps(int speed)
2265 {
2266 	if (speed == SPEED_100)
2267 		return FW_PORT_CAP_SPEED_100M;
2268 	if (speed == SPEED_1000)
2269 		return FW_PORT_CAP_SPEED_1G;
2270 	if (speed == SPEED_10000)
2271 		return FW_PORT_CAP_SPEED_10G;
2272 	return 0;
2273 }
2274 
2275 static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2276 {
2277 	unsigned int cap;
2278 	struct port_info *p = netdev_priv(dev);
2279 	struct link_config *lc = &p->link_cfg;
2280 	u32 speed = ethtool_cmd_speed(cmd);
2281 
2282 	if (cmd->duplex != DUPLEX_FULL)     /* only full-duplex supported */
2283 		return -EINVAL;
2284 
2285 	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
2286 		/*
2287 		 * PHY offers a single speed.  See if that's what's
2288 		 * being requested.
2289 		 */
2290 		if (cmd->autoneg == AUTONEG_DISABLE &&
2291 		    (lc->supported & speed_to_caps(speed)))
2292 			return 0;
2293 		return -EINVAL;
2294 	}
2295 
2296 	if (cmd->autoneg == AUTONEG_DISABLE) {
2297 		cap = speed_to_caps(speed);
2298 
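		/*
		 * Forcing the link speed is only allowed for 100Mb/s;
		 * 1Gb/s and 10Gb/s must be negotiated.
		 */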
2299 		if (!(lc->supported & cap) || (speed == SPEED_1000) ||
2300 		    (speed == SPEED_10000))
2301 			return -EINVAL;
2302 		lc->requested_speed = cap;
2303 		lc->advertising = 0;
2304 	} else {
2305 		cap = to_fw_linkcaps(cmd->advertising);
2306 		if (!(lc->supported & cap))
2307 			return -EINVAL;
2308 		lc->requested_speed = 0;
2309 		lc->advertising = cap | FW_PORT_CAP_ANEG;
2310 	}
2311 	lc->autoneg = cmd->autoneg;
2312 
2313 	if (netif_running(dev))
2314 		return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
2315 				     lc);
2316 	return 0;
2317 }
2318 
2319 static void get_pauseparam(struct net_device *dev,
2320 			   struct ethtool_pauseparam *epause)
2321 {
2322 	struct port_info *p = netdev_priv(dev);
2323 
2324 	epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
2325 	epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
2326 	epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
2327 }
2328 
2329 static int set_pauseparam(struct net_device *dev,
2330 			  struct ethtool_pauseparam *epause)
2331 {
2332 	struct port_info *p = netdev_priv(dev);
2333 	struct link_config *lc = &p->link_cfg;
2334 
2335 	if (epause->autoneg == AUTONEG_DISABLE)
2336 		lc->requested_fc = 0;
2337 	else if (lc->supported & FW_PORT_CAP_ANEG)
2338 		lc->requested_fc = PAUSE_AUTONEG;
2339 	else
2340 		return -EINVAL;
2341 
2342 	if (epause->rx_pause)
2343 		lc->requested_fc |= PAUSE_RX;
2344 	if (epause->tx_pause)
2345 		lc->requested_fc |= PAUSE_TX;
2346 	if (netif_running(dev))
2347 		return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
2348 				     lc);
2349 	return 0;
2350 }
2351 
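/*
 * Report ring sizes.  The free-list size includes 8 entries that the driver
 * reserves internally, so subtract them here and add them back in
 * set_sge_param() below.
 */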
2352 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
2353 {
2354 	const struct port_info *pi = netdev_priv(dev);
2355 	const struct sge *s = &pi->adapter->sge;
2356 
2357 	e->rx_max_pending = MAX_RX_BUFFERS;
2358 	e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
2359 	e->rx_jumbo_max_pending = 0;
2360 	e->tx_max_pending = MAX_TXQ_ENTRIES;
2361 
2362 	e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
2363 	e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
2364 	e->rx_jumbo_pending = 0;
2365 	e->tx_pending = s->ethtxq[pi->first_qset].q.size;
2366 }
2367 
2368 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
2369 {
2370 	int i;
2371 	const struct port_info *pi = netdev_priv(dev);
2372 	struct adapter *adapter = pi->adapter;
2373 	struct sge *s = &adapter->sge;
2374 
2375 	if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
2376 	    e->tx_pending > MAX_TXQ_ENTRIES ||
2377 	    e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
2378 	    e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
2379 	    e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
2380 		return -EINVAL;
2381 
2382 	if (adapter->flags & FULL_INIT_DONE)
2383 		return -EBUSY;
2384 
2385 	for (i = 0; i < pi->nqsets; ++i) {
2386 		s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
2387 		s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
2388 		s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
2389 	}
2390 	return 0;
2391 }
2392 
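/*
 * Return the index of the SGE interrupt hold-off timer whose value is
 * closest to the requested time (in us).
 */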
2393 static int closest_timer(const struct sge *s, int time)
2394 {
2395 	int i, delta, match = 0, min_delta = INT_MAX;
2396 
2397 	for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
2398 		delta = time - s->timer_val[i];
2399 		if (delta < 0)
2400 			delta = -delta;
2401 		if (delta < min_delta) {
2402 			min_delta = delta;
2403 			match = i;
2404 		}
2405 	}
2406 	return match;
2407 }
2408 
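/*
 * Likewise, return the index of the interrupt packet-count threshold closest
 * to the requested value.
 */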
2409 static int closest_thres(const struct sge *s, int thres)
2410 {
2411 	int i, delta, match = 0, min_delta = INT_MAX;
2412 
2413 	for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
2414 		delta = thres - s->counter_val[i];
2415 		if (delta < 0)
2416 			delta = -delta;
2417 		if (delta < min_delta) {
2418 			min_delta = delta;
2419 			match = i;
2420 		}
2421 	}
2422 	return match;
2423 }
2424 
2425 /*
2426  * Return a queue's interrupt hold-off time in us.  0 means no timer.
2427  */
2428 static unsigned int qtimer_val(const struct adapter *adap,
2429 			       const struct sge_rspq *q)
2430 {
2431 	unsigned int idx = q->intr_params >> 1;
2432 
2433 	return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
2434 }
2435 
2436 /**
2437  *	set_rxq_intr_params - set a queue's interrupt holdoff parameters
2438  *	@adap: the adapter
2439  *	@q: the Rx queue
2440  *	@us: the hold-off time in us, or 0 to disable timer
2441  *	@cnt: the hold-off packet count, or 0 to disable counter
2442  *
2443  *	Sets an Rx queue's interrupt hold-off time and packet count.  At least
2444  *	one of the two needs to be enabled for the queue to generate interrupts.
2445  */
2446 static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q,
2447 			       unsigned int us, unsigned int cnt)
2448 {
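	/*
	 * If both the hold-off timer and the packet counter are disabled the
	 * queue could never raise an interrupt, so fall back to a packet
	 * count of 1.
	 */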
2449 	if ((us | cnt) == 0)
2450 		cnt = 1;
2451 
2452 	if (cnt) {
2453 		int err;
2454 		u32 v, new_idx;
2455 
2456 		new_idx = closest_thres(&adap->sge, cnt);
2457 		if (q->desc && q->pktcnt_idx != new_idx) {
2458 			/* the queue has already been created, update it */
2459 			v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
2460 			    FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
2461 			    FW_PARAMS_PARAM_YZ(q->cntxt_id);
2462 			err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
2463 					    &new_idx);
2464 			if (err)
2465 				return err;
2466 		}
2467 		q->pktcnt_idx = new_idx;
2468 	}
2469 
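	/*
	 * Timer index 6 lies past the last SGE timer value and selects
	 * counter-only mode, i.e. no hold-off timer (qtimer_val() above
	 * reports it as 0).
	 */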
2470 	us = us == 0 ? 6 : closest_timer(&adap->sge, us);
2471 	q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
2472 	return 0;
2473 }
2474 
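/*
 * Apply the requested interrupt coalescing settings to every Rx queue
 * belonging to the port.
 */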
2475 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2476 {
2477 	const struct port_info *pi = netdev_priv(dev);
2478 	struct adapter *adap = pi->adapter;
2479 	struct sge_rspq *q;
2480 	int i;
2481 	int r = 0;
2482 
2483 	for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++) {
2484 		q = &adap->sge.ethrxq[i].rspq;
2485 		r = set_rxq_intr_params(adap, q, c->rx_coalesce_usecs,
2486 			c->rx_max_coalesced_frames);
2487 		if (r) {
2488 			dev_err(&dev->dev, "failed to set coalesce params: %d\n", r);
2489 			break;
2490 		}
2491 	}
2492 	return r;
2493 }
2494 
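/*
 * Report the interrupt coalescing settings of the port's first Rx queue;
 * set_coalesce() above applies the same settings to all of the port's queues.
 */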
2495 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2496 {
2497 	const struct port_info *pi = netdev_priv(dev);
2498 	const struct adapter *adap = pi->adapter;
2499 	const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
2500 
2501 	c->rx_coalesce_usecs = qtimer_val(adap, rq);
2502 	c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
2503 		adap->sge.counter_val[rq->pktcnt_idx] : 0;
2504 	return 0;
2505 }
2506 
2507 /**
2508  *	eeprom_ptov - translate a physical EEPROM address to virtual
2509  *	@phys_addr: the physical EEPROM address
2510  *	@fn: the PCI function number
2511  *	@sz: size of function-specific area
2512  *
2513  *	Translate a physical EEPROM address to virtual.  The first 1K is
2514  *	accessed through virtual addresses starting at 31K, the rest is
2515  *	accessed through virtual addresses starting at 0.
2516  *
2517  *	The mapping is as follows:
2518  *	[0..1K) -> [31K..32K)
2519  *	[1K..1K+A) -> [31K-A..31K)
2520  *	[1K+A..ES) -> [0..ES-A-1K)
2521  *
2522  *	where A = @fn * @sz, and ES = EEPROM size.
2523  */
2524 static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
2525 {
2526 	fn *= sz;
2527 	if (phys_addr < 1024)
2528 		return phys_addr + (31 << 10);
2529 	if (phys_addr < 1024 + fn)
2530 		return 31744 - fn + phys_addr - 1024;
2531 	if (phys_addr < EEPROMSIZE)
2532 		return phys_addr - 1024 - fn;
2533 	return -EINVAL;
2534 }
2535 
2536 /*
2537  * The next two routines implement eeprom read/write from physical addresses.
2538  */
2539 static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
2540 {
2541 	int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
2542 
2543 	if (vaddr >= 0)
2544 		vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
2545 	return vaddr < 0 ? vaddr : 0;
2546 }
2547 
2548 static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
2549 {
2550 	int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
2551 
2552 	if (vaddr >= 0)
2553 		vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
2554 	return vaddr < 0 ? vaddr : 0;
2555 }
2556 
2557 #define EEPROM_MAGIC 0x38E2F10C
2558 
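/*
 * Read part of the EEPROM.  The EEPROM is accessed through the PCI VPD
 * capability one aligned 32-bit word at a time, so read whole words into a
 * bounce buffer and copy out the requested slice.
 */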
2559 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
2560 		      u8 *data)
2561 {
2562 	int i, err = 0;
2563 	struct adapter *adapter = netdev2adap(dev);
2564 
2565 	u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
2566 	if (!buf)
2567 		return -ENOMEM;
2568 
2569 	e->magic = EEPROM_MAGIC;
2570 	for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
2571 		err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
2572 
2573 	if (!err)
2574 		memcpy(data, buf + e->offset, e->len);
2575 	kfree(buf);
2576 	return err;
2577 }
2578 
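/*
 * Write part of the EEPROM.  Physical functions other than PF0 may only
 * write their own VPD area.  Unaligned first/last words are handled by
 * read-modify-write, and write protection is lifted only for the duration
 * of the update.
 */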
2579 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
2580 		      u8 *data)
2581 {
2582 	u8 *buf;
2583 	int err = 0;
2584 	u32 aligned_offset, aligned_len, *p;
2585 	struct adapter *adapter = netdev2adap(dev);
2586 
2587 	if (eeprom->magic != EEPROM_MAGIC)
2588 		return -EINVAL;
2589 
2590 	aligned_offset = eeprom->offset & ~3;
2591 	aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
2592 
2593 	if (adapter->fn > 0) {
2594 		u32 start = 1024 + adapter->fn * EEPROMPFSIZE;
2595 
2596 		if (aligned_offset < start ||
2597 		    aligned_offset + aligned_len > start + EEPROMPFSIZE)
2598 			return -EPERM;
2599 	}
2600 
2601 	if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
2602 		/*
2603 		 * RMW possibly needed for first or last words.
2604 		 */
2605 		buf = kmalloc(aligned_len, GFP_KERNEL);
2606 		if (!buf)
2607 			return -ENOMEM;
2608 		err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
2609 		if (!err && aligned_len > 4)
2610 			err = eeprom_rd_phys(adapter,
2611 					     aligned_offset + aligned_len - 4,
2612 					     (u32 *)&buf[aligned_len - 4]);
2613 		if (err)
2614 			goto out;
2615 		memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
2616 	} else
2617 		buf = data;
2618 
2619 	err = t4_seeprom_wp(adapter, false);
2620 	if (err)
2621 		goto out;
2622 
2623 	for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
2624 		err = eeprom_wr_phys(adapter, aligned_offset, *p);
2625 		aligned_offset += 4;
2626 	}
2627 
2628 	if (!err)
2629 		err = t4_seeprom_wp(adapter, true);
2630 out:
2631 	if (buf != data)
2632 		kfree(buf);
2633 	return err;
2634 }
2635 
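/*
 * Flash a firmware image.  The image is fetched via request_firmware()
 * using the name supplied through ethtool and then written to the adapter's
 * flash.
 */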
2636 static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
2637 {
2638 	int ret;
2639 	const struct firmware *fw;
2640 	struct adapter *adap = netdev2adap(netdev);
2641 
2642 	ef->data[sizeof(ef->data) - 1] = '\0';
2643 	ret = request_firmware(&fw, ef->data, adap->pdev_dev);
2644 	if (ret < 0)
2645 		return ret;
2646 
2647 	ret = t4_load_fw(adap, fw->data, fw->size);
2648 	release_firmware(fw);
2649 	if (!ret)
2650 		dev_info(adap->pdev_dev, "loaded firmware %s\n", ef->data);
2651 	return ret;
2652 }
2653 
2654 #define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
2655 #define BCAST_CRC 0xa0ccc1a6
2656 
2657 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2658 {
2659 	wol->supported = WOL_SUPPORTED;
2660 	wol->wolopts = netdev2adap(dev)->wol;
2661 	memset(&wol->sopass, 0, sizeof(wol->sopass));
2662 }
2663 
2664 static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2665 {
2666 	int err = 0;
2667 	struct port_info *pi = netdev_priv(dev);
2668 
2669 	if (wol->wolopts & ~WOL_SUPPORTED)
2670 		return -EINVAL;
2671 	t4_wol_magic_enable(pi->adapter, pi->tx_chan,
2672 			    (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
2673 	if (wol->wolopts & WAKE_BCAST) {
2674 		err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
2675 					~0ULL, 0, false);
2676 		if (!err)
2677 			err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
2678 						~6ULL, ~0ULL, BCAST_CRC, true);
2679 	} else
2680 		t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
2681 	return err;
2682 }
2683 
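/*
 * Only VLAN RX offload changes need to reach the hardware; propagate them
 * through the firmware RX mode command and back out the feature bit if that
 * fails.
 */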
2684 static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
2685 {
2686 	const struct port_info *pi = netdev_priv(dev);
2687 	netdev_features_t changed = dev->features ^ features;
2688 	int err;
2689 
2690 	if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
2691 		return 0;
2692 
2693 	err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
2694 			    -1, -1, -1,
2695 			    !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
2696 	if (unlikely(err))
2697 		dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
2698 	return err;
2699 }
2700 
2701 static u32 get_rss_table_size(struct net_device *dev)
2702 {
2703 	const struct port_info *pi = netdev_priv(dev);
2704 
2705 	return pi->rss_size;
2706 }
2707 
2708 static int get_rss_table(struct net_device *dev, u32 *p)
2709 {
2710 	const struct port_info *pi = netdev_priv(dev);
2711 	unsigned int n = pi->rss_size;
2712 
2713 	while (n--)
2714 		p[n] = pi->rss[n];
2715 	return 0;
2716 }
2717 
2718 static int set_rss_table(struct net_device *dev, const u32 *p)
2719 {
2720 	unsigned int i;
2721 	struct port_info *pi = netdev_priv(dev);
2722 
2723 	for (i = 0; i < pi->rss_size; i++)
2724 		pi->rss[i] = p[i];
2725 	if (pi->adapter->flags & FULL_INIT_DONE)
2726 		return write_rss(pi, pi->rss);
2727 	return 0;
2728 }
2729 
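/*
 * Report the RSS hashing fields per flow type (ETHTOOL_GRXFH) and the number
 * of Rx rings (ETHTOOL_GRXRINGS).
 */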
2730 static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
2731 		     u32 *rules)
2732 {
2733 	const struct port_info *pi = netdev_priv(dev);
2734 
2735 	switch (info->cmd) {
2736 	case ETHTOOL_GRXFH: {
2737 		unsigned int v = pi->rss_mode;
2738 
2739 		info->data = 0;
2740 		switch (info->flow_type) {
2741 		case TCP_V4_FLOW:
2742 			if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
2743 				info->data = RXH_IP_SRC | RXH_IP_DST |
2744 					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
2745 			else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
2746 				info->data = RXH_IP_SRC | RXH_IP_DST;
2747 			break;
2748 		case UDP_V4_FLOW:
2749 			if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) &&
2750 			    (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
2751 				info->data = RXH_IP_SRC | RXH_IP_DST |
2752 					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
2753 			else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
2754 				info->data = RXH_IP_SRC | RXH_IP_DST;
2755 			break;
2756 		case SCTP_V4_FLOW:
2757 		case AH_ESP_V4_FLOW:
2758 		case IPV4_FLOW:
2759 			if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
2760 				info->data = RXH_IP_SRC | RXH_IP_DST;
2761 			break;
2762 		case TCP_V6_FLOW:
2763 			if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
2764 				info->data = RXH_IP_SRC | RXH_IP_DST |
2765 					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
2766 			else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
2767 				info->data = RXH_IP_SRC | RXH_IP_DST;
2768 			break;
2769 		case UDP_V6_FLOW:
2770 			if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) &&
2771 			    (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
2772 				info->data = RXH_IP_SRC | RXH_IP_DST |
2773 					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
2774 			else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
2775 				info->data = RXH_IP_SRC | RXH_IP_DST;
2776 			break;
2777 		case SCTP_V6_FLOW:
2778 		case AH_ESP_V6_FLOW:
2779 		case IPV6_FLOW:
2780 			if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
2781 				info->data = RXH_IP_SRC | RXH_IP_DST;
2782 			break;
2783 		}
2784 		return 0;
2785 	}
2786 	case ETHTOOL_GRXRINGS:
2787 		info->data = pi->nqsets;
2788 		return 0;
2789 	}
2790 	return -EOPNOTSUPP;
2791 }
2792 
2793 static const struct ethtool_ops cxgb_ethtool_ops = {
2794 	.get_settings      = get_settings,
2795 	.set_settings      = set_settings,
2796 	.get_drvinfo       = get_drvinfo,
2797 	.get_msglevel      = get_msglevel,
2798 	.set_msglevel      = set_msglevel,
2799 	.get_ringparam     = get_sge_param,
2800 	.set_ringparam     = set_sge_param,
2801 	.get_coalesce      = get_coalesce,
2802 	.set_coalesce      = set_coalesce,
2803 	.get_eeprom_len    = get_eeprom_len,
2804 	.get_eeprom        = get_eeprom,
2805 	.set_eeprom        = set_eeprom,
2806 	.get_pauseparam    = get_pauseparam,
2807 	.set_pauseparam    = set_pauseparam,
2808 	.get_link          = ethtool_op_get_link,
2809 	.get_strings       = get_strings,
2810 	.set_phys_id       = identify_port,
2811 	.nway_reset        = restart_autoneg,
2812 	.get_sset_count    = get_sset_count,
2813 	.get_ethtool_stats = get_stats,
2814 	.get_regs_len      = get_regs_len,
2815 	.get_regs          = get_regs,
2816 	.get_wol           = get_wol,
2817 	.set_wol           = set_wol,
2818 	.get_rxnfc         = get_rxnfc,
2819 	.get_rxfh_indir_size = get_rss_table_size,
2820 	.get_rxfh_indir    = get_rss_table,
2821 	.set_rxfh_indir    = set_rss_table,
2822 	.flash_device      = set_flash,
2823 };
2824 
2825 /*
2826  * debugfs support
2827  */
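/*
 * Read adapter memory (EDC0/EDC1/MC) one 64-byte chunk at a time and copy
 * the requested window to user space.  The memory type is encoded in the
 * low bits of the file's private data (see add_debugfs_mem() below).
 */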
2828 static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
2829 			loff_t *ppos)
2830 {
2831 	loff_t pos = *ppos;
2832 	loff_t avail = file_inode(file)->i_size;
2833 	unsigned int mem = (uintptr_t)file->private_data & 3;
2834 	struct adapter *adap = file->private_data - mem;
2835 
2836 	if (pos < 0)
2837 		return -EINVAL;
2838 	if (pos >= avail)
2839 		return 0;
2840 	if (count > avail - pos)
2841 		count = avail - pos;
2842 
2843 	while (count) {
2844 		size_t len;
2845 		int ret, ofst;
2846 		__be32 data[16];
2847 
2848 		if ((mem == MEM_MC) || (mem == MEM_MC1))
2849 			ret = t4_mc_read(adap, mem % MEM_MC, pos, data, NULL);
2850 		else
2851 			ret = t4_edc_read(adap, mem, pos, data, NULL);
2852 		if (ret)
2853 			return ret;
2854 
2855 		ofst = pos % sizeof(data);
2856 		len = min(count, sizeof(data) - ofst);
2857 		if (copy_to_user(buf, (u8 *)data + ofst, len))
2858 			return -EFAULT;
2859 
2860 		buf += len;
2861 		pos += len;
2862 		count -= len;
2863 	}
2864 	count = pos - *ppos;
2865 	*ppos = pos;
2866 	return count;
2867 }
2868 
2869 static const struct file_operations mem_debugfs_fops = {
2870 	.owner   = THIS_MODULE,
2871 	.open    = simple_open,
2872 	.read    = mem_read,
2873 	.llseek  = default_llseek,
2874 };
2875 
2876 static void add_debugfs_mem(struct adapter *adap, const char *name,
2877 			    unsigned int idx, unsigned int size_mb)
2878 {
2879 	struct dentry *de;
2880 
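	/*
	 * Encode the memory type index in the low bits of the adapter
	 * pointer; mem_read() above recovers it from the file's private data.
	 */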
2881 	de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root,
2882 				 (void *)adap + idx, &mem_debugfs_fops);
2883 	if (de && de->d_inode)
2884 		de->d_inode->i_size = size_mb << 20;
2885 }
2886 
2887 static int setup_debugfs(struct adapter *adap)
2888 {
2889 	int i;
2890 	u32 size;
2891 
2892 	if (IS_ERR_OR_NULL(adap->debugfs_root))
2893 		return -1;
2894 
2895 	i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE);
2896 	if (i & EDRAM0_ENABLE) {
2897 		size = t4_read_reg(adap, MA_EDRAM0_BAR);
2898 		add_debugfs_mem(adap, "edc0", MEM_EDC0,	EDRAM_SIZE_GET(size));
2899 	}
2900 	if (i & EDRAM1_ENABLE) {
2901 		size = t4_read_reg(adap, MA_EDRAM1_BAR);
2902 		add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM_SIZE_GET(size));
2903 	}
2904 	if (is_t4(adap->params.chip)) {
2905 		size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
2906 		if (i & EXT_MEM_ENABLE)
2907 			add_debugfs_mem(adap, "mc", MEM_MC,
2908 					EXT_MEM_SIZE_GET(size));
2909 	} else {
2910 		if (i & EXT_MEM_ENABLE) {
2911 			size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
2912 			add_debugfs_mem(adap, "mc0", MEM_MC0,
2913 					EXT_MEM_SIZE_GET(size));
2914 		}
2915 		if (i & EXT_MEM1_ENABLE) {
2916 			size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR);
2917 			add_debugfs_mem(adap, "mc1", MEM_MC1,
2918 					EXT_MEM_SIZE_GET(size));
2919 		}
2920 	}
2921 	if (adap->l2t)
2922 		debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap,
2923 				    &t4_l2t_fops);
2924 	return 0;
2925 }
2926 
2927 /*
2928  * upper-layer driver support
2929  */
2930 
2931 /*
2932  * Allocate an active-open TID and set it to the supplied value.
2933  */
2934 int cxgb4_alloc_atid(struct tid_info *t, void *data)
2935 {
2936 	int atid = -1;
2937 
2938 	spin_lock_bh(&t->atid_lock);
2939 	if (t->afree) {
2940 		union aopen_entry *p = t->afree;
2941 
2942 		atid = (p - t->atid_tab) + t->atid_base;
2943 		t->afree = p->next;
2944 		p->data = data;
2945 		t->atids_in_use++;
2946 	}
2947 	spin_unlock_bh(&t->atid_lock);
2948 	return atid;
2949 }
2950 EXPORT_SYMBOL(cxgb4_alloc_atid);
2951 
2952 /*
2953  * Release an active-open TID.
2954  */
2955 void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
2956 {
2957 	union aopen_entry *p = &t->atid_tab[atid - t->atid_base];
2958 
2959 	spin_lock_bh(&t->atid_lock);
2960 	p->next = t->afree;
2961 	t->afree = p;
2962 	t->atids_in_use--;
2963 	spin_unlock_bh(&t->atid_lock);
2964 }
2965 EXPORT_SYMBOL(cxgb4_free_atid);
2966 
2967 /*
2968  * Allocate a server TID and set it to the supplied value.
2969  */
2970 int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
2971 {
2972 	int stid;
2973 
2974 	spin_lock_bh(&t->stid_lock);
2975 	if (family == PF_INET) {
2976 		stid = find_first_zero_bit(t->stid_bmap, t->nstids);
2977 		if (stid < t->nstids)
2978 			__set_bit(stid, t->stid_bmap);
2979 		else
2980 			stid = -1;
2981 	} else {
2982 		stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
2983 		if (stid < 0)
2984 			stid = -1;
2985 	}
2986 	if (stid >= 0) {
2987 		t->stid_tab[stid].data = data;
2988 		stid += t->stid_base;
2989 		/* An IPv6 server needs up to 520 bits (16 TCAM cells), which
2990 		 * is equivalent to 4 TIDs.  With CLIP enabled it needs only
2991 		 * 2 TIDs.
2992 		 */
2993 		if (family == PF_INET)
2994 			t->stids_in_use++;
2995 		else
2996 			t->stids_in_use += 4;
2997 	}
2998 	spin_unlock_bh(&t->stid_lock);
2999 	return stid;
3000 }
3001 EXPORT_SYMBOL(cxgb4_alloc_stid);
3002 
3003 /* Allocate a server filter TID and set it to the supplied value.
3004  */
3005 int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
3006 {
3007 	int stid;
3008 
3009 	spin_lock_bh(&t->stid_lock);
3010 	if (family == PF_INET) {
3011 		stid = find_next_zero_bit(t->stid_bmap,
3012 				t->nstids + t->nsftids, t->nstids);
3013 		if (stid < (t->nstids + t->nsftids))
3014 			__set_bit(stid, t->stid_bmap);
3015 		else
3016 			stid = -1;
3017 	} else {
3018 		stid = -1;
3019 	}
3020 	if (stid >= 0) {
3021 		t->stid_tab[stid].data = data;
3022 		stid -= t->nstids;
3023 		stid += t->sftid_base;
3024 		t->stids_in_use++;
3025 	}
3026 	spin_unlock_bh(&t->stid_lock);
3027 	return stid;
3028 }
3029 EXPORT_SYMBOL(cxgb4_alloc_sftid);
3030 
3031 /* Release a server TID.
3032  */
3033 void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
3034 {
3035 	/* Is it a server filter TID? */
3036 	if (t->nsftids && (stid >= t->sftid_base)) {
3037 		stid -= t->sftid_base;
3038 		stid += t->nstids;
3039 	} else {
3040 		stid -= t->stid_base;
3041 	}
3042 
3043 	spin_lock_bh(&t->stid_lock);
3044 	if (family == PF_INET)
3045 		__clear_bit(stid, t->stid_bmap);
3046 	else
3047 		bitmap_release_region(t->stid_bmap, stid, 2);
3048 	t->stid_tab[stid].data = NULL;
3049 	if (family == PF_INET)
3050 		t->stids_in_use--;
3051 	else
3052 		t->stids_in_use -= 4;
3053 	spin_unlock_bh(&t->stid_lock);
3054 }
3055 EXPORT_SYMBOL(cxgb4_free_stid);
3056 
3057 /*
3058  * Populate a TID_RELEASE WR.  Caller must properly size the skb.
3059  */
3060 static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
3061 			   unsigned int tid)
3062 {
3063 	struct cpl_tid_release *req;
3064 
3065 	set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
3066 	req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
3067 	INIT_TP_WR(req, tid);
3068 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
3069 }
3070 
3071 /*
3072  * Queue a TID release request and if necessary schedule a work queue to
3073  * process it.
3074  */
3075 static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
3076 				    unsigned int tid)
3077 {
3078 	void **p = &t->tid_tab[tid];
3079 	struct adapter *adap = container_of(t, struct adapter, tids);
3080 
3081 	spin_lock_bh(&adap->tid_release_lock);
3082 	*p = adap->tid_release_head;
3083 	/* Low 2 bits encode the Tx channel number */
3084 	adap->tid_release_head = (void **)((uintptr_t)p | chan);
3085 	if (!adap->tid_release_task_busy) {
3086 		adap->tid_release_task_busy = true;
3087 		queue_work(workq, &adap->tid_release_task);
3088 	}
3089 	spin_unlock_bh(&adap->tid_release_lock);
3090 }
3091 
3092 /*
3093  * Process the list of pending TID release requests.
3094  */
3095 static void process_tid_release_list(struct work_struct *work)
3096 {
3097 	struct sk_buff *skb;
3098 	struct adapter *adap;
3099 
3100 	adap = container_of(work, struct adapter, tid_release_task);
3101 
3102 	spin_lock_bh(&adap->tid_release_lock);
3103 	while (adap->tid_release_head) {
3104 		void **p = adap->tid_release_head;
3105 		unsigned int chan = (uintptr_t)p & 3;
3106 		p = (void *)p - chan;
3107 
3108 		adap->tid_release_head = *p;
3109 		*p = NULL;
3110 		spin_unlock_bh(&adap->tid_release_lock);
3111 
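		/*
		 * The release must not be dropped, so keep retrying the
		 * allocation until an skb is available.
		 */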
3112 		while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
3113 					 GFP_KERNEL)))
3114 			schedule_timeout_uninterruptible(1);
3115 
3116 		mk_tid_release(skb, chan, p - adap->tids.tid_tab);
3117 		t4_ofld_send(adap, skb);
3118 		spin_lock_bh(&adap->tid_release_lock);
3119 	}
3120 	adap->tid_release_task_busy = false;
3121 	spin_unlock_bh(&adap->tid_release_lock);
3122 }
3123 
3124 /*
3125  * Release a TID and inform HW.  If we are unable to allocate the release
3126  * message we defer to a work queue.
3127  */
3128 void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
3129 {
3130 	void *old;
3131 	struct sk_buff *skb;
3132 	struct adapter *adap = container_of(t, struct adapter, tids);
3133 
3134 	old = t->tid_tab[tid];
3135 	skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
3136 	if (likely(skb)) {
3137 		t->tid_tab[tid] = NULL;
3138 		mk_tid_release(skb, chan, tid);
3139 		t4_ofld_send(adap, skb);
3140 	} else
3141 		cxgb4_queue_tid_release(t, chan, tid);
3142 	if (old)
3143 		atomic_dec(&t->tids_in_use);
3144 }
3145 EXPORT_SYMBOL(cxgb4_remove_tid);
3146 
3147 /*
3148  * Allocate and initialize the TID tables.  Returns 0 on success.
3149  */
3150 static int tid_init(struct tid_info *t)
3151 {
3152 	size_t size;
3153 	unsigned int stid_bmap_size;
3154 	unsigned int natids = t->natids;
3155 	struct adapter *adap = container_of(t, struct adapter, tids);
3156 
3157 	stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
3158 	size = t->ntids * sizeof(*t->tid_tab) +
3159 	       natids * sizeof(*t->atid_tab) +
3160 	       t->nstids * sizeof(*t->stid_tab) +
3161 	       t->nsftids * sizeof(*t->stid_tab) +
3162 	       stid_bmap_size * sizeof(long) +
3163 	       t->nftids * sizeof(*t->ftid_tab) +
3164 	       t->nsftids * sizeof(*t->ftid_tab);
3165 
3166 	t->tid_tab = t4_alloc_mem(size);
3167 	if (!t->tid_tab)
3168 		return -ENOMEM;
3169 
3170 	t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
3171 	t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
3172 	t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
3173 	t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
3174 	spin_lock_init(&t->stid_lock);
3175 	spin_lock_init(&t->atid_lock);
3176 
3177 	t->stids_in_use = 0;
3178 	t->afree = NULL;
3179 	t->atids_in_use = 0;
3180 	atomic_set(&t->tids_in_use, 0);
3181 
3182 	/* Setup the free list for atid_tab and clear the stid bitmap. */
3183 	if (natids) {
3184 		while (--natids)
3185 			t->atid_tab[natids - 1].next = &t->atid_tab[natids];
3186 		t->afree = t->atid_tab;
3187 	}
3188 	bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
3189 	/* Reserve stid 0 for T4/T5 adapters */
3190 	if (!t->stid_base &&
3191 	    (is_t4(adap->params.chip) || is_t5(adap->params.chip)))
3192 		__set_bit(0, t->stid_bmap);
3193 
3194 	return 0;
3195 }
3196 
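/*
 * The next two routines install and remove an IPv6 address in the hardware
 * CLIP (compressed local IP) table using FW_CLIP_CMD.
 */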
3197 static int cxgb4_clip_get(const struct net_device *dev,
3198 			  const struct in6_addr *lip)
3199 {
3200 	struct adapter *adap;
3201 	struct fw_clip_cmd c;
3202 
3203 	adap = netdev2adap(dev);
3204 	memset(&c, 0, sizeof(c));
3205 	c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) |
3206 			FW_CMD_REQUEST | FW_CMD_WRITE);
3207 	c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_ALLOC | FW_LEN16(c));
3208 	*(__be64 *)&c.ip_hi = *(__be64 *)(lip->s6_addr);
3209 	*(__be64 *)&c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
3210 	return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
3211 }
3212 
3213 static int cxgb4_clip_release(const struct net_device *dev,
3214 			      const struct in6_addr *lip)
3215 {
3216 	struct adapter *adap;
3217 	struct fw_clip_cmd c;
3218 
3219 	adap = netdev2adap(dev);
3220 	memset(&c, 0, sizeof(c));
3221 	c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) |
3222 			FW_CMD_REQUEST | FW_CMD_READ);
3223 	c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_FREE | FW_LEN16(c));
3224 	*(__be64 *)&c.ip_hi = *(__be64 *)(lip->s6_addr);
3225 	*(__be64 *)&c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
3226 	return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
3227 }
3228 
3229 /**
3230  *	cxgb4_create_server - create an IP server
3231  *	@dev: the device
3232  *	@stid: the server TID
3233  *	@sip: local IP address to bind server to
3234  *	@sport: the server's TCP port
3235  *	@queue: queue to direct messages from this server to
3236  *
3237  *	Create an IP server for the given port and address.
3238  *	Returns <0 on error and one of the %NET_XMIT_* values on success.
3239  */
3240 int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
3241 			__be32 sip, __be16 sport, __be16 vlan,
3242 			unsigned int queue)
3243 {
3244 	unsigned int chan;
3245 	struct sk_buff *skb;
3246 	struct adapter *adap;
3247 	struct cpl_pass_open_req *req;
3248 	int ret;
3249 
3250 	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3251 	if (!skb)
3252 		return -ENOMEM;
3253 
3254 	adap = netdev2adap(dev);
3255 	req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
3256 	INIT_TP_WR(req, 0);
3257 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
3258 	req->local_port = sport;
3259 	req->peer_port = htons(0);
3260 	req->local_ip = sip;
3261 	req->peer_ip = htonl(0);
3262 	chan = rxq_to_chan(&adap->sge, queue);
3263 	req->opt0 = cpu_to_be64(TX_CHAN(chan));
3264 	req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
3265 				SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
3266 	ret = t4_mgmt_tx(adap, skb);
3267 	return net_xmit_eval(ret);
3268 }
3269 EXPORT_SYMBOL(cxgb4_create_server);
3270 
3271 /**
 *	cxgb4_create_server6 - create an IPv6 server
3272  *	@dev: the device
3273  *	@stid: the server TID
3274  *	@sip: local IPv6 address to bind server to
3275  *	@sport: the server's TCP port
3276  *	@queue: queue to direct messages from this server to
3277  *
3278  *	Create an IPv6 server for the given port and address.
3279  *	Returns <0 on error and one of the %NET_XMIT_* values on success.
3280  */
3281 int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
3282 			 const struct in6_addr *sip, __be16 sport,
3283 			 unsigned int queue)
3284 {
3285 	unsigned int chan;
3286 	struct sk_buff *skb;
3287 	struct adapter *adap;
3288 	struct cpl_pass_open_req6 *req;
3289 	int ret;
3290 
3291 	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3292 	if (!skb)
3293 		return -ENOMEM;
3294 
3295 	adap = netdev2adap(dev);
3296 	req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req));
3297 	INIT_TP_WR(req, 0);
3298 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
3299 	req->local_port = sport;
3300 	req->peer_port = htons(0);
3301 	req->local_ip_hi = *(__be64 *)(sip->s6_addr);
3302 	req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
3303 	req->peer_ip_hi = cpu_to_be64(0);
3304 	req->peer_ip_lo = cpu_to_be64(0);
3305 	chan = rxq_to_chan(&adap->sge, queue);
3306 	req->opt0 = cpu_to_be64(TX_CHAN(chan));
3307 	req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
3308 				SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
3309 	ret = t4_mgmt_tx(adap, skb);
3310 	return net_xmit_eval(ret);
3311 }
3312 EXPORT_SYMBOL(cxgb4_create_server6);
3313 
3314 int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
3315 			unsigned int queue, bool ipv6)
3316 {
3317 	struct sk_buff *skb;
3318 	struct adapter *adap;
3319 	struct cpl_close_listsvr_req *req;
3320 	int ret;
3321 
3322 	adap = netdev2adap(dev);
3323 
3324 	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3325 	if (!skb)
3326 		return -ENOMEM;
3327 
3328 	req = (struct cpl_close_listsvr_req *)__skb_put(skb, sizeof(*req));
3329 	INIT_TP_WR(req, 0);
3330 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
3331 	req->reply_ctrl = htons(NO_REPLY(0) | (ipv6 ? LISTSVR_IPV6(1) :
3332 				LISTSVR_IPV6(0)) | QUEUENO(queue));
3333 	ret = t4_mgmt_tx(adap, skb);
3334 	return net_xmit_eval(ret);
3335 }
3336 EXPORT_SYMBOL(cxgb4_remove_server);
3337 
3338 /**
3339  *	cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
3340  *	@mtus: the HW MTU table
3341  *	@mtu: the target MTU
3342  *	@idx: index of selected entry in the MTU table
3343  *
3344  *	Returns the index and the value in the HW MTU table that is closest to
3345  *	but does not exceed @mtu, unless @mtu is smaller than any value in the
3346  *	table, in which case that smallest available value is selected.
3347  */
3348 unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
3349 			    unsigned int *idx)
3350 {
3351 	unsigned int i = 0;
3352 
3353 	while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
3354 		++i;
3355 	if (idx)
3356 		*idx = i;
3357 	return mtus[i];
3358 }
3359 EXPORT_SYMBOL(cxgb4_best_mtu);
3360 
3361 /**
3362  *	cxgb4_port_chan - get the HW channel of a port
3363  *	@dev: the net device for the port
3364  *
3365  *	Return the HW Tx channel of the given port.
3366  */
3367 unsigned int cxgb4_port_chan(const struct net_device *dev)
3368 {
3369 	return netdev2pinfo(dev)->tx_chan;
3370 }
3371 EXPORT_SYMBOL(cxgb4_port_chan);
3372 
3373 unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
3374 {
3375 	struct adapter *adap = netdev2adap(dev);
3376 	u32 v1, v2, lp_count, hp_count;
3377 
3378 	v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
3379 	v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
3380 	if (is_t4(adap->params.chip)) {
3381 		lp_count = G_LP_COUNT(v1);
3382 		hp_count = G_HP_COUNT(v1);
3383 	} else {
3384 		lp_count = G_LP_COUNT_T5(v1);
3385 		hp_count = G_HP_COUNT_T5(v2);
3386 	}
3387 	return lpfifo ? lp_count : hp_count;
3388 }
3389 EXPORT_SYMBOL(cxgb4_dbfifo_count);
3390 
3391 /**
3392  *	cxgb4_port_viid - get the VI id of a port
3393  *	@dev: the net device for the port
3394  *
3395  *	Return the VI id of the given port.
3396  */
3397 unsigned int cxgb4_port_viid(const struct net_device *dev)
3398 {
3399 	return netdev2pinfo(dev)->viid;
3400 }
3401 EXPORT_SYMBOL(cxgb4_port_viid);
3402 
3403 /**
3404  *	cxgb4_port_idx - get the index of a port
3405  *	@dev: the net device for the port
3406  *
3407  *	Return the index of the given port.
3408  */
3409 unsigned int cxgb4_port_idx(const struct net_device *dev)
3410 {
3411 	return netdev2pinfo(dev)->port_id;
3412 }
3413 EXPORT_SYMBOL(cxgb4_port_idx);
3414 
3415 void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
3416 			 struct tp_tcp_stats *v6)
3417 {
3418 	struct adapter *adap = pci_get_drvdata(pdev);
3419 
3420 	spin_lock(&adap->stats_lock);
3421 	t4_tp_get_tcp_stats(adap, v4, v6);
3422 	spin_unlock(&adap->stats_lock);
3423 }
3424 EXPORT_SYMBOL(cxgb4_get_tcp_stats);
3425 
3426 void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
3427 		      const unsigned int *pgsz_order)
3428 {
3429 	struct adapter *adap = netdev2adap(dev);
3430 
3431 	t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask);
3432 	t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) |
3433 		     HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) |
3434 		     HPZ3(pgsz_order[3]));
3435 }
3436 EXPORT_SYMBOL(cxgb4_iscsi_init);
3437 
3438 int cxgb4_flush_eq_cache(struct net_device *dev)
3439 {
3440 	struct adapter *adap = netdev2adap(dev);
3441 	int ret;
3442 
3443 	ret = t4_fwaddrspace_write(adap, adap->mbox,
3444 				   0xe1000000 + A_SGE_CTXT_CMD, 0x20000000);
3445 	return ret;
3446 }
3447 EXPORT_SYMBOL(cxgb4_flush_eq_cache);
3448 
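/*
 * Read an egress queue's hardware producer and consumer indices from its SGE
 * context through the memory window.
 */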
3449 static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
3450 {
3451 	u32 addr = t4_read_reg(adap, A_SGE_DBQ_CTXT_BADDR) + 24 * qid + 8;
3452 	__be64 indices;
3453 	int ret;
3454 
3455 	ret = t4_mem_win_read_len(adap, addr, (__be32 *)&indices, 8);
3456 	if (!ret) {
3457 		*cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
3458 		*pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
3459 	}
3460 	return ret;
3461 }
3462 
3463 int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
3464 			u16 size)
3465 {
3466 	struct adapter *adap = netdev2adap(dev);
3467 	u16 hw_pidx, hw_cidx;
3468 	int ret;
3469 
3470 	ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
3471 	if (ret)
3472 		goto out;
3473 
3474 	if (pidx != hw_pidx) {
3475 		u16 delta;
3476 
3477 		if (pidx >= hw_pidx)
3478 			delta = pidx - hw_pidx;
3479 		else
3480 			delta = size - hw_pidx + pidx;
3481 		wmb();
3482 		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
3483 			     QID(qid) | PIDX(delta));
3484 	}
3485 out:
3486 	return ret;
3487 }
3488 EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
3489 
3490 void cxgb4_disable_db_coalescing(struct net_device *dev)
3491 {
3492 	struct adapter *adap;
3493 
3494 	adap = netdev2adap(dev);
3495 	t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE,
3496 			 F_NOCOALESCE);
3497 }
3498 EXPORT_SYMBOL(cxgb4_disable_db_coalescing);
3499 
3500 void cxgb4_enable_db_coalescing(struct net_device *dev)
3501 {
3502 	struct adapter *adap;
3503 
3504 	adap = netdev2adap(dev);
3505 	t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE, 0);
3506 }
3507 EXPORT_SYMBOL(cxgb4_enable_db_coalescing);
3508 
3509 static struct pci_driver cxgb4_driver;
3510 
3511 static void check_neigh_update(struct neighbour *neigh)
3512 {
3513 	const struct device *parent;
3514 	const struct net_device *netdev = neigh->dev;
3515 
3516 	if (netdev->priv_flags & IFF_802_1Q_VLAN)
3517 		netdev = vlan_dev_real_dev(netdev);
3518 	parent = netdev->dev.parent;
3519 	if (parent && parent->driver == &cxgb4_driver.driver)
3520 		t4_l2t_update(dev_get_drvdata(parent), neigh);
3521 }
3522 
3523 static int netevent_cb(struct notifier_block *nb, unsigned long event,
3524 		       void *data)
3525 {
3526 	switch (event) {
3527 	case NETEVENT_NEIGH_UPDATE:
3528 		check_neigh_update(data);
3529 		break;
3530 	case NETEVENT_REDIRECT:
3531 	default:
3532 		break;
3533 	}
3534 	return 0;
3535 }
3536 
3537 static bool netevent_registered;
3538 static struct notifier_block cxgb4_netevent_nb = {
3539 	.notifier_call = netevent_cb
3540 };
3541 
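/*
 * Wait for both the low- and high-priority SGE doorbell FIFOs to drain,
 * sleeping for @usecs between polls.
 */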
3542 static void drain_db_fifo(struct adapter *adap, int usecs)
3543 {
3544 	u32 v1, v2, lp_count, hp_count;
3545 
3546 	do {
3547 		v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
3548 		v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
3549 		if (is_t4(adap->params.chip)) {
3550 			lp_count = G_LP_COUNT(v1);
3551 			hp_count = G_HP_COUNT(v1);
3552 		} else {
3553 			lp_count = G_LP_COUNT_T5(v1);
3554 			hp_count = G_HP_COUNT_T5(v2);
3555 		}
3556 
3557 		if (lp_count == 0 && hp_count == 0)
3558 			break;
3559 		set_current_state(TASK_UNINTERRUPTIBLE);
3560 		schedule_timeout(usecs_to_jiffies(usecs));
3561 	} while (1);
3562 }
3563 
3564 static void disable_txq_db(struct sge_txq *q)
3565 {
3566 	spin_lock_irq(&q->db_lock);
3567 	q->db_disabled = 1;
3568 	spin_unlock_irq(&q->db_lock);
3569 }
3570 
3571 static void enable_txq_db(struct sge_txq *q)
3572 {
3573 	spin_lock_irq(&q->db_lock);
3574 	q->db_disabled = 0;
3575 	spin_unlock_irq(&q->db_lock);
3576 }
3577 
3578 static void disable_dbs(struct adapter *adap)
3579 {
3580 	int i;
3581 
3582 	for_each_ethrxq(&adap->sge, i)
3583 		disable_txq_db(&adap->sge.ethtxq[i].q);
3584 	for_each_ofldrxq(&adap->sge, i)
3585 		disable_txq_db(&adap->sge.ofldtxq[i].q);
3586 	for_each_port(adap, i)
3587 		disable_txq_db(&adap->sge.ctrlq[i].q);
3588 }
3589 
3590 static void enable_dbs(struct adapter *adap)
3591 {
3592 	int i;
3593 
3594 	for_each_ethrxq(&adap->sge, i)
3595 		enable_txq_db(&adap->sge.ethtxq[i].q);
3596 	for_each_ofldrxq(&adap->sge, i)
3597 		enable_txq_db(&adap->sge.ofldtxq[i].q);
3598 	for_each_port(adap, i)
3599 		enable_txq_db(&adap->sge.ctrlq[i].q);
3600 }
3601 
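/*
 * Bring a Tx queue's hardware producer index back in sync with the driver's
 * copy after doorbells were dropped, then re-enable its doorbells.
 */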
3602 static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
3603 {
3604 	u16 hw_pidx, hw_cidx;
3605 	int ret;
3606 
3607 	spin_lock_bh(&q->db_lock);
3608 	ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
3609 	if (ret)
3610 		goto out;
3611 	if (q->db_pidx != hw_pidx) {
3612 		u16 delta;
3613 
3614 		if (q->db_pidx >= hw_pidx)
3615 			delta = q->db_pidx - hw_pidx;
3616 		else
3617 			delta = q->size - hw_pidx + q->db_pidx;
3618 		wmb();
3619 		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
3620 			     QID(q->cntxt_id) | PIDX(delta));
3621 	}
3622 out:
3623 	q->db_disabled = 0;
3624 	spin_unlock_bh(&q->db_lock);
3625 	if (ret)
3626 		CH_WARN(adap, "DB drop recovery failed.\n");
3627 }

3628 static void recover_all_queues(struct adapter *adap)
3629 {
3630 	int i;
3631 
3632 	for_each_ethrxq(&adap->sge, i)
3633 		sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
3634 	for_each_ofldrxq(&adap->sge, i)
3635 		sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
3636 	for_each_port(adap, i)
3637 		sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
3638 }
3639 
3640 static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
3641 {
3642 	mutex_lock(&uld_mutex);
3643 	if (adap->uld_handle[CXGB4_ULD_RDMA])
3644 		ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
3645 				cmd);
3646 	mutex_unlock(&uld_mutex);
3647 }
3648 
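/*
 * Work handler run when the SGE reports that the doorbell FIFO is filling
 * up: ask the RDMA ULD to stop ringing doorbells, wait for the FIFO to
 * drain, then re-arm the FIFO interrupts and let the ULD resume.
 */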
3649 static void process_db_full(struct work_struct *work)
3650 {
3651 	struct adapter *adap;
3652 
3653 	adap = container_of(work, struct adapter, db_full_task);
3654 
3655 	notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
3656 	drain_db_fifo(adap, dbfifo_drain_delay);
3657 	t4_set_reg_field(adap, SGE_INT_ENABLE3,
3658 			 DBFIFO_HP_INT | DBFIFO_LP_INT,
3659 			 DBFIFO_HP_INT | DBFIFO_LP_INT);
3660 	notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
3661 }
3662 
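/*
 * Work handler for dropped doorbells.  On T4 every queue is quiesced and its
 * producer index re-synced; on T5 the dropped doorbell is re-issued directly
 * through the BAR2 user doorbell region.
 */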
3663 static void process_db_drop(struct work_struct *work)
3664 {
3665 	struct adapter *adap;
3666 
3667 	adap = container_of(work, struct adapter, db_drop_task);
3668 
3669 	if (is_t4(adap->params.chip)) {
3670 		disable_dbs(adap);
3671 		notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
3672 		drain_db_fifo(adap, 1);
3673 		recover_all_queues(adap);
3674 		enable_dbs(adap);
3675 	} else {
3676 		u32 dropped_db = t4_read_reg(adap, 0x010ac);
3677 		u16 qid = (dropped_db >> 15) & 0x1ffff;
3678 		u16 pidx_inc = dropped_db & 0x1fff;
3679 		unsigned int s_qpp;
3680 		unsigned short udb_density;
3681 		unsigned long qpshift;
3682 		int page;
3683 		u32 udb;
3684 
3685 		dev_warn(adap->pdev_dev,
3686 			 "Dropped DB 0x%x qid %d bar2 %d coalesce %d pidx %d\n",
3687 			 dropped_db, qid,
3688 			 (dropped_db >> 14) & 1,
3689 			 (dropped_db >> 13) & 1,
3690 			 pidx_inc);
3691 
3692 		drain_db_fifo(adap, 1);
3693 
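		/* Recompute the BAR2 user doorbell address for the dropped
		 * queue: the per-PF QUEUESPERPAGEPF field gives how many
		 * egress queues share a doorbell page and each queue owns a
		 * 128-byte slot within it.  Then replay the lost PIDX
		 * increment directly through BAR2.
		 */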
3694 		s_qpp = QUEUESPERPAGEPF1 * adap->fn;
3695 		udb_density = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adap,
3696 				SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
3697 		qpshift = PAGE_SHIFT - ilog2(udb_density);
3698 		udb = qid << qpshift;
3699 		udb &= PAGE_MASK;
3700 		page = udb / PAGE_SIZE;
3701 		udb += (qid - (page * udb_density)) * 128;
3702 
3703 		writel(PIDX(pidx_inc),  adap->bar2 + udb + 8);
3704 
3705 		/* Re-enable BAR2 WC */
3706 		t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
3707 	}
3708 
3709 	t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_DROPPED_DB, 0);
3710 }
3711 
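/* Doorbell FIFO threshold handling (T4 only): mask the DBFIFO interrupts and
 * defer the actual FIFO drain to the db_full_task workqueue.
 */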
3712 void t4_db_full(struct adapter *adap)
3713 {
3714 	if (is_t4(adap->params.chip)) {
3715 		t4_set_reg_field(adap, SGE_INT_ENABLE3,
3716 				 DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
3717 		queue_work(workq, &adap->db_full_task);
3718 	}
3719 }
3720 
3721 void t4_db_dropped(struct adapter *adap)
3722 {
3723 	if (is_t4(adap->params.chip))
3724 		queue_work(workq, &adap->db_drop_task);
3725 }
3726 
3727 static void uld_attach(struct adapter *adap, unsigned int uld)
3728 {
3729 	void *handle;
3730 	struct cxgb4_lld_info lli;
3731 	unsigned short i;
3732 
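	/* Describe the adapter's resources (queues, TID ranges, doorbell and
	 * GTS registers, etc.) to the upper-layer driver via the lld_info
	 * structure handed to its ->add() callback.
	 */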
3733 	lli.pdev = adap->pdev;
3734 	lli.l2t = adap->l2t;
3735 	lli.tids = &adap->tids;
3736 	lli.ports = adap->port;
3737 	lli.vr = &adap->vres;
3738 	lli.mtus = adap->params.mtus;
3739 	if (uld == CXGB4_ULD_RDMA) {
3740 		lli.rxq_ids = adap->sge.rdma_rxq;
3741 		lli.nrxq = adap->sge.rdmaqs;
3742 	} else if (uld == CXGB4_ULD_ISCSI) {
3743 		lli.rxq_ids = adap->sge.ofld_rxq;
3744 		lli.nrxq = adap->sge.ofldqsets;
3745 	}
3746 	lli.ntxq = adap->sge.ofldqsets;
3747 	lli.nchan = adap->params.nports;
3748 	lli.nports = adap->params.nports;
3749 	lli.wr_cred = adap->params.ofldq_wr_cred;
3750 	lli.adapter_type = adap->params.chip;
3751 	lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
3752 	lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
3753 			t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >>
3754 			(adap->fn * 4));
3755 	lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
3756 			t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
3757 			(adap->fn * 4));
3758 	lli.filt_mode = adap->params.tp.vlan_pri_map;
3759 	/* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
3760 	for (i = 0; i < NCHAN; i++)
3761 		lli.tx_modq[i] = i;
3762 	lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
3763 	lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
3764 	lli.fw_vers = adap->params.fw_vers;
3765 	lli.dbfifo_int_thresh = dbfifo_int_thresh;
3766 	lli.sge_pktshift = adap->sge.pktshift;
3767 	lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
3768 
3769 	handle = ulds[uld].add(&lli);
3770 	if (IS_ERR(handle)) {
3771 		dev_warn(adap->pdev_dev,
3772 			 "could not attach to the %s driver, error %ld\n",
3773 			 uld_str[uld], PTR_ERR(handle));
3774 		return;
3775 	}
3776 
3777 	adap->uld_handle[uld] = handle;
3778 
3779 	if (!netevent_registered) {
3780 		register_netevent_notifier(&cxgb4_netevent_nb);
3781 		netevent_registered = true;
3782 	}
3783 
3784 	if (adap->flags & FULL_INIT_DONE)
3785 		ulds[uld].state_change(handle, CXGB4_STATE_UP);
3786 }
3787 
3788 static void attach_ulds(struct adapter *adap)
3789 {
3790 	unsigned int i;
3791 
3792 	spin_lock(&adap_rcu_lock);
3793 	list_add_tail_rcu(&adap->rcu_node, &adap_rcu_list);
3794 	spin_unlock(&adap_rcu_lock);
3795 
3796 	mutex_lock(&uld_mutex);
3797 	list_add_tail(&adap->list_node, &adapter_list);
3798 	for (i = 0; i < CXGB4_ULD_MAX; i++)
3799 		if (ulds[i].add)
3800 			uld_attach(adap, i);
3801 	mutex_unlock(&uld_mutex);
3802 }
3803 
3804 static void detach_ulds(struct adapter *adap)
3805 {
3806 	unsigned int i;
3807 
3808 	mutex_lock(&uld_mutex);
3809 	list_del(&adap->list_node);
3810 	for (i = 0; i < CXGB4_ULD_MAX; i++)
3811 		if (adap->uld_handle[i]) {
3812 			ulds[i].state_change(adap->uld_handle[i],
3813 					     CXGB4_STATE_DETACH);
3814 			adap->uld_handle[i] = NULL;
3815 		}
3816 	if (netevent_registered && list_empty(&adapter_list)) {
3817 		unregister_netevent_notifier(&cxgb4_netevent_nb);
3818 		netevent_registered = false;
3819 	}
3820 	mutex_unlock(&uld_mutex);
3821 
3822 	spin_lock(&adap_rcu_lock);
3823 	list_del_rcu(&adap->rcu_node);
3824 	spin_unlock(&adap_rcu_lock);
3825 }
3826 
3827 static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
3828 {
3829 	unsigned int i;
3830 
3831 	mutex_lock(&uld_mutex);
3832 	for (i = 0; i < CXGB4_ULD_MAX; i++)
3833 		if (adap->uld_handle[i])
3834 			ulds[i].state_change(adap->uld_handle[i], new_state);
3835 	mutex_unlock(&uld_mutex);
3836 }
3837 
3838 /**
3839  *	cxgb4_register_uld - register an upper-layer driver
3840  *	@type: the ULD type
3841  *	@p: the ULD methods
3842  *
3843  *	Registers an upper-layer driver with this driver and notifies the ULD
3844  *	about any presently available devices that support its type.  Returns
3845  *	%-EBUSY if a ULD of the same type is already registered.
3846  */
3847 int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
3848 {
3849 	int ret = 0;
3850 	struct adapter *adap;
3851 
3852 	if (type >= CXGB4_ULD_MAX)
3853 		return -EINVAL;
3854 	mutex_lock(&uld_mutex);
3855 	if (ulds[type].add) {
3856 		ret = -EBUSY;
3857 		goto out;
3858 	}
3859 	ulds[type] = *p;
3860 	list_for_each_entry(adap, &adapter_list, list_node)
3861 		uld_attach(adap, type);
3862 out:	mutex_unlock(&uld_mutex);
3863 	return ret;
3864 }
3865 EXPORT_SYMBOL(cxgb4_register_uld);
3866 
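/*
 * A minimal registration sketch (hypothetical callback names; real ULDs such
 * as the RDMA and iSCSI drivers supply additional methods in cxgb4_uld_info):
 *
 *	static const struct cxgb4_uld_info my_uld_info = {
 *		.add		= my_add,
 *		.state_change	= my_state_change,
 *	};
 *
 *	err = cxgb4_register_uld(CXGB4_ULD_RDMA, &my_uld_info);
 */
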
3867 /**
3868  *	cxgb4_unregister_uld - unregister an upper-layer driver
3869  *	@type: the ULD type
3870  *
3871  *	Unregisters an existing upper-layer driver.
3872  */
3873 int cxgb4_unregister_uld(enum cxgb4_uld type)
3874 {
3875 	struct adapter *adap;
3876 
3877 	if (type >= CXGB4_ULD_MAX)
3878 		return -EINVAL;
3879 	mutex_lock(&uld_mutex);
3880 	list_for_each_entry(adap, &adapter_list, list_node)
3881 		adap->uld_handle[type] = NULL;
3882 	ulds[type].add = NULL;
3883 	mutex_unlock(&uld_mutex);
3884 	return 0;
3885 }
3886 EXPORT_SYMBOL(cxgb4_unregister_uld);
3887 
/* Check if the netdev on which the event occurred belongs to us or not.
 * Return success (1) if it belongs, otherwise failure (0).
 */
3891 static int cxgb4_netdev(struct net_device *netdev)
3892 {
3893 	struct adapter *adap;
3894 	int i;
3895 
3896 	spin_lock(&adap_rcu_lock);
3897 	list_for_each_entry_rcu(adap, &adap_rcu_list, rcu_node)
3898 		for (i = 0; i < MAX_NPORTS; i++)
3899 			if (adap->port[i] == netdev) {
3900 				spin_unlock(&adap_rcu_lock);
3901 				return 1;
3902 			}
3903 	spin_unlock(&adap_rcu_lock);
3904 	return 0;
3905 }
3906 
3907 static int clip_add(struct net_device *event_dev, struct inet6_ifaddr *ifa,
3908 		    unsigned long event)
3909 {
3910 	int ret = NOTIFY_DONE;
3911 
3912 	rcu_read_lock();
3913 	if (cxgb4_netdev(event_dev)) {
3914 		switch (event) {
3915 		case NETDEV_UP:
3916 			ret = cxgb4_clip_get(event_dev,
3917 				(const struct in6_addr *)ifa->addr.s6_addr);
3918 			if (ret < 0) {
3919 				rcu_read_unlock();
3920 				return ret;
3921 			}
3922 			ret = NOTIFY_OK;
3923 			break;
3924 		case NETDEV_DOWN:
3925 			cxgb4_clip_release(event_dev,
3926 				(const struct in6_addr *)ifa->addr.s6_addr);
3927 			ret = NOTIFY_OK;
3928 			break;
3929 		default:
3930 			break;
3931 		}
3932 	}
3933 	rcu_read_unlock();
3934 	return ret;
3935 }
3936 
3937 static int cxgb4_inet6addr_handler(struct notifier_block *this,
3938 		unsigned long event, void *data)
3939 {
3940 	struct inet6_ifaddr *ifa = data;
3941 	struct net_device *event_dev;
3942 	int ret = NOTIFY_DONE;
3943 	struct bonding *bond = netdev_priv(ifa->idev->dev);
3944 	struct list_head *iter;
3945 	struct slave *slave;
3946 	struct pci_dev *first_pdev = NULL;
3947 
3948 	if (ifa->idev->dev->priv_flags & IFF_802_1Q_VLAN) {
3949 		event_dev = vlan_dev_real_dev(ifa->idev->dev);
3950 		ret = clip_add(event_dev, ifa, event);
3951 	} else if (ifa->idev->dev->flags & IFF_MASTER) {
		/* A bond may contain slaves from two different adapters.  Find
		 * each distinct adapter and add the CLIP entry to it only
		 * once.
		 */
3956 		read_lock(&bond->lock);
3957 		bond_for_each_slave(bond, slave, iter) {
3958 			if (!first_pdev) {
3959 				ret = clip_add(slave->dev, ifa, event);
				/* Only initialize first_pdev if clip_add
				 * succeeded, since that means the device
				 * is ours.
				 */
3963 				if (ret == NOTIFY_OK)
3964 					first_pdev = to_pci_dev(
3965 							slave->dev->dev.parent);
3966 			} else if (first_pdev !=
3967 				   to_pci_dev(slave->dev->dev.parent))
3968 					ret = clip_add(slave->dev, ifa, event);
3969 		}
3970 		read_unlock(&bond->lock);
3971 	} else
3972 		ret = clip_add(ifa->idev->dev, ifa, event);
3973 
3974 	return ret;
3975 }
3976 
3977 static struct notifier_block cxgb4_inet6addr_notifier = {
3978 	.notifier_call = cxgb4_inet6addr_handler
3979 };
3980 
3981 /* Retrieves IPv6 addresses from a root device (bond, vlan) associated with
3982  * a physical device.
 * The physical device reference is needed to send the actual CLIP command.
3984  */
3985 static int update_dev_clip(struct net_device *root_dev, struct net_device *dev)
3986 {
3987 	struct inet6_dev *idev = NULL;
3988 	struct inet6_ifaddr *ifa;
3989 	int ret = 0;
3990 
3991 	idev = __in6_dev_get(root_dev);
3992 	if (!idev)
3993 		return ret;
3994 
3995 	read_lock_bh(&idev->lock);
3996 	list_for_each_entry(ifa, &idev->addr_list, if_list) {
3997 		ret = cxgb4_clip_get(dev,
3998 				(const struct in6_addr *)ifa->addr.s6_addr);
3999 		if (ret < 0)
4000 			break;
4001 	}
4002 	read_unlock_bh(&idev->lock);
4003 
4004 	return ret;
4005 }
4006 
4007 static int update_root_dev_clip(struct net_device *dev)
4008 {
4009 	struct net_device *root_dev = NULL;
4010 	int i, ret = 0;
4011 
4012 	/* First populate the real net device's IPv6 addresses */
4013 	ret = update_dev_clip(dev, dev);
4014 	if (ret)
4015 		return ret;
4016 
4017 	/* Parse all bond and vlan devices layered on top of the physical dev */
4018 	for (i = 0; i < VLAN_N_VID; i++) {
4019 		root_dev = __vlan_find_dev_deep(dev, htons(ETH_P_8021Q), i);
4020 		if (!root_dev)
4021 			continue;
4022 
4023 		ret = update_dev_clip(root_dev, dev);
4024 		if (ret)
4025 			break;
4026 	}
4027 	return ret;
4028 }
4029 
4030 static void update_clip(const struct adapter *adap)
4031 {
4032 	int i;
4033 	struct net_device *dev;
4034 	int ret;
4035 
4036 	rcu_read_lock();
4037 
4038 	for (i = 0; i < MAX_NPORTS; i++) {
4039 		dev = adap->port[i];
4040 		ret = 0;
4041 
4042 		if (dev)
4043 			ret = update_root_dev_clip(dev);
4044 
4045 		if (ret < 0)
4046 			break;
4047 	}
4048 	rcu_read_unlock();
4049 }
4050 
4051 /**
4052  *	cxgb_up - enable the adapter
4053  *	@adap: adapter being enabled
4054  *
4055  *	Called when the first port is enabled, this function performs the
4056  *	actions necessary to make an adapter operational, such as completing
4057  *	the initialization of HW modules, and enabling interrupts.
4058  *
4059  *	Must be called with the rtnl lock held.
4060  */
4061 static int cxgb_up(struct adapter *adap)
4062 {
4063 	int err;
4064 
4065 	err = setup_sge_queues(adap);
4066 	if (err)
4067 		goto out;
4068 	err = setup_rss(adap);
4069 	if (err)
4070 		goto freeq;
4071 
4072 	if (adap->flags & USING_MSIX) {
4073 		name_msix_vecs(adap);
4074 		err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
4075 				  adap->msix_info[0].desc, adap);
4076 		if (err)
4077 			goto irq_err;
4078 
4079 		err = request_msix_queue_irqs(adap);
4080 		if (err) {
4081 			free_irq(adap->msix_info[0].vec, adap);
4082 			goto irq_err;
4083 		}
4084 	} else {
4085 		err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
4086 				  (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
4087 				  adap->port[0]->name, adap);
4088 		if (err)
4089 			goto irq_err;
4090 	}
4091 	enable_rx(adap);
4092 	t4_sge_start(adap);
4093 	t4_intr_enable(adap);
4094 	adap->flags |= FULL_INIT_DONE;
4095 	notify_ulds(adap, CXGB4_STATE_UP);
4096 	update_clip(adap);
4097  out:
4098 	return err;
4099  irq_err:
4100 	dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
4101  freeq:
4102 	t4_free_sge_resources(adap);
4103 	goto out;
4104 }
4105 
4106 static void cxgb_down(struct adapter *adapter)
4107 {
4108 	t4_intr_disable(adapter);
4109 	cancel_work_sync(&adapter->tid_release_task);
4110 	cancel_work_sync(&adapter->db_full_task);
4111 	cancel_work_sync(&adapter->db_drop_task);
4112 	adapter->tid_release_task_busy = false;
4113 	adapter->tid_release_head = NULL;
4114 
4115 	if (adapter->flags & USING_MSIX) {
4116 		free_msix_queue_irqs(adapter);
4117 		free_irq(adapter->msix_info[0].vec, adapter);
4118 	} else
4119 		free_irq(adapter->pdev->irq, adapter);
4120 	quiesce_rx(adapter);
4121 	t4_sge_stop(adapter);
4122 	t4_free_sge_resources(adapter);
4123 	adapter->flags &= ~FULL_INIT_DONE;
4124 }
4125 
4126 /*
4127  * net_device operations
4128  */
4129 static int cxgb_open(struct net_device *dev)
4130 {
4131 	int err;
4132 	struct port_info *pi = netdev_priv(dev);
4133 	struct adapter *adapter = pi->adapter;
4134 
4135 	netif_carrier_off(dev);
4136 
4137 	if (!(adapter->flags & FULL_INIT_DONE)) {
4138 		err = cxgb_up(adapter);
4139 		if (err < 0)
4140 			return err;
4141 	}
4142 
4143 	err = link_start(dev);
4144 	if (!err)
4145 		netif_tx_start_all_queues(dev);
4146 	return err;
4147 }
4148 
4149 static int cxgb_close(struct net_device *dev)
4150 {
4151 	struct port_info *pi = netdev_priv(dev);
4152 	struct adapter *adapter = pi->adapter;
4153 
4154 	netif_tx_stop_all_queues(dev);
4155 	netif_carrier_off(dev);
4156 	return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
4157 }
4158 
4159 /* Return an error number if the indicated filter isn't writable ...
4160  */
4161 static int writable_filter(struct filter_entry *f)
4162 {
4163 	if (f->locked)
4164 		return -EPERM;
4165 	if (f->pending)
4166 		return -EBUSY;
4167 
4168 	return 0;
4169 }
4170 
/* Delete the filter at the specified index (if valid).  This checks for all
 * the common problems with doing so, such as the filter being locked or
 * currently pending in another operation.
 */
4175 static int delete_filter(struct adapter *adapter, unsigned int fidx)
4176 {
4177 	struct filter_entry *f;
4178 	int ret;
4179 
4180 	if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
4181 		return -EINVAL;
4182 
4183 	f = &adapter->tids.ftid_tab[fidx];
4184 	ret = writable_filter(f);
4185 	if (ret)
4186 		return ret;
4187 	if (f->valid)
4188 		return del_filter_wr(adapter, fidx);
4189 
4190 	return 0;
4191 }
4192 
4193 int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
4194 		__be32 sip, __be16 sport, __be16 vlan,
4195 		unsigned int queue, unsigned char port, unsigned char mask)
4196 {
4197 	int ret;
4198 	struct filter_entry *f;
4199 	struct adapter *adap;
4200 	int i;
4201 	u8 *val;
4202 
4203 	adap = netdev2adap(dev);
4204 
4205 	/* Adjust stid to correct filter index */
4206 	stid -= adap->tids.sftid_base;
4207 	stid += adap->tids.nftids;
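	/* Server filter TIDs sit immediately after the regular filter region
	 * in ftid_tab[] (sftid_base is carved out of the filter range in
	 * adap_init0()), so the rebased stid now indexes that table directly.
	 */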
4208 
4209 	/* Check to make sure the filter requested is writable ...
4210 	 */
4211 	f = &adap->tids.ftid_tab[stid];
4212 	ret = writable_filter(f);
4213 	if (ret)
4214 		return ret;
4215 
4216 	/* Clear out any old resources being used by the filter before
4217 	 * we start constructing the new filter.
4218 	 */
4219 	if (f->valid)
4220 		clear_filter(adap, f);
4221 
4222 	/* Clear out filter specifications */
4223 	memset(&f->fs, 0, sizeof(struct ch_filter_specification));
4224 	f->fs.val.lport = cpu_to_be16(sport);
4225 	f->fs.mask.lport  = ~0;
4226 	val = (u8 *)&sip;
4227 	if ((val[0] | val[1] | val[2] | val[3]) != 0) {
4228 		for (i = 0; i < 4; i++) {
4229 			f->fs.val.lip[i] = val[i];
4230 			f->fs.mask.lip[i] = ~0;
4231 		}
4232 		if (adap->params.tp.vlan_pri_map & F_PORT) {
4233 			f->fs.val.iport = port;
4234 			f->fs.mask.iport = mask;
4235 		}
4236 	}
4237 
4238 	if (adap->params.tp.vlan_pri_map & F_PROTOCOL) {
4239 		f->fs.val.proto = IPPROTO_TCP;
4240 		f->fs.mask.proto = ~0;
4241 	}
4242 
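	/* Steer packets matching this filter directly to the caller-supplied
	 * ingress queue.
	 */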
4243 	f->fs.dirsteer = 1;
4244 	f->fs.iq = queue;
4245 	/* Mark filter as locked */
4246 	f->locked = 1;
4247 	f->fs.rpttid = 1;
4248 
4249 	ret = set_filter_wr(adap, stid);
4250 	if (ret) {
4251 		clear_filter(adap, f);
4252 		return ret;
4253 	}
4254 
4255 	return 0;
4256 }
4257 EXPORT_SYMBOL(cxgb4_create_server_filter);
4258 
4259 int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
4260 		unsigned int queue, bool ipv6)
4261 {
4262 	int ret;
4263 	struct filter_entry *f;
4264 	struct adapter *adap;
4265 
4266 	adap = netdev2adap(dev);
4267 
4268 	/* Adjust stid to correct filter index */
4269 	stid -= adap->tids.sftid_base;
4270 	stid += adap->tids.nftids;
4271 
4272 	f = &adap->tids.ftid_tab[stid];
4273 	/* Unlock the filter */
4274 	f->locked = 0;
4275 
4276 	ret = delete_filter(adap, stid);
4277 	if (ret)
4278 		return ret;
4279 
4280 	return 0;
4281 }
4282 EXPORT_SYMBOL(cxgb4_remove_server_filter);
4283 
4284 static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
4285 						struct rtnl_link_stats64 *ns)
4286 {
4287 	struct port_stats stats;
4288 	struct port_info *p = netdev_priv(dev);
4289 	struct adapter *adapter = p->adapter;
4290 
4291 	/* Block retrieving statistics during EEH error
4292 	 * recovery. Otherwise, the recovery might fail
4293 	 * and the PCI device will be removed permanently
4294 	 */
4295 	spin_lock(&adapter->stats_lock);
4296 	if (!netif_device_present(dev)) {
4297 		spin_unlock(&adapter->stats_lock);
4298 		return ns;
4299 	}
4300 	t4_get_port_stats(adapter, p->tx_chan, &stats);
4301 	spin_unlock(&adapter->stats_lock);
4302 
4303 	ns->tx_bytes   = stats.tx_octets;
4304 	ns->tx_packets = stats.tx_frames;
4305 	ns->rx_bytes   = stats.rx_octets;
4306 	ns->rx_packets = stats.rx_frames;
4307 	ns->multicast  = stats.rx_mcast_frames;
4308 
4309 	/* detailed rx_errors */
4310 	ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
4311 			       stats.rx_runt;
4312 	ns->rx_over_errors   = 0;
4313 	ns->rx_crc_errors    = stats.rx_fcs_err;
4314 	ns->rx_frame_errors  = stats.rx_symbol_err;
4315 	ns->rx_fifo_errors   = stats.rx_ovflow0 + stats.rx_ovflow1 +
4316 			       stats.rx_ovflow2 + stats.rx_ovflow3 +
4317 			       stats.rx_trunc0 + stats.rx_trunc1 +
4318 			       stats.rx_trunc2 + stats.rx_trunc3;
4319 	ns->rx_missed_errors = 0;
4320 
4321 	/* detailed tx_errors */
4322 	ns->tx_aborted_errors   = 0;
4323 	ns->tx_carrier_errors   = 0;
4324 	ns->tx_fifo_errors      = 0;
4325 	ns->tx_heartbeat_errors = 0;
4326 	ns->tx_window_errors    = 0;
4327 
4328 	ns->tx_errors = stats.tx_error_frames;
4329 	ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
4330 		ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
4331 	return ns;
4332 }
4333 
4334 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
4335 {
4336 	unsigned int mbox;
4337 	int ret = 0, prtad, devad;
4338 	struct port_info *pi = netdev_priv(dev);
4339 	struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
4340 
4341 	switch (cmd) {
4342 	case SIOCGMIIPHY:
4343 		if (pi->mdio_addr < 0)
4344 			return -EOPNOTSUPP;
4345 		data->phy_id = pi->mdio_addr;
4346 		break;
4347 	case SIOCGMIIREG:
4348 	case SIOCSMIIREG:
4349 		if (mdio_phy_id_is_c45(data->phy_id)) {
4350 			prtad = mdio_phy_id_prtad(data->phy_id);
4351 			devad = mdio_phy_id_devad(data->phy_id);
4352 		} else if (data->phy_id < 32) {
4353 			prtad = data->phy_id;
4354 			devad = 0;
4355 			data->reg_num &= 0x1f;
4356 		} else
4357 			return -EINVAL;
4358 
4359 		mbox = pi->adapter->fn;
4360 		if (cmd == SIOCGMIIREG)
4361 			ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
4362 					 data->reg_num, &data->val_out);
4363 		else
4364 			ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
4365 					 data->reg_num, data->val_in);
4366 		break;
4367 	default:
4368 		return -EOPNOTSUPP;
4369 	}
4370 	return ret;
4371 }
4372 
4373 static void cxgb_set_rxmode(struct net_device *dev)
4374 {
4375 	/* unfortunately we can't return errors to the stack */
4376 	set_rxmode(dev, -1, false);
4377 }
4378 
4379 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
4380 {
4381 	int ret;
4382 	struct port_info *pi = netdev_priv(dev);
4383 
4384 	if (new_mtu < 81 || new_mtu > MAX_MTU)         /* accommodate SACK */
4385 		return -EINVAL;
4386 	ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1,
4387 			    -1, -1, -1, true);
4388 	if (!ret)
4389 		dev->mtu = new_mtu;
4390 	return ret;
4391 }
4392 
4393 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
4394 {
4395 	int ret;
4396 	struct sockaddr *addr = p;
4397 	struct port_info *pi = netdev_priv(dev);
4398 
4399 	if (!is_valid_ether_addr(addr->sa_data))
4400 		return -EADDRNOTAVAIL;
4401 
4402 	ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid,
4403 			    pi->xact_addr_filt, addr->sa_data, true, true);
4404 	if (ret < 0)
4405 		return ret;
4406 
4407 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4408 	pi->xact_addr_filt = ret;
4409 	return 0;
4410 }
4411 
4412 #ifdef CONFIG_NET_POLL_CONTROLLER
4413 static void cxgb_netpoll(struct net_device *dev)
4414 {
4415 	struct port_info *pi = netdev_priv(dev);
4416 	struct adapter *adap = pi->adapter;
4417 
4418 	if (adap->flags & USING_MSIX) {
4419 		int i;
4420 		struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
4421 
4422 		for (i = pi->nqsets; i; i--, rx++)
4423 			t4_sge_intr_msix(0, &rx->rspq);
4424 	} else
4425 		t4_intr_handler(adap)(0, adap);
4426 }
4427 #endif
4428 
4429 static const struct net_device_ops cxgb4_netdev_ops = {
4430 	.ndo_open             = cxgb_open,
4431 	.ndo_stop             = cxgb_close,
4432 	.ndo_start_xmit       = t4_eth_xmit,
4433 	.ndo_get_stats64      = cxgb_get_stats,
4434 	.ndo_set_rx_mode      = cxgb_set_rxmode,
4435 	.ndo_set_mac_address  = cxgb_set_mac_addr,
4436 	.ndo_set_features     = cxgb_set_features,
4437 	.ndo_validate_addr    = eth_validate_addr,
4438 	.ndo_do_ioctl         = cxgb_ioctl,
4439 	.ndo_change_mtu       = cxgb_change_mtu,
4440 #ifdef CONFIG_NET_POLL_CONTROLLER
4441 	.ndo_poll_controller  = cxgb_netpoll,
4442 #endif
4443 };
4444 
4445 void t4_fatal_err(struct adapter *adap)
4446 {
4447 	t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0);
4448 	t4_intr_disable(adap);
4449 	dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
4450 }
4451 
4452 static void setup_memwin(struct adapter *adap)
4453 {
4454 	u32 bar0, mem_win0_base, mem_win1_base, mem_win2_base;
4455 
4456 	bar0 = pci_resource_start(adap->pdev, 0);  /* truncation intentional */
4457 	if (is_t4(adap->params.chip)) {
4458 		mem_win0_base = bar0 + MEMWIN0_BASE;
4459 		mem_win1_base = bar0 + MEMWIN1_BASE;
4460 		mem_win2_base = bar0 + MEMWIN2_BASE;
4461 	} else {
		/* For T5, only the relative offset inside the PCIe BAR is passed */
4463 		mem_win0_base = MEMWIN0_BASE;
4464 		mem_win1_base = MEMWIN1_BASE_T5;
4465 		mem_win2_base = MEMWIN2_BASE_T5;
4466 	}
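
	/* Program the three memory access windows; the WINDOW field encodes
	 * log2 of the aperture size in KB.
	 */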
4467 	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
4468 		     mem_win0_base | BIR(0) |
4469 		     WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
4470 	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
4471 		     mem_win1_base | BIR(0) |
4472 		     WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
4473 	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
4474 		     mem_win2_base | BIR(0) |
4475 		     WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
4476 }
4477 
4478 static void setup_memwin_rdma(struct adapter *adap)
4479 {
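	/* Expose the RDMA on-chip queue (OCQ) region through PCIe memory
	 * access window 3; nothing to do if no OCQ memory was provisioned.
	 */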
4480 	if (adap->vres.ocq.size) {
4481 		unsigned int start, sz_kb;
4482 
4483 		start = pci_resource_start(adap->pdev, 2) +
4484 			OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
4485 		sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
4486 		t4_write_reg(adap,
4487 			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 3),
4488 			     start | BIR(1) | WINDOW(ilog2(sz_kb)));
4489 		t4_write_reg(adap,
4490 			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3),
4491 			     adap->vres.ocq.start);
4492 		t4_read_reg(adap,
4493 			    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3));
4494 	}
4495 }
4496 
4497 static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
4498 {
4499 	u32 v;
4500 	int ret;
4501 
4502 	/* get device capabilities */
4503 	memset(c, 0, sizeof(*c));
4504 	c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4505 			       FW_CMD_REQUEST | FW_CMD_READ);
4506 	c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
4507 	ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
4508 	if (ret < 0)
4509 		return ret;
4510 
4511 	/* select capabilities we'll be using */
4512 	if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
4513 		if (!vf_acls)
4514 			c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
4515 		else
4516 			c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
4517 	} else if (vf_acls) {
4518 		dev_err(adap->pdev_dev, "virtualization ACLs not supported");
4519 		return ret;
4520 	}
4521 	c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4522 			       FW_CMD_REQUEST | FW_CMD_WRITE);
4523 	ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL);
4524 	if (ret < 0)
4525 		return ret;
4526 
4527 	ret = t4_config_glbl_rss(adap, adap->fn,
4528 				 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
4529 				 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
4530 				 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
4531 	if (ret < 0)
4532 		return ret;
4533 
4534 	ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ,
4535 			  0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);
4536 	if (ret < 0)
4537 		return ret;
4538 
4539 	t4_sge_init(adap);
4540 
4541 	/* tweak some settings */
4542 	t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
4543 	t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
4544 	t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
4545 	v = t4_read_reg(adap, TP_PIO_DATA);
4546 	t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
4547 
4548 	/* first 4 Tx modulation queues point to consecutive Tx channels */
4549 	adap->params.tp.tx_modq_map = 0xE4;
4550 	t4_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
4551 		     V_TX_MOD_QUEUE_REQ_MAP(adap->params.tp.tx_modq_map));
4552 
4553 	/* associate each Tx modulation queue with consecutive Tx channels */
4554 	v = 0x84218421;
4555 	t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
4556 			  &v, 1, A_TP_TX_SCHED_HDR);
4557 	t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
4558 			  &v, 1, A_TP_TX_SCHED_FIFO);
4559 	t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
4560 			  &v, 1, A_TP_TX_SCHED_PCMD);
4561 
4562 #define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
4563 	if (is_offload(adap)) {
4564 		t4_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0,
4565 			     V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4566 			     V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4567 			     V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4568 			     V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
4569 		t4_write_reg(adap, A_TP_TX_MOD_CHANNEL_WEIGHT,
4570 			     V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4571 			     V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4572 			     V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4573 			     V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
4574 	}
4575 
4576 	/* get basic stuff going */
4577 	return t4_early_init(adap, adap->fn);
4578 }
4579 
4580 /*
4581  * Max # of ATIDs.  The absolute HW max is 16K but we keep it lower.
4582  */
4583 #define MAX_ATIDS 8192U
4584 
4585 /*
4586  * Phase 0 of initialization: contact FW, obtain config, perform basic init.
4587  *
4588  * If the firmware we're dealing with has Configuration File support, then
4589  * we use that to perform all configuration
4590  */
4591 
4592 /*
4593  * Tweak configuration based on module parameters, etc.  Most of these have
4594  * defaults assigned to them by Firmware Configuration Files (if we're using
4595  * them) but need to be explicitly set if we're using hard-coded
4596  * initialization.  But even in the case of using Firmware Configuration
4597  * Files, we'd like to expose the ability to change these via module
4598  * parameters so these are essentially common tweaks/settings for
4599  * Configuration Files and hard-coded initialization ...
4600  */
4601 static int adap_init0_tweaks(struct adapter *adapter)
4602 {
4603 	/*
4604 	 * Fix up various Host-Dependent Parameters like Page Size, Cache
4605 	 * Line Size, etc.  The firmware default is for a 4KB Page Size and
4606 	 * 64B Cache Line Size ...
4607 	 */
4608 	t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);
4609 
4610 	/*
4611 	 * Process module parameters which affect early initialization.
4612 	 */
4613 	if (rx_dma_offset != 2 && rx_dma_offset != 0) {
4614 		dev_err(&adapter->pdev->dev,
4615 			"Ignoring illegal rx_dma_offset=%d, using 2\n",
4616 			rx_dma_offset);
4617 		rx_dma_offset = 2;
4618 	}
4619 	t4_set_reg_field(adapter, SGE_CONTROL,
4620 			 PKTSHIFT_MASK,
4621 			 PKTSHIFT(rx_dma_offset));
4622 
4623 	/*
4624 	 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
4625 	 * adds the pseudo header itself.
4626 	 */
4627 	t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG,
4628 			       CSUM_HAS_PSEUDO_HDR, 0);
4629 
4630 	return 0;
4631 }
4632 
4633 /*
4634  * Attempt to initialize the adapter via a Firmware Configuration File.
4635  */
4636 static int adap_init0_config(struct adapter *adapter, int reset)
4637 {
4638 	struct fw_caps_config_cmd caps_cmd;
4639 	const struct firmware *cf;
4640 	unsigned long mtype = 0, maddr = 0;
4641 	u32 finiver, finicsum, cfcsum;
4642 	int ret;
4643 	int config_issued = 0;
4644 	char *fw_config_file, fw_config_file_path[256];
4645 	char *config_name = NULL;
4646 
4647 	/*
4648 	 * Reset device if necessary.
4649 	 */
4650 	if (reset) {
4651 		ret = t4_fw_reset(adapter, adapter->mbox,
4652 				  PIORSTMODE | PIORST);
4653 		if (ret < 0)
4654 			goto bye;
4655 	}
4656 
4657 	/*
4658 	 * If we have a T4 configuration file under /lib/firmware/cxgb4/,
4659 	 * then use that.  Otherwise, use the configuration file stored
4660 	 * in the adapter flash ...
4661 	 */
4662 	switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
4663 	case CHELSIO_T4:
4664 		fw_config_file = FW4_CFNAME;
4665 		break;
4666 	case CHELSIO_T5:
4667 		fw_config_file = FW5_CFNAME;
4668 		break;
4669 	default:
4670 		dev_err(adapter->pdev_dev, "Device %d is not supported\n",
4671 		       adapter->pdev->device);
4672 		ret = -EINVAL;
4673 		goto bye;
4674 	}
4675 
4676 	ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
4677 	if (ret < 0) {
4678 		config_name = "On FLASH";
4679 		mtype = FW_MEMTYPE_CF_FLASH;
4680 		maddr = t4_flash_cfg_addr(adapter);
4681 	} else {
4682 		u32 params[7], val[7];
4683 
4684 		sprintf(fw_config_file_path,
4685 			"/lib/firmware/%s", fw_config_file);
4686 		config_name = fw_config_file_path;
4687 
4688 		if (cf->size >= FLASH_CFG_MAX_SIZE)
4689 			ret = -ENOMEM;
4690 		else {
4691 			params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4692 			     FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
4693 			ret = t4_query_params(adapter, adapter->mbox,
4694 					      adapter->fn, 0, 1, params, val);
4695 			if (ret == 0) {
4696 				/*
4697 				 * For t4_memory_write() below addresses and
4698 				 * sizes have to be in terms of multiples of 4
4699 				 * bytes.  So, if the Configuration File isn't
4700 				 * a multiple of 4 bytes in length we'll have
4701 				 * to write that out separately since we can't
4702 				 * guarantee that the bytes following the
4703 				 * residual byte in the buffer returned by
4704 				 * request_firmware() are zeroed out ...
4705 				 */
4706 				size_t resid = cf->size & 0x3;
4707 				size_t size = cf->size & ~0x3;
4708 				__be32 *data = (__be32 *)cf->data;
4709 
4710 				mtype = FW_PARAMS_PARAM_Y_GET(val[0]);
4711 				maddr = FW_PARAMS_PARAM_Z_GET(val[0]) << 16;
4712 
4713 				ret = t4_memory_write(adapter, mtype, maddr,
4714 						      size, data);
4715 				if (ret == 0 && resid != 0) {
4716 					union {
4717 						__be32 word;
4718 						char buf[4];
4719 					} last;
4720 					int i;
4721 
4722 					last.word = data[size >> 2];
4723 					for (i = resid; i < 4; i++)
4724 						last.buf[i] = 0;
4725 					ret = t4_memory_write(adapter, mtype,
4726 							      maddr + size,
4727 							      4, &last.word);
4728 				}
4729 			}
4730 		}
4731 
4732 		release_firmware(cf);
4733 		if (ret)
4734 			goto bye;
4735 	}
4736 
4737 	/*
4738 	 * Issue a Capability Configuration command to the firmware to get it
4739 	 * to parse the Configuration File.  We don't use t4_fw_config_file()
4740 	 * because we want the ability to modify various features after we've
4741 	 * processed the configuration file ...
4742 	 */
4743 	memset(&caps_cmd, 0, sizeof(caps_cmd));
4744 	caps_cmd.op_to_write =
4745 		htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4746 		      FW_CMD_REQUEST |
4747 		      FW_CMD_READ);
4748 	caps_cmd.cfvalid_to_len16 =
4749 		htonl(FW_CAPS_CONFIG_CMD_CFVALID |
4750 		      FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
4751 		      FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
4752 		      FW_LEN16(caps_cmd));
4753 	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4754 			 &caps_cmd);
4755 
4756 	/* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
4757 	 * Configuration File in FLASH), our last gasp effort is to use the
4758 	 * Firmware Configuration File which is embedded in the firmware.  A
4759 	 * very few early versions of the firmware didn't have one embedded
4760 	 * but we can ignore those.
4761 	 */
4762 	if (ret == -ENOENT) {
4763 		memset(&caps_cmd, 0, sizeof(caps_cmd));
4764 		caps_cmd.op_to_write =
4765 			htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4766 					FW_CMD_REQUEST |
4767 					FW_CMD_READ);
4768 		caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
4769 		ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
4770 				sizeof(caps_cmd), &caps_cmd);
4771 		config_name = "Firmware Default";
4772 	}
4773 
4774 	config_issued = 1;
4775 	if (ret < 0)
4776 		goto bye;
4777 
4778 	finiver = ntohl(caps_cmd.finiver);
4779 	finicsum = ntohl(caps_cmd.finicsum);
4780 	cfcsum = ntohl(caps_cmd.cfcsum);
4781 	if (finicsum != cfcsum)
4782 		dev_warn(adapter->pdev_dev, "Configuration File checksum "\
4783 			 "mismatch: [fini] csum=%#x, computed csum=%#x\n",
4784 			 finicsum, cfcsum);
4785 
4786 	/*
4787 	 * And now tell the firmware to use the configuration we just loaded.
4788 	 */
4789 	caps_cmd.op_to_write =
4790 		htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4791 		      FW_CMD_REQUEST |
4792 		      FW_CMD_WRITE);
4793 	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
4794 	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4795 			 NULL);
4796 	if (ret < 0)
4797 		goto bye;
4798 
4799 	/*
4800 	 * Tweak configuration based on system architecture, module
4801 	 * parameters, etc.
4802 	 */
4803 	ret = adap_init0_tweaks(adapter);
4804 	if (ret < 0)
4805 		goto bye;
4806 
4807 	/*
4808 	 * And finally tell the firmware to initialize itself using the
4809 	 * parameters from the Configuration File.
4810 	 */
4811 	ret = t4_fw_initialize(adapter, adapter->mbox);
4812 	if (ret < 0)
4813 		goto bye;
4814 
4815 	/*
4816 	 * Return successfully and note that we're operating with parameters
4817 	 * not supplied by the driver, rather than from hard-wired
	 * initialization constants buried in the driver.
4819 	 */
4820 	adapter->flags |= USING_SOFT_PARAMS;
4821 	dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
4822 		 "Configuration File \"%s\", version %#x, computed checksum %#x\n",
4823 		 config_name, finiver, cfcsum);
4824 	return 0;
4825 
4826 	/*
4827 	 * Something bad happened.  Return the error ...  (If the "error"
4828 	 * is that there's no Configuration File on the adapter we don't
4829 	 * want to issue a warning since this is fairly common.)
4830 	 */
4831 bye:
4832 	if (config_issued && ret != -ENOENT)
4833 		dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n",
4834 			 config_name, -ret);
4835 	return ret;
4836 }
4837 
4838 /*
4839  * Attempt to initialize the adapter via hard-coded, driver supplied
4840  * parameters ...
4841  */
4842 static int adap_init0_no_config(struct adapter *adapter, int reset)
4843 {
4844 	struct sge *s = &adapter->sge;
4845 	struct fw_caps_config_cmd caps_cmd;
4846 	u32 v;
4847 	int i, ret;
4848 
4849 	/*
4850 	 * Reset device if necessary
4851 	 */
4852 	if (reset) {
4853 		ret = t4_fw_reset(adapter, adapter->mbox,
4854 				  PIORSTMODE | PIORST);
4855 		if (ret < 0)
4856 			goto bye;
4857 	}
4858 
4859 	/*
4860 	 * Get device capabilities and select which we'll be using.
4861 	 */
4862 	memset(&caps_cmd, 0, sizeof(caps_cmd));
4863 	caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4864 				     FW_CMD_REQUEST | FW_CMD_READ);
4865 	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
4866 	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4867 			 &caps_cmd);
4868 	if (ret < 0)
4869 		goto bye;
4870 
4871 	if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
4872 		if (!vf_acls)
4873 			caps_cmd.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
4874 		else
4875 			caps_cmd.niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
4876 	} else if (vf_acls) {
4877 		dev_err(adapter->pdev_dev, "virtualization ACLs not supported");
4878 		goto bye;
4879 	}
4880 	caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4881 			      FW_CMD_REQUEST | FW_CMD_WRITE);
4882 	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4883 			 NULL);
4884 	if (ret < 0)
4885 		goto bye;
4886 
4887 	/*
4888 	 * Tweak configuration based on system architecture, module
4889 	 * parameters, etc.
4890 	 */
4891 	ret = adap_init0_tweaks(adapter);
4892 	if (ret < 0)
4893 		goto bye;
4894 
4895 	/*
4896 	 * Select RSS Global Mode we want to use.  We use "Basic Virtual"
4897 	 * mode which maps each Virtual Interface to its own section of
4898 	 * the RSS Table and we turn on all map and hash enables ...
4899 	 */
4900 	adapter->flags |= RSS_TNLALLLOOKUP;
4901 	ret = t4_config_glbl_rss(adapter, adapter->mbox,
4902 				 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
4903 				 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
4904 				 FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ |
4905 				 ((adapter->flags & RSS_TNLALLLOOKUP) ?
4906 					FW_RSS_GLB_CONFIG_CMD_TNLALLLKP : 0));
4907 	if (ret < 0)
4908 		goto bye;
4909 
4910 	/*
4911 	 * Set up our own fundamental resource provisioning ...
4912 	 */
4913 	ret = t4_cfg_pfvf(adapter, adapter->mbox, adapter->fn, 0,
4914 			  PFRES_NEQ, PFRES_NETHCTRL,
4915 			  PFRES_NIQFLINT, PFRES_NIQ,
4916 			  PFRES_TC, PFRES_NVI,
4917 			  FW_PFVF_CMD_CMASK_MASK,
4918 			  pfvfres_pmask(adapter, adapter->fn, 0),
4919 			  PFRES_NEXACTF,
4920 			  PFRES_R_CAPS, PFRES_WX_CAPS);
4921 	if (ret < 0)
4922 		goto bye;
4923 
4924 	/*
4925 	 * Perform low level SGE initialization.  We need to do this before we
	 * send the firmware the INITIALIZE command because that will allow
	 * any other PF Drivers which are waiting for the Master
	 * Initialization to proceed.
4929 	 */
4930 	for (i = 0; i < SGE_NTIMERS - 1; i++)
4931 		s->timer_val[i] = min(intr_holdoff[i], MAX_SGE_TIMERVAL);
4932 	s->timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
4933 	s->counter_val[0] = 1;
4934 	for (i = 1; i < SGE_NCOUNTERS; i++)
4935 		s->counter_val[i] = min(intr_cnt[i - 1],
4936 					THRESHOLD_0_GET(THRESHOLD_0_MASK));
4937 	t4_sge_init(adapter);
4938 
4939 #ifdef CONFIG_PCI_IOV
4940 	/*
4941 	 * Provision resource limits for Virtual Functions.  We currently
4942 	 * grant them all the same static resource limits except for the Port
4943 	 * Access Rights Mask which we're assigning based on the PF.  All of
4944 	 * the static provisioning stuff for both the PF and VF really needs
4945 	 * to be managed in a persistent manner for each device which the
4946 	 * firmware controls.
4947 	 */
4948 	{
4949 		int pf, vf;
4950 
4951 		for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) {
4952 			if (num_vf[pf] <= 0)
4953 				continue;
4954 
4955 			/* VF numbering starts at 1! */
4956 			for (vf = 1; vf <= num_vf[pf]; vf++) {
4957 				ret = t4_cfg_pfvf(adapter, adapter->mbox,
4958 						  pf, vf,
4959 						  VFRES_NEQ, VFRES_NETHCTRL,
4960 						  VFRES_NIQFLINT, VFRES_NIQ,
4961 						  VFRES_TC, VFRES_NVI,
4962 						  FW_PFVF_CMD_CMASK_MASK,
4963 						  pfvfres_pmask(
4964 						  adapter, pf, vf),
4965 						  VFRES_NEXACTF,
4966 						  VFRES_R_CAPS, VFRES_WX_CAPS);
4967 				if (ret < 0)
4968 					dev_warn(adapter->pdev_dev,
4969 						 "failed to "\
4970 						 "provision pf/vf=%d/%d; "
4971 						 "err=%d\n", pf, vf, ret);
4972 			}
4973 		}
4974 	}
4975 #endif
4976 
4977 	/*
4978 	 * Set up the default filter mode.  Later we'll want to implement this
4979 	 * via a firmware command, etc. ...  This needs to be done before the
	 * firmware initialization command ...  If the selected set of fields
4981 	 * isn't equal to the default value, we'll need to make sure that the
4982 	 * field selections will fit in the 36-bit budget.
4983 	 */
4984 	if (tp_vlan_pri_map != TP_VLAN_PRI_MAP_DEFAULT) {
4985 		int j, bits = 0;
4986 
4987 		for (j = TP_VLAN_PRI_MAP_FIRST; j <= TP_VLAN_PRI_MAP_LAST; j++)
4988 			switch (tp_vlan_pri_map & (1 << j)) {
4989 			case 0:
4990 				/* compressed filter field not enabled */
4991 				break;
4992 			case FCOE_MASK:
4993 				bits +=  1;
4994 				break;
4995 			case PORT_MASK:
4996 				bits +=  3;
4997 				break;
4998 			case VNIC_ID_MASK:
4999 				bits += 17;
5000 				break;
5001 			case VLAN_MASK:
5002 				bits += 17;
5003 				break;
5004 			case TOS_MASK:
5005 				bits +=  8;
5006 				break;
5007 			case PROTOCOL_MASK:
5008 				bits +=  8;
5009 				break;
5010 			case ETHERTYPE_MASK:
5011 				bits += 16;
5012 				break;
5013 			case MACMATCH_MASK:
5014 				bits +=  9;
5015 				break;
5016 			case MPSHITTYPE_MASK:
5017 				bits +=  3;
5018 				break;
5019 			case FRAGMENTATION_MASK:
5020 				bits +=  1;
5021 				break;
5022 			}
5023 
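		/* Worked example (hypothetical selection): enabling the PORT
		 * (3), VLAN (17), PROTOCOL (8) and MACMATCH (9) fields would
		 * need 37 bits and be rejected below.
		 */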
5024 		if (bits > 36) {
5025 			dev_err(adapter->pdev_dev,
5026 				"tp_vlan_pri_map=%#x needs %d bits > 36;"\
5027 				" using %#x\n", tp_vlan_pri_map, bits,
5028 				TP_VLAN_PRI_MAP_DEFAULT);
5029 			tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
5030 		}
5031 	}
5032 	v = tp_vlan_pri_map;
5033 	t4_write_indirect(adapter, TP_PIO_ADDR, TP_PIO_DATA,
5034 			  &v, 1, TP_VLAN_PRI_MAP);
5035 
5036 	/*
	 * Five Tuple Lookup mode needs to be set in TP_GLOBAL_CONFIG in order
5038 	 * to support any of the compressed filter fields above.  Newer
5039 	 * versions of the firmware do this automatically but it doesn't hurt
5040 	 * to set it here.  Meanwhile, we do _not_ need to set Lookup Every
5041 	 * Packet in TP_INGRESS_CONFIG to support matching non-TCP packets
5042 	 * since the firmware automatically turns this on and off when we have
5043 	 * a non-zero number of filters active (since it does have a
5044 	 * performance impact).
5045 	 */
5046 	if (tp_vlan_pri_map)
5047 		t4_set_reg_field(adapter, TP_GLOBAL_CONFIG,
5048 				 FIVETUPLELOOKUP_MASK,
5049 				 FIVETUPLELOOKUP_MASK);
5050 
5051 	/*
5052 	 * Tweak some settings.
5053 	 */
5054 	t4_write_reg(adapter, TP_SHIFT_CNT, SYNSHIFTMAX(6) |
5055 		     RXTSHIFTMAXR1(4) | RXTSHIFTMAXR2(15) |
5056 		     PERSHIFTBACKOFFMAX(8) | PERSHIFTMAX(8) |
5057 		     KEEPALIVEMAXR1(4) | KEEPALIVEMAXR2(9));
5058 
5059 	/*
5060 	 * Get basic stuff going by issuing the Firmware Initialize command.
5061 	 * Note that this _must_ be after all PFVF commands ...
5062 	 */
5063 	ret = t4_fw_initialize(adapter, adapter->mbox);
5064 	if (ret < 0)
5065 		goto bye;
5066 
5067 	/*
5068 	 * Return successfully!
5069 	 */
5070 	dev_info(adapter->pdev_dev, "Successfully configured using built-in "\
5071 		 "driver parameters\n");
5072 	return 0;
5073 
5074 	/*
5075 	 * Something bad happened.  Return the error ...
5076 	 */
5077 bye:
5078 	return ret;
5079 }
5080 
5081 static struct fw_info fw_info_array[] = {
5082 	{
5083 		.chip = CHELSIO_T4,
5084 		.fs_name = FW4_CFNAME,
5085 		.fw_mod_name = FW4_FNAME,
5086 		.fw_hdr = {
5087 			.chip = FW_HDR_CHIP_T4,
5088 			.fw_ver = __cpu_to_be32(FW_VERSION(T4)),
5089 			.intfver_nic = FW_INTFVER(T4, NIC),
5090 			.intfver_vnic = FW_INTFVER(T4, VNIC),
5091 			.intfver_ri = FW_INTFVER(T4, RI),
5092 			.intfver_iscsi = FW_INTFVER(T4, ISCSI),
5093 			.intfver_fcoe = FW_INTFVER(T4, FCOE),
5094 		},
5095 	}, {
5096 		.chip = CHELSIO_T5,
5097 		.fs_name = FW5_CFNAME,
5098 		.fw_mod_name = FW5_FNAME,
5099 		.fw_hdr = {
5100 			.chip = FW_HDR_CHIP_T5,
5101 			.fw_ver = __cpu_to_be32(FW_VERSION(T5)),
5102 			.intfver_nic = FW_INTFVER(T5, NIC),
5103 			.intfver_vnic = FW_INTFVER(T5, VNIC),
5104 			.intfver_ri = FW_INTFVER(T5, RI),
5105 			.intfver_iscsi = FW_INTFVER(T5, ISCSI),
5106 			.intfver_fcoe = FW_INTFVER(T5, FCOE),
5107 		},
5108 	}
5109 };
5110 
5111 static struct fw_info *find_fw_info(int chip)
5112 {
5113 	int i;
5114 
5115 	for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
5116 		if (fw_info_array[i].chip == chip)
5117 			return &fw_info_array[i];
5118 	}
5119 	return NULL;
5120 }
5121 
5122 /*
5123  * Phase 0 of initialization: contact FW, obtain config, perform basic init.
5124  */
5125 static int adap_init0(struct adapter *adap)
5126 {
5127 	int ret;
5128 	u32 v, port_vec;
5129 	enum dev_state state;
5130 	u32 params[7], val[7];
5131 	struct fw_caps_config_cmd caps_cmd;
5132 	int reset = 1;
5133 
5134 	/*
5135 	 * Contact FW, advertising Master capability (and potentially forcing
5136 	 * ourselves as the Master PF if our module parameter force_init is
5137 	 * set).
5138 	 */
5139 	ret = t4_fw_hello(adap, adap->mbox, adap->fn,
5140 			  force_init ? MASTER_MUST : MASTER_MAY,
5141 			  &state);
5142 	if (ret < 0) {
5143 		dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
5144 			ret);
5145 		return ret;
5146 	}
5147 	if (ret == adap->mbox)
5148 		adap->flags |= MASTER_PF;
5149 	if (force_init && state == DEV_STATE_INIT)
5150 		state = DEV_STATE_UNINIT;
5151 
5152 	/*
5153 	 * If we're the Master PF Driver and the device is uninitialized,
5154 	 * then let's consider upgrading the firmware ...  (We always want
5155 	 * to check the firmware version number in order to A. get it for
5156 	 * later reporting and B. to warn if the currently loaded firmware
5157 	 * is excessively mismatched relative to the driver.)
5158 	 */
5159 	t4_get_fw_version(adap, &adap->params.fw_vers);
5160 	t4_get_tp_version(adap, &adap->params.tp_vers);
5161 	if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
5162 		struct fw_info *fw_info;
5163 		struct fw_hdr *card_fw;
5164 		const struct firmware *fw;
5165 		const u8 *fw_data = NULL;
5166 		unsigned int fw_size = 0;
5167 
5168 		/* This is the firmware whose headers the driver was compiled
5169 		 * against
5170 		 */
5171 		fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip));
5172 		if (fw_info == NULL) {
5173 			dev_err(adap->pdev_dev,
5174 				"unable to get firmware info for chip %d.\n",
5175 				CHELSIO_CHIP_VERSION(adap->params.chip));
5176 			return -EINVAL;
5177 		}
5178 
5179 		/* allocate memory to read the header of the firmware on the
5180 		 * card
5181 		 */
5182 		card_fw = t4_alloc_mem(sizeof(*card_fw));
5183 
		/* Get FW from /lib/firmware/ */
5185 		ret = request_firmware(&fw, fw_info->fw_mod_name,
5186 				       adap->pdev_dev);
5187 		if (ret < 0) {
5188 			dev_err(adap->pdev_dev,
5189 				"unable to load firmware image %s, error %d\n",
5190 				fw_info->fw_mod_name, ret);
5191 		} else {
5192 			fw_data = fw->data;
5193 			fw_size = fw->size;
5194 		}
5195 
5196 		/* upgrade FW logic */
5197 		ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw,
5198 				 state, &reset);
5199 
5200 		/* Cleaning up */
5201 		if (fw != NULL)
5202 			release_firmware(fw);
5203 		t4_free_mem(card_fw);
5204 
5205 		if (ret < 0)
5206 			goto bye;
5207 	}
5208 
5209 	/*
5210 	 * Grab VPD parameters.  This should be done after we establish a
5211 	 * connection to the firmware since some of the VPD parameters
5212 	 * (notably the Core Clock frequency) are retrieved via requests to
5213 	 * the firmware.  On the other hand, we need these fairly early on
5214 	 * so we do this right after getting ahold of the firmware.
5215 	 */
5216 	ret = get_vpd_params(adap, &adap->params.vpd);
5217 	if (ret < 0)
5218 		goto bye;
5219 
5220 	/*
5221 	 * Find out what ports are available to us.  Note that we need to do
5222 	 * this before calling adap_init0_no_config() since it needs nports
5223 	 * and portvec ...
5224 	 */
5225 	v =
5226 	    FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
5227 	    FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
5228 	ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &v, &port_vec);
5229 	if (ret < 0)
5230 		goto bye;
5231 
5232 	adap->params.nports = hweight32(port_vec);
5233 	adap->params.portvec = port_vec;
5234 
5235 	/*
5236 	 * If the firmware is initialized already (and we're not forcing a
5237 	 * master initialization), note that we're living with existing
5238 	 * adapter parameters.  Otherwise, it's time to try initializing the
5239 	 * adapter ...
5240 	 */
5241 	if (state == DEV_STATE_INIT) {
5242 		dev_info(adap->pdev_dev, "Coming up as %s: "\
5243 			 "Adapter already initialized\n",
5244 			 adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
5245 		adap->flags |= USING_SOFT_PARAMS;
5246 	} else {
5247 		dev_info(adap->pdev_dev, "Coming up as MASTER: "\
5248 			 "Initializing adapter\n");
5249 
5250 		/*
		 * If the firmware doesn't support Configuration Files, warn
		 * the user and exit.
5253 		 */
5254 		if (ret < 0)
5255 			dev_warn(adap->pdev_dev, "Firmware doesn't support "
5256 				 "configuration file.\n");
5257 		if (force_old_init)
5258 			ret = adap_init0_no_config(adap, reset);
5259 		else {
5260 			/*
5261 			 * Find out whether we're dealing with a version of
5262 			 * the firmware which has configuration file support.
5263 			 */
5264 			params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
5265 				     FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
5266 			ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
5267 					      params, val);
5268 
5269 			/*
5270 			 * If the firmware doesn't support Configuration
5271 			 * Files, use the old Driver-based, hard-wired
5272 			 * initialization.  Otherwise, try using the
5273 			 * Configuration File support and fall back to the
5274 			 * Driver-based initialization if there's no
5275 			 * Configuration File found.
5276 			 */
5277 			if (ret < 0)
5278 				ret = adap_init0_no_config(adap, reset);
5279 			else {
5280 				/*
5281 				 * The firmware provides us with a memory
5282 				 * buffer where we can load a Configuration
5283 				 * File from the host if we want to override
5284 				 * the Configuration File in flash.
5285 				 */
5286 
5287 				ret = adap_init0_config(adap, reset);
5288 				if (ret == -ENOENT) {
5289 					dev_info(adap->pdev_dev,
5290 					    "No Configuration File present "
5291 					    "on adapter. Using hard-wired "
5292 					    "configuration parameters.\n");
5293 					ret = adap_init0_no_config(adap, reset);
5294 				}
5295 			}
5296 		}
5297 		if (ret < 0) {
5298 			dev_err(adap->pdev_dev,
5299 				"could not initialize adapter, error %d\n",
5300 				-ret);
5301 			goto bye;
5302 		}
5303 	}
5304 
5305 	/*
5306 	 * If we're living with non-hard-coded parameters (either from a
5307 	 * Firmware Configuration File or values programmed by a different PF
5308 	 * Driver), give the SGE code a chance to pull in anything that it
5309 	 * needs ...  Note that this must be called after we retrieve our VPD
5310 	 * parameters in order to know how to convert core ticks to seconds.
5311 	 */
5312 	if (adap->flags & USING_SOFT_PARAMS) {
5313 		ret = t4_sge_init(adap);
5314 		if (ret < 0)
5315 			goto bye;
5316 	}
5317 
5318 	if (is_bypass_device(adap->pdev->device))
5319 		adap->params.bypass = 1;
5320 
5321 	/*
5322 	 * Grab some of our basic fundamental operating parameters.
5323 	 */
5324 #define FW_PARAM_DEV(param) \
5325 	(FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
5326 	FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
5327 
5328 #define FW_PARAM_PFVF(param) \
5329 	FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
5330 	FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)|  \
5331 	FW_PARAMS_PARAM_Y(0) | \
5332 	FW_PARAMS_PARAM_Z(0)
5333 
5334 	params[0] = FW_PARAM_PFVF(EQ_START);
5335 	params[1] = FW_PARAM_PFVF(L2T_START);
5336 	params[2] = FW_PARAM_PFVF(L2T_END);
5337 	params[3] = FW_PARAM_PFVF(FILTER_START);
5338 	params[4] = FW_PARAM_PFVF(FILTER_END);
5339 	params[5] = FW_PARAM_PFVF(IQFLINT_START);
5340 	ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, val);
5341 	if (ret < 0)
5342 		goto bye;
5343 	adap->sge.egr_start = val[0];
5344 	adap->l2t_start = val[1];
5345 	adap->l2t_end = val[2];
5346 	adap->tids.ftid_base = val[3];
5347 	adap->tids.nftids = val[4] - val[3] + 1;
5348 	adap->sge.ingr_start = val[5];
5349 
5350 	/* query params related to active filter region */
5351 	params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
5352 	params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
5353 	ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
	/* If the active filter region is non-empty, enable establishing
	 * offload connections through firmware work requests.
5356 	 */
5357 	if ((val[0] != val[1]) && (ret >= 0)) {
5358 		adap->flags |= FW_OFLD_CONN;
5359 		adap->tids.aftid_base = val[0];
5360 		adap->tids.aftid_end = val[1];
5361 	}
5362 
5363 	/* If we're running on newer firmware, let it know that we're
5364 	 * prepared to deal with encapsulated CPL messages.  Older
5365 	 * firmware won't understand this and we'll just get
5366 	 * unencapsulated messages ...
5367 	 */
5368 	params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
5369 	val[0] = 1;
5370 	(void) t4_set_params(adap, adap->mbox, adap->fn, 0, 1, params, val);
5371 
5372 	/*
5373 	 * Get device capabilities so we can determine what resources we need
5374 	 * to manage.
5375 	 */
5376 	memset(&caps_cmd, 0, sizeof(caps_cmd));
5377 	caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
5378 				     FW_CMD_REQUEST | FW_CMD_READ);
5379 	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
5380 	ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
5381 			 &caps_cmd);
5382 	if (ret < 0)
5383 		goto bye;
5384 
5385 	if (caps_cmd.ofldcaps) {
5386 		/* query offload-related parameters */
5387 		params[0] = FW_PARAM_DEV(NTID);
5388 		params[1] = FW_PARAM_PFVF(SERVER_START);
5389 		params[2] = FW_PARAM_PFVF(SERVER_END);
5390 		params[3] = FW_PARAM_PFVF(TDDP_START);
5391 		params[4] = FW_PARAM_PFVF(TDDP_END);
5392 		params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
5393 		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
5394 				      params, val);
5395 		if (ret < 0)
5396 			goto bye;
5397 		adap->tids.ntids = val[0];
5398 		adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
5399 		adap->tids.stid_base = val[1];
5400 		adap->tids.nstids = val[2] - val[1] + 1;
		/*
		 * Set up the server filter region.  Divide the available
		 * filter region into two parts: regular filters get 1/3rd and
		 * server filters get 2/3rd.  This split is only done when the
		 * FW_OFLD_CONN work-around path is enabled.
		 * 1. Regular filters.
		 * 2. Server filters: special filters used to redirect SYN
		 *    packets to the offload queue.
		 */
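		/* Worked example (hypothetical numbers): with 496 filter IDs,
		 * DIV_ROUND_UP(496, 3) = 166 remain regular filters and the
		 * other 330 become server filters.
		 */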
5410 		if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
5411 			adap->tids.sftid_base = adap->tids.ftid_base +
5412 					DIV_ROUND_UP(adap->tids.nftids, 3);
5413 			adap->tids.nsftids = adap->tids.nftids -
5414 					 DIV_ROUND_UP(adap->tids.nftids, 3);
5415 			adap->tids.nftids = adap->tids.sftid_base -
5416 						adap->tids.ftid_base;
5417 		}
5418 		adap->vres.ddp.start = val[3];
5419 		adap->vres.ddp.size = val[4] - val[3] + 1;
5420 		adap->params.ofldq_wr_cred = val[5];
5421 
5422 		adap->params.offload = 1;
5423 	}
5424 	if (caps_cmd.rdmacaps) {
5425 		params[0] = FW_PARAM_PFVF(STAG_START);
5426 		params[1] = FW_PARAM_PFVF(STAG_END);
5427 		params[2] = FW_PARAM_PFVF(RQ_START);
5428 		params[3] = FW_PARAM_PFVF(RQ_END);
5429 		params[4] = FW_PARAM_PFVF(PBL_START);
5430 		params[5] = FW_PARAM_PFVF(PBL_END);
5431 		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
5432 				      params, val);
5433 		if (ret < 0)
5434 			goto bye;
5435 		adap->vres.stag.start = val[0];
5436 		adap->vres.stag.size = val[1] - val[0] + 1;
5437 		adap->vres.rq.start = val[2];
5438 		adap->vres.rq.size = val[3] - val[2] + 1;
5439 		adap->vres.pbl.start = val[4];
5440 		adap->vres.pbl.size = val[5] - val[4] + 1;
5441 
5442 		params[0] = FW_PARAM_PFVF(SQRQ_START);
5443 		params[1] = FW_PARAM_PFVF(SQRQ_END);
5444 		params[2] = FW_PARAM_PFVF(CQ_START);
5445 		params[3] = FW_PARAM_PFVF(CQ_END);
5446 		params[4] = FW_PARAM_PFVF(OCQ_START);
5447 		params[5] = FW_PARAM_PFVF(OCQ_END);
		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
				      params, val);
5449 		if (ret < 0)
5450 			goto bye;
5451 		adap->vres.qp.start = val[0];
5452 		adap->vres.qp.size = val[1] - val[0] + 1;
5453 		adap->vres.cq.start = val[2];
5454 		adap->vres.cq.size = val[3] - val[2] + 1;
5455 		adap->vres.ocq.start = val[4];
5456 		adap->vres.ocq.size = val[5] - val[4] + 1;
5457 	}
5458 	if (caps_cmd.iscsicaps) {
5459 		params[0] = FW_PARAM_PFVF(ISCSI_START);
5460 		params[1] = FW_PARAM_PFVF(ISCSI_END);
5461 		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
5462 				      params, val);
5463 		if (ret < 0)
5464 			goto bye;
5465 		adap->vres.iscsi.start = val[0];
5466 		adap->vres.iscsi.size = val[1] - val[0] + 1;
5467 	}
5468 #undef FW_PARAM_PFVF
5469 #undef FW_PARAM_DEV
5470 
5471 	/*
5472 	 * These are finalized by FW initialization, load their values now.
5473 	 */
5474 	t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
5475 	t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
5476 		     adap->params.b_wnd);
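	/* mtus[] is the hardware path-MTU table; a_wnd[] and b_wnd[] hold
	 * the TP congestion-control window scaling (alpha/beta) parameters
	 * that t4_load_mtus() programs along with the MTU table.
	 */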
5477 
5478 	t4_init_tp_params(adap);
5479 	adap->flags |= FW_OK;
5480 	return 0;
5481 
5482 	/*
5483 	 * Something bad happened.  If a command timed out or failed with EIO
5484 	 * FW does not operate within its spec or something catastrophic
5485 	 * happened to HW/FW, stop issuing commands.
5486 	 */
5487 bye:
5488 	if (ret != -ETIMEDOUT && ret != -EIO)
5489 		t4_fw_bye(adap, adap->mbox);
5490 	return ret;
5491 }
5492 
5493 /* EEH callbacks */
5494 
5495 static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
5496 					 pci_channel_state_t state)
5497 {
5498 	int i;
5499 	struct adapter *adap = pci_get_drvdata(pdev);
5500 
5501 	if (!adap)
5502 		goto out;
5503 
5504 	rtnl_lock();
5505 	adap->flags &= ~FW_OK;
5506 	notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
5507 	spin_lock(&adap->stats_lock);
5508 	for_each_port(adap, i) {
5509 		struct net_device *dev = adap->port[i];
5510 
5511 		netif_device_detach(dev);
5512 		netif_carrier_off(dev);
5513 	}
5514 	spin_unlock(&adap->stats_lock);
5515 	if (adap->flags & FULL_INIT_DONE)
5516 		cxgb_down(adap);
5517 	rtnl_unlock();
5518 	if ((adap->flags & DEV_ENABLED)) {
5519 		pci_disable_device(pdev);
5520 		adap->flags &= ~DEV_ENABLED;
5521 	}
out:
	return state == pci_channel_io_perm_failure ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
5524 }
5525 
5526 static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
5527 {
5528 	int i, ret;
5529 	struct fw_caps_config_cmd c;
5530 	struct adapter *adap = pci_get_drvdata(pdev);
5531 
5532 	if (!adap) {
5533 		pci_restore_state(pdev);
5534 		pci_save_state(pdev);
5535 		return PCI_ERS_RESULT_RECOVERED;
5536 	}
5537 
5538 	if (!(adap->flags & DEV_ENABLED)) {
5539 		if (pci_enable_device(pdev)) {
5540 			dev_err(&pdev->dev, "Cannot reenable PCI "
5541 					    "device after reset\n");
5542 			return PCI_ERS_RESULT_DISCONNECT;
5543 		}
5544 		adap->flags |= DEV_ENABLED;
5545 	}
5546 
5547 	pci_set_master(pdev);
5548 	pci_restore_state(pdev);
5549 	pci_save_state(pdev);
5550 	pci_cleanup_aer_uncorrect_error_status(pdev);
5551 
5552 	if (t4_wait_dev_ready(adap) < 0)
5553 		return PCI_ERS_RESULT_DISCONNECT;
5554 	if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL) < 0)
5555 		return PCI_ERS_RESULT_DISCONNECT;
5556 	adap->flags |= FW_OK;
5557 	if (adap_init1(adap, &c))
5558 		return PCI_ERS_RESULT_DISCONNECT;
5559 
5560 	for_each_port(adap, i) {
5561 		struct port_info *p = adap2pinfo(adap, i);
5562 
5563 		ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1,
5564 				  NULL, NULL);
5565 		if (ret < 0)
5566 			return PCI_ERS_RESULT_DISCONNECT;
5567 		p->viid = ret;
5568 		p->xact_addr_filt = -1;
5569 	}
5570 
5571 	t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
5572 		     adap->params.b_wnd);
5573 	setup_memwin(adap);
5574 	if (cxgb_up(adap))
5575 		return PCI_ERS_RESULT_DISCONNECT;
5576 	return PCI_ERS_RESULT_RECOVERED;
5577 }
5578 
5579 static void eeh_resume(struct pci_dev *pdev)
5580 {
5581 	int i;
5582 	struct adapter *adap = pci_get_drvdata(pdev);
5583 
5584 	if (!adap)
5585 		return;
5586 
5587 	rtnl_lock();
5588 	for_each_port(adap, i) {
5589 		struct net_device *dev = adap->port[i];
5590 
5591 		if (netif_running(dev)) {
5592 			link_start(dev);
5593 			cxgb_set_rxmode(dev);
5594 		}
5595 		netif_device_attach(dev);
5596 	}
5597 	rtnl_unlock();
5598 }
5599 
5600 static const struct pci_error_handlers cxgb4_eeh = {
5601 	.error_detected = eeh_err_detected,
5602 	.slot_reset     = eeh_slot_reset,
5603 	.resume         = eeh_resume,
5604 };
5605 
5606 static inline bool is_10g_port(const struct link_config *lc)
5607 {
5608 	return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0;
5609 }
5610 
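/*
 * For illustration, the call init_rspq(&q, 6, 0, 512, 64) selects SGE
 * hold-off timer index 6, enables packet-count thresholding with counter
 * index 0 (since 0 < SGE_NCOUNTERS), and sizes the queue at 512 entries of
 * 64 bytes each; this is how cfg_queues() sets up the firmware event queue.
 */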
5611 static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx,
5612 			     unsigned int size, unsigned int iqe_size)
5613 {
5614 	q->intr_params = QINTR_TIMER_IDX(timer_idx) |
5615 			 (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0);
5616 	q->pktcnt_idx = pkt_cnt_idx < SGE_NCOUNTERS ? pkt_cnt_idx : 0;
5617 	q->iqe_len = iqe_size;
5618 	q->size = size;
5619 }
5620 
5621 /*
5622  * Perform default configuration of DMA queues depending on the number and type
5623  * of ports we found and the number of available CPUs.  Most settings can be
5624  * modified by the admin prior to actual use.
5625  */
5626 static void cfg_queues(struct adapter *adap)
5627 {
5628 	struct sge *s = &adap->sge;
5629 	int i, q10g = 0, n10g = 0, qidx = 0;
5630 
5631 	for_each_port(adap, i)
5632 		n10g += is_10g_port(&adap2pinfo(adap, i)->link_cfg);
5633 
5634 	/*
5635 	 * We default to 1 queue per non-10G port and up to # of cores queues
5636 	 * per 10G port.
5637 	 */
5638 	if (n10g)
5639 		q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
5640 	if (q10g > netif_get_num_default_rss_queues())
5641 		q10g = netif_get_num_default_rss_queues();
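	/* Illustrative sizing (assumed topology): with two 10G ports, two 1G
	 * ports and at least eight online CPUs, q10g starts at
	 * (MAX_ETH_QSETS - 2) / 2 and is then capped at
	 * netif_get_num_default_rss_queues() (at most 8), so each 10G port
	 * typically ends up with 8 queue sets and each 1G port with 1.
	 */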
5642 
5643 	for_each_port(adap, i) {
5644 		struct port_info *pi = adap2pinfo(adap, i);
5645 
5646 		pi->first_qset = qidx;
5647 		pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1;
5648 		qidx += pi->nqsets;
5649 	}
5650 
5651 	s->ethqsets = qidx;
5652 	s->max_ethqsets = qidx;   /* MSI-X may lower it later */
5653 
5654 	if (is_offload(adap)) {
5655 		/*
5656 		 * For offload we use 1 queue/channel if all ports are up to 1G,
5657 		 * otherwise we divide all available queues amongst the channels
5658 		 * capped by the number of available cores.
5659 		 */
5660 		if (n10g) {
5661 			i = min_t(int, ARRAY_SIZE(s->ofldrxq),
5662 				  num_online_cpus());
5663 			s->ofldqsets = roundup(i, adap->params.nports);
5664 		} else
5665 			s->ofldqsets = adap->params.nports;
5666 		/* For RDMA one Rx queue per channel suffices */
5667 		s->rdmaqs = adap->params.nports;
5668 	}
5669 
5670 	for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
5671 		struct sge_eth_rxq *r = &s->ethrxq[i];
5672 
5673 		init_rspq(&r->rspq, 0, 0, 1024, 64);
5674 		r->fl.size = 72;
5675 	}
5676 
5677 	for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
5678 		s->ethtxq[i].q.size = 1024;
5679 
5680 	for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
5681 		s->ctrlq[i].q.size = 512;
5682 
5683 	for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
5684 		s->ofldtxq[i].q.size = 1024;
5685 
5686 	for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
5687 		struct sge_ofld_rxq *r = &s->ofldrxq[i];
5688 
5689 		init_rspq(&r->rspq, 0, 0, 1024, 64);
5690 		r->rspq.uld = CXGB4_ULD_ISCSI;
5691 		r->fl.size = 72;
5692 	}
5693 
5694 	for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
5695 		struct sge_ofld_rxq *r = &s->rdmarxq[i];
5696 
5697 		init_rspq(&r->rspq, 0, 0, 511, 64);
5698 		r->rspq.uld = CXGB4_ULD_RDMA;
5699 		r->fl.size = 72;
5700 	}
5701 
5702 	init_rspq(&s->fw_evtq, 6, 0, 512, 64);
5703 	init_rspq(&s->intrq, 6, 0, 2 * MAX_INGQ, 64);
5704 }
5705 
5706 /*
5707  * Reduce the number of Ethernet queues across all ports to at most n.
5708  * n provides at least one queue per port.
5709  */
5710 static void reduce_ethqs(struct adapter *adap, int n)
5711 {
5712 	int i;
5713 	struct port_info *pi;
5714 
5715 	while (n < adap->sge.ethqsets)
5716 		for_each_port(adap, i) {
5717 			pi = adap2pinfo(adap, i);
5718 			if (pi->nqsets > 1) {
5719 				pi->nqsets--;
5720 				adap->sge.ethqsets--;
5721 				if (adap->sge.ethqsets <= n)
5722 					break;
5723 			}
5724 		}
5725 
5726 	n = 0;
5727 	for_each_port(adap, i) {
5728 		pi = adap2pinfo(adap, i);
5729 		pi->first_qset = n;
5730 		n += pi->nqsets;
5731 	}
5732 }
5733 
5734 /* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
5735 #define EXTRA_VECS 2
5736 
5737 static int enable_msix(struct adapter *adap)
5738 {
5739 	int ofld_need = 0;
5740 	int i, err, want, need;
5741 	struct sge *s = &adap->sge;
5742 	unsigned int nchan = adap->params.nports;
5743 	struct msix_entry entries[MAX_INGQ + 1];
5744 
5745 	for (i = 0; i < ARRAY_SIZE(entries); ++i)
5746 		entries[i].entry = i;
5747 
5748 	want = s->max_ethqsets + EXTRA_VECS;
5749 	if (is_offload(adap)) {
5750 		want += s->rdmaqs + s->ofldqsets;
5751 		/* need nchan for each possible ULD */
5752 		ofld_need = 2 * nchan;
5753 	}
5754 	need = adap->params.nports + EXTRA_VECS + ofld_need;
5755 
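	/* The legacy pci_enable_msix() interface returns 0 on success, a
	 * negative errno on failure, or, when it cannot allocate 'want'
	 * vectors, the number of vectors that could be allocated; in that
	 * last case we retry with the smaller count as long as it still
	 * satisfies our minimum.
	 */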
5756 	while ((err = pci_enable_msix(adap->pdev, entries, want)) >= need)
5757 		want = err;
5758 
5759 	if (!err) {
5760 		/*
5761 		 * Distribute available vectors to the various queue groups.
5762 		 * Every group gets its minimum requirement and NIC gets top
5763 		 * priority for leftovers.
5764 		 */
5765 		i = want - EXTRA_VECS - ofld_need;
5766 		if (i < s->max_ethqsets) {
5767 			s->max_ethqsets = i;
5768 			if (i < s->ethqsets)
5769 				reduce_ethqs(adap, i);
5770 		}
5771 		if (is_offload(adap)) {
5772 			i = want - EXTRA_VECS - s->max_ethqsets;
5773 			i -= ofld_need - nchan;
5774 			s->ofldqsets = (i / nchan) * nchan;  /* round down */
5775 		}
5776 		for (i = 0; i < want; ++i)
5777 			adap->msix_info[i].vec = entries[i].vector;
5778 	} else if (err > 0)
5779 		dev_info(adap->pdev_dev,
5780 			 "only %d MSI-X vectors left, not using MSI-X\n", err);
5781 	return err;
5782 }
5783 
5784 #undef EXTRA_VECS
5785 
5786 static int init_rss(struct adapter *adap)
5787 {
5788 	unsigned int i, j;
5789 
5790 	for_each_port(adap, i) {
5791 		struct port_info *pi = adap2pinfo(adap, i);
5792 
5793 		pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
5794 		if (!pi->rss)
5795 			return -ENOMEM;
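		/* ethtool_rxfh_indir_default(j, n) is simply j % n, so the
		 * RSS indirection table below cycles through the port's
		 * queue sets: 0, 1, ..., nqsets - 1, 0, 1, ... for all
		 * rss_size entries.
		 */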
5796 		for (j = 0; j < pi->rss_size; j++)
5797 			pi->rss[j] = ethtool_rxfh_indir_default(j, pi->nqsets);
5798 	}
5799 	return 0;
5800 }
5801 
5802 static void print_port_info(const struct net_device *dev)
5803 {
5804 	static const char *base[] = {
5805 		"R XFI", "R XAUI", "T SGMII", "T XFI", "T XAUI", "KX4", "CX4",
5806 		"KX", "KR", "R SFP+", "KR/KX", "KR/KX/KX4"
5807 	};
5808 
5809 	char buf[80];
5810 	char *bufp = buf;
5811 	const char *spd = "";
5812 	const struct port_info *pi = netdev_priv(dev);
5813 	const struct adapter *adap = pi->adapter;
5814 
5815 	if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
5816 		spd = " 2.5 GT/s";
5817 	else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
5818 		spd = " 5 GT/s";
5819 
5820 	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
5821 		bufp += sprintf(bufp, "100/");
5822 	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
5823 		bufp += sprintf(bufp, "1000/");
5824 	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
5825 		bufp += sprintf(bufp, "10G/");
5826 	if (bufp != buf)
5827 		--bufp;
5828 	sprintf(bufp, "BASE-%s", base[pi->port_type]);
5829 
5830 	netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
5831 		    adap->params.vpd.id,
5832 		    CHELSIO_CHIP_RELEASE(adap->params.chip), buf,
5833 		    is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
5834 		    (adap->flags & USING_MSIX) ? " MSI-X" :
5835 		    (adap->flags & USING_MSI) ? " MSI" : "");
5836 	netdev_info(dev, "S/N: %s, E/C: %s\n",
5837 		    adap->params.vpd.sn, adap->params.vpd.ec);
5838 }
5839 
5840 static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
5841 {
5842 	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
5843 }
5844 
5845 /*
5846  * Free the following resources:
5847  * - memory used for tables
5848  * - MSI/MSI-X
5849  * - net devices
5850  * - resources FW is holding for us
5851  */
5852 static void free_some_resources(struct adapter *adapter)
5853 {
5854 	unsigned int i;
5855 
5856 	t4_free_mem(adapter->l2t);
5857 	t4_free_mem(adapter->tids.tid_tab);
5858 	disable_msi(adapter);
5859 
5860 	for_each_port(adapter, i)
5861 		if (adapter->port[i]) {
5862 			kfree(adap2pinfo(adapter, i)->rss);
5863 			free_netdev(adapter->port[i]);
5864 		}
5865 	if (adapter->flags & FW_OK)
5866 		t4_fw_bye(adapter, adapter->fn);
5867 }
5868 
5869 #define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
5870 #define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
5871 		   NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
5872 #define SEGMENT_SIZE 128
5873 
5874 static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
5875 {
5876 	int func, i, err, s_qpp, qpp, num_seg;
5877 	struct port_info *pi;
5878 	bool highdma = false;
5879 	struct adapter *adapter = NULL;
5880 
5881 	printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
5882 
5883 	err = pci_request_regions(pdev, KBUILD_MODNAME);
5884 	if (err) {
5885 		/* Just info, some other driver may have claimed the device. */
5886 		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
5887 		return err;
5888 	}
5889 
5890 	/* We control everything through one PF */
5891 	func = PCI_FUNC(pdev->devfn);
5892 	if (func != ent->driver_data) {
5893 		pci_save_state(pdev);        /* to restore SR-IOV later */
5894 		goto sriov;
5895 	}
5896 
5897 	err = pci_enable_device(pdev);
5898 	if (err) {
5899 		dev_err(&pdev->dev, "cannot enable PCI device\n");
5900 		goto out_release_regions;
5901 	}
5902 
5903 	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
5904 		highdma = true;
5905 		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
5906 		if (err) {
5907 			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
5908 				"coherent allocations\n");
5909 			goto out_disable_device;
5910 		}
5911 	} else {
5912 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
5913 		if (err) {
5914 			dev_err(&pdev->dev, "no usable DMA configuration\n");
5915 			goto out_disable_device;
5916 		}
5917 	}
5918 
5919 	pci_enable_pcie_error_reporting(pdev);
5920 	enable_pcie_relaxed_ordering(pdev);
5921 	pci_set_master(pdev);
5922 	pci_save_state(pdev);
5923 
5924 	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
5925 	if (!adapter) {
5926 		err = -ENOMEM;
5927 		goto out_disable_device;
5928 	}
5929 
5930 	/* PCI device has been enabled */
5931 	adapter->flags |= DEV_ENABLED;
5932 
5933 	adapter->regs = pci_ioremap_bar(pdev, 0);
5934 	if (!adapter->regs) {
5935 		dev_err(&pdev->dev, "cannot map device registers\n");
5936 		err = -ENOMEM;
5937 		goto out_free_adapter;
5938 	}
5939 
5940 	adapter->pdev = pdev;
5941 	adapter->pdev_dev = &pdev->dev;
5942 	adapter->mbox = func;
5943 	adapter->fn = func;
5944 	adapter->msg_enable = dflt_msg_enable;
5945 	memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
5946 
5947 	spin_lock_init(&adapter->stats_lock);
5948 	spin_lock_init(&adapter->tid_release_lock);
5949 
5950 	INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
5951 	INIT_WORK(&adapter->db_full_task, process_db_full);
5952 	INIT_WORK(&adapter->db_drop_task, process_db_drop);
5953 
5954 	err = t4_prep_adapter(adapter);
5955 	if (err)
5956 		goto out_unmap_bar0;
5957 
5958 	if (!is_t4(adapter->params.chip)) {
5959 		s_qpp = QUEUESPERPAGEPF1 * adapter->fn;
5960 		qpp = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adapter,
5961 		      SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
5962 		num_seg = PAGE_SIZE / SEGMENT_SIZE;
5963 
		/* Each write-combining segment is 128 bytes.  Write
		 * coalescing works only when the number of egress queues per
		 * page configured in SGE_EGRESS_QUEUES_PER_PAGE_PF for this
		 * function does not exceed the number of segments that fit
		 * in a page.
		 */
5969 		if (qpp > num_seg) {
5970 			dev_err(&pdev->dev,
5971 				"Incorrect number of egress queues per page\n");
5972 			err = -EINVAL;
5973 			goto out_unmap_bar0;
5974 		}
		adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
					   pci_resource_len(pdev, 2));
5977 		if (!adapter->bar2) {
5978 			dev_err(&pdev->dev, "cannot map device bar2 region\n");
5979 			err = -ENOMEM;
5980 			goto out_unmap_bar0;
5981 		}
5982 	}
5983 
5984 	setup_memwin(adapter);
5985 	err = adap_init0(adapter);
5986 	setup_memwin_rdma(adapter);
5987 	if (err)
5988 		goto out_unmap_bar;
5989 
5990 	for_each_port(adapter, i) {
5991 		struct net_device *netdev;
5992 
5993 		netdev = alloc_etherdev_mq(sizeof(struct port_info),
5994 					   MAX_ETH_QSETS);
5995 		if (!netdev) {
5996 			err = -ENOMEM;
5997 			goto out_free_dev;
5998 		}
5999 
6000 		SET_NETDEV_DEV(netdev, &pdev->dev);
6001 
6002 		adapter->port[i] = netdev;
6003 		pi = netdev_priv(netdev);
6004 		pi->adapter = adapter;
6005 		pi->xact_addr_filt = -1;
6006 		pi->port_id = i;
6007 		netdev->irq = pdev->irq;
6008 
6009 		netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
6010 			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
6011 			NETIF_F_RXCSUM | NETIF_F_RXHASH |
6012 			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
6013 		if (highdma)
6014 			netdev->hw_features |= NETIF_F_HIGHDMA;
6015 		netdev->features |= netdev->hw_features;
6016 		netdev->vlan_features = netdev->features & VLAN_FEAT;
6017 
6018 		netdev->priv_flags |= IFF_UNICAST_FLT;
6019 
6020 		netdev->netdev_ops = &cxgb4_netdev_ops;
6021 		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
6022 	}
6023 
6024 	pci_set_drvdata(pdev, adapter);
6025 
6026 	if (adapter->flags & FW_OK) {
6027 		err = t4_port_init(adapter, func, func, 0);
6028 		if (err)
6029 			goto out_free_dev;
6030 	}
6031 
6032 	/*
6033 	 * Configure queues and allocate tables now, they can be needed as
6034 	 * soon as the first register_netdev completes.
6035 	 */
6036 	cfg_queues(adapter);
6037 
6038 	adapter->l2t = t4_init_l2t();
6039 	if (!adapter->l2t) {
6040 		/* We tolerate a lack of L2T, giving up some functionality */
6041 		dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
6042 		adapter->params.offload = 0;
6043 	}
6044 
6045 	if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
6046 		dev_warn(&pdev->dev, "could not allocate TID table, "
6047 			 "continuing\n");
6048 		adapter->params.offload = 0;
6049 	}
6050 
6051 	/* See what interrupts we'll be using */
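	/* Per the logic below: msi >= 2 prefers MSI-X with MSI as the
	 * fallback, msi == 1 allows only MSI, and msi == 0 leaves the
	 * adapter on legacy INTx interrupts.
	 */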
6052 	if (msi > 1 && enable_msix(adapter) == 0)
6053 		adapter->flags |= USING_MSIX;
6054 	else if (msi > 0 && pci_enable_msi(pdev) == 0)
6055 		adapter->flags |= USING_MSI;
6056 
6057 	err = init_rss(adapter);
6058 	if (err)
6059 		goto out_free_dev;
6060 
6061 	/*
6062 	 * The card is now ready to go.  If any errors occur during device
6063 	 * registration we do not fail the whole card but rather proceed only
6064 	 * with the ports we manage to register successfully.  However we must
6065 	 * register at least one net device.
6066 	 */
6067 	for_each_port(adapter, i) {
6068 		pi = adap2pinfo(adapter, i);
6069 		netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
6070 		netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);
6071 
6072 		err = register_netdev(adapter->port[i]);
6073 		if (err)
6074 			break;
6075 		adapter->chan_map[pi->tx_chan] = i;
6076 		print_port_info(adapter->port[i]);
6077 	}
6078 	if (i == 0) {
6079 		dev_err(&pdev->dev, "could not register any net devices\n");
6080 		goto out_free_dev;
6081 	}
6082 	if (err) {
6083 		dev_warn(&pdev->dev, "only %d net devices registered\n", i);
6084 		err = 0;
6085 	}
6086 
6087 	if (cxgb4_debugfs_root) {
6088 		adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
6089 							   cxgb4_debugfs_root);
6090 		setup_debugfs(adapter);
6091 	}
6092 
6093 	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
6094 	pdev->needs_freset = 1;
6095 
6096 	if (is_offload(adapter))
6097 		attach_ulds(adapter);
6098 
6099 sriov:
6100 #ifdef CONFIG_PCI_IOV
6101 	if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
6102 		if (pci_enable_sriov(pdev, num_vf[func]) == 0)
6103 			dev_info(&pdev->dev,
6104 				 "instantiated %u virtual functions\n",
6105 				 num_vf[func]);
6106 #endif
6107 	return 0;
6108 
6109  out_free_dev:
6110 	free_some_resources(adapter);
6111  out_unmap_bar:
6112 	if (!is_t4(adapter->params.chip))
6113 		iounmap(adapter->bar2);
6114  out_unmap_bar0:
6115 	iounmap(adapter->regs);
6116  out_free_adapter:
6117 	kfree(adapter);
6118  out_disable_device:
6119 	pci_disable_pcie_error_reporting(pdev);
6120 	pci_disable_device(pdev);
6121  out_release_regions:
6122 	pci_release_regions(pdev);
6123 	return err;
6124 }
6125 
6126 static void remove_one(struct pci_dev *pdev)
6127 {
6128 	struct adapter *adapter = pci_get_drvdata(pdev);
6129 
#ifdef CONFIG_PCI_IOV
	pci_disable_sriov(pdev);
#endif
6134 
6135 	if (adapter) {
6136 		int i;
6137 
6138 		if (is_offload(adapter))
6139 			detach_ulds(adapter);
6140 
6141 		for_each_port(adapter, i)
6142 			if (adapter->port[i]->reg_state == NETREG_REGISTERED)
6143 				unregister_netdev(adapter->port[i]);
6144 
6145 		if (adapter->debugfs_root)
6146 			debugfs_remove_recursive(adapter->debugfs_root);
6147 
6148 		/* If we allocated filters, free up state associated with any
6149 		 * valid filters ...
6150 		 */
6151 		if (adapter->tids.ftid_tab) {
6152 			struct filter_entry *f = &adapter->tids.ftid_tab[0];
6153 			for (i = 0; i < (adapter->tids.nftids +
6154 					adapter->tids.nsftids); i++, f++)
6155 				if (f->valid)
6156 					clear_filter(adapter, f);
6157 		}
6158 
6159 		if (adapter->flags & FULL_INIT_DONE)
6160 			cxgb_down(adapter);
6161 
6162 		free_some_resources(adapter);
6163 		iounmap(adapter->regs);
6164 		if (!is_t4(adapter->params.chip))
6165 			iounmap(adapter->bar2);
6166 		pci_disable_pcie_error_reporting(pdev);
6167 		if ((adapter->flags & DEV_ENABLED)) {
6168 			pci_disable_device(pdev);
6169 			adapter->flags &= ~DEV_ENABLED;
6170 		}
6171 		pci_release_regions(pdev);
6172 		kfree(adapter);
6173 	} else
6174 		pci_release_regions(pdev);
6175 }
6176 
6177 static struct pci_driver cxgb4_driver = {
6178 	.name     = KBUILD_MODNAME,
6179 	.id_table = cxgb4_pci_tbl,
6180 	.probe    = init_one,
6181 	.remove   = remove_one,
6182 	.err_handler = &cxgb4_eeh,
6183 };
6184 
6185 static int __init cxgb4_init_module(void)
6186 {
6187 	int ret;
6188 
6189 	workq = create_singlethread_workqueue("cxgb4");
6190 	if (!workq)
6191 		return -ENOMEM;
6192 
6193 	/* Debugfs support is optional, just warn if this fails */
6194 	cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
6195 	if (!cxgb4_debugfs_root)
6196 		pr_warn("could not create debugfs entry, continuing\n");
6197 
	ret = pci_register_driver(&cxgb4_driver);
	if (ret < 0) {
		debugfs_remove(cxgb4_debugfs_root);
		destroy_workqueue(workq);
		return ret;
	}

	register_inet6addr_notifier(&cxgb4_inet6addr_notifier);

	return 0;
6207 }
6208 
6209 static void __exit cxgb4_cleanup_module(void)
6210 {
6211 	unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
6212 	pci_unregister_driver(&cxgb4_driver);
6213 	debugfs_remove(cxgb4_debugfs_root);  /* NULL ok */
6214 	flush_workqueue(workq);
6215 	destroy_workqueue(workq);
6216 }
6217 
6218 module_init(cxgb4_init_module);
6219 module_exit(cxgb4_cleanup_module);
6220