1 /*
2  * This file is part of the Chelsio T4 Ethernet driver for Linux.
3  *
4  * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  *     Redistribution and use in source and binary forms, with or
13  *     without modification, are permitted provided that the following
14  *     conditions are met:
15  *
16  *      - Redistributions of source code must retain the above
17  *        copyright notice, this list of conditions and the following
18  *        disclaimer.
19  *
20  *      - Redistributions in binary form must reproduce the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer in the documentation and/or other materials
23  *        provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  */
34 
35 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
36 
37 #include <linux/bitmap.h>
38 #include <linux/crc32.h>
39 #include <linux/ctype.h>
40 #include <linux/debugfs.h>
41 #include <linux/err.h>
42 #include <linux/etherdevice.h>
43 #include <linux/firmware.h>
44 #include <linux/if.h>
45 #include <linux/if_vlan.h>
46 #include <linux/init.h>
47 #include <linux/log2.h>
48 #include <linux/mdio.h>
49 #include <linux/module.h>
50 #include <linux/moduleparam.h>
51 #include <linux/mutex.h>
52 #include <linux/netdevice.h>
53 #include <linux/pci.h>
54 #include <linux/aer.h>
55 #include <linux/rtnetlink.h>
56 #include <linux/sched.h>
57 #include <linux/seq_file.h>
58 #include <linux/sockios.h>
59 #include <linux/vmalloc.h>
60 #include <linux/workqueue.h>
61 #include <net/neighbour.h>
62 #include <net/netevent.h>
63 #include <net/addrconf.h>
64 #include <net/bonding.h>
65 #include <linux/uaccess.h>
66 #include <linux/crash_dump.h>
67 #include <net/udp_tunnel.h>
68 
69 #include "cxgb4.h"
70 #include "cxgb4_filter.h"
71 #include "t4_regs.h"
72 #include "t4_values.h"
73 #include "t4_msg.h"
74 #include "t4fw_api.h"
75 #include "t4fw_version.h"
76 #include "cxgb4_dcb.h"
77 #include "srq.h"
78 #include "cxgb4_debugfs.h"
79 #include "clip_tbl.h"
80 #include "l2t.h"
81 #include "smt.h"
82 #include "sched.h"
83 #include "cxgb4_tc_u32.h"
84 #include "cxgb4_tc_flower.h"
85 #include "cxgb4_ptp.h"
86 #include "cxgb4_cudbg.h"
87 
88 char cxgb4_driver_name[] = KBUILD_MODNAME;
89 
90 #ifdef DRV_VERSION
91 #undef DRV_VERSION
92 #endif
93 #define DRV_VERSION "2.0.0-ko"
94 const char cxgb4_driver_version[] = DRV_VERSION;
95 #define DRV_DESC "Chelsio T4/T5/T6 Network Driver"
96 
97 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
98 			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
99 			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
100 
101 /* Macros needed to support the PCI Device ID Table ...
102  */
103 #define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
104 	static const struct pci_device_id cxgb4_pci_tbl[] = {
105 #define CXGB4_UNIFIED_PF 0x4
106 
107 #define CH_PCI_DEVICE_ID_FUNCTION CXGB4_UNIFIED_PF
108 
109 /* Include PCI Device IDs for both PF4 and PF0-3 so our PCI probe() routine is
110  * called for both.
111  */
112 #define CH_PCI_DEVICE_ID_FUNCTION2 0x0
113 
114 #define CH_PCI_ID_TABLE_ENTRY(devid) \
115 		{PCI_VDEVICE(CHELSIO, (devid)), CXGB4_UNIFIED_PF}
116 
117 #define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
118 		{ 0, } \
119 	}
120 
121 #include "t4_pci_id_tbl.h"
122 
123 #define FW4_FNAME "cxgb4/t4fw.bin"
124 #define FW5_FNAME "cxgb4/t5fw.bin"
125 #define FW6_FNAME "cxgb4/t6fw.bin"
126 #define FW4_CFNAME "cxgb4/t4-config.txt"
127 #define FW5_CFNAME "cxgb4/t5-config.txt"
128 #define FW6_CFNAME "cxgb4/t6-config.txt"
129 #define PHY_AQ1202_FIRMWARE "cxgb4/aq1202_fw.cld"
130 #define PHY_BCM84834_FIRMWARE "cxgb4/bcm8483.bin"
131 #define PHY_AQ1202_DEVICEID 0x4409
132 #define PHY_BCM84834_DEVICEID 0x4486
133 
134 MODULE_DESCRIPTION(DRV_DESC);
135 MODULE_AUTHOR("Chelsio Communications");
136 MODULE_LICENSE("Dual BSD/GPL");
137 MODULE_VERSION(DRV_VERSION);
138 MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
139 MODULE_FIRMWARE(FW4_FNAME);
140 MODULE_FIRMWARE(FW5_FNAME);
141 MODULE_FIRMWARE(FW6_FNAME);
142 
143 /*
144  * The driver uses the best interrupt scheme available on a platform in the
145  * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
146  * of these schemes the driver may consider as follows:
147  *
148  * msi = 2: choose from among all three options
149  * msi = 1: only consider MSI and INTx interrupts
150  * msi = 0: force INTx interrupts
151  */
152 static int msi = 2;
153 
154 module_param(msi, int, 0644);
155 MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
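
/* For example, "modprobe cxgb4 msi=1" (an illustrative invocation)
 * restricts the driver to MSI or INTx even where MSI-X is available,
 * and "msi=0" forces legacy INTx.
 */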
156 
157 /*
158  * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
159  * offset by 2 bytes in order to have the IP headers line up on 4-byte
160  * boundaries.  This is a requirement for many architectures which will throw
161  * a machine check fault if an attempt is made to access one of the 4-byte IP
 * header fields on a non-4-byte boundary.  Even on architectures which
 * allow it, such as some implementations of the x86 ISA, unaligned access
 * is a significant performance issue.  However, some architectures don't
 * mind this, and for some very edge-case performance-sensitive applications
 * (like forwarding large volumes of small packets), setting this DMA offset
 * to 0 decreases the number of PCI-E bus transfers enough to measurably
 * improve performance.
168  */
169 static int rx_dma_offset = 2;
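
/* With the default offset of 2, the 14-byte Ethernet header spans buffer
 * bytes 2-15, so the IP header that follows begins at byte 16, a 4-byte
 * boundary (2 + 14 = 16).
 */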
170 
/* TX Queue select: determines which algorithm is used to select a TX queue.
 * Choose between the kernel-provided function (select_queue=0) and the
 * driver's cxgb_select_queue() function (select_queue=1).
 *
 * Default: select_queue=0
 */
177 static int select_queue;
178 module_param(select_queue, int, 0644);
179 MODULE_PARM_DESC(select_queue,
180 		 "Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method.");
181 
182 static struct dentry *cxgb4_debugfs_root;
183 
184 LIST_HEAD(adapter_list);
185 DEFINE_MUTEX(uld_mutex);
186 
187 static void link_report(struct net_device *dev)
188 {
	if (!netif_carrier_ok(dev)) {
		netdev_info(dev, "link down\n");
	} else {
192 		static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };
193 
194 		const char *s;
195 		const struct port_info *p = netdev_priv(dev);
196 
197 		switch (p->link_cfg.speed) {
198 		case 100:
199 			s = "100Mbps";
200 			break;
201 		case 1000:
202 			s = "1Gbps";
203 			break;
204 		case 10000:
205 			s = "10Gbps";
206 			break;
207 		case 25000:
208 			s = "25Gbps";
209 			break;
210 		case 40000:
211 			s = "40Gbps";
212 			break;
213 		case 50000:
214 			s = "50Gbps";
215 			break;
216 		case 100000:
217 			s = "100Gbps";
218 			break;
219 		default:
220 			pr_info("%s: unsupported speed: %d\n",
221 				dev->name, p->link_cfg.speed);
222 			return;
223 		}
224 
225 		netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
226 			    fc[p->link_cfg.fc]);
227 	}
228 }
229 
230 #ifdef CONFIG_CHELSIO_T4_DCB
231 /* Set up/tear down Data Center Bridging Priority mapping for a net device. */
232 static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
233 {
234 	struct port_info *pi = netdev_priv(dev);
235 	struct adapter *adap = pi->adapter;
236 	struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];
237 	int i;
238 
239 	/* We use a simple mapping of Port TX Queue Index to DCB
240 	 * Priority when we're enabling DCB.
241 	 */
242 	for (i = 0; i < pi->nqsets; i++, txq++) {
243 		u32 name, value;
244 		int err;
245 
246 		name = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
247 			FW_PARAMS_PARAM_X_V(
248 				FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) |
249 			FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id));
250 		value = enable ? i : 0xffffffff;
251 
		/* Since we can be called while atomic (from "interrupt
		 * level") we need to issue the Set Parameters Command
		 * without sleeping (timeout < 0).
		 */
256 		err = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
257 					    &name, &value,
258 					    -FW_CMD_MAX_TIMEOUT);
259 
260 		if (err)
261 			dev_err(adap->pdev_dev,
262 				"Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
263 				enable ? "set" : "unset", pi->port_id, i, -err);
264 		else
265 			txq->dcb_prio = enable ? value : 0;
266 	}
267 }
268 
269 int cxgb4_dcb_enabled(const struct net_device *dev)
270 {
271 	struct port_info *pi = netdev_priv(dev);
272 
273 	if (!pi->dcb.enabled)
274 		return 0;
275 
276 	return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) ||
277 		(pi->dcb.state == CXGB4_DCB_STATE_HOST));
278 }
279 #endif /* CONFIG_CHELSIO_T4_DCB */
280 
281 void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
282 {
283 	struct net_device *dev = adapter->port[port_id];
284 
285 	/* Skip changes from disabled ports. */
286 	if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
287 		if (link_stat)
288 			netif_carrier_on(dev);
289 		else {
290 #ifdef CONFIG_CHELSIO_T4_DCB
291 			if (cxgb4_dcb_enabled(dev)) {
292 				cxgb4_dcb_reset(dev);
293 				dcb_tx_queue_prio_enable(dev, false);
294 			}
295 #endif /* CONFIG_CHELSIO_T4_DCB */
296 			netif_carrier_off(dev);
297 		}
298 
299 		link_report(dev);
300 	}
301 }
302 
303 void t4_os_portmod_changed(struct adapter *adap, int port_id)
304 {
305 	static const char *mod_str[] = {
306 		NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
307 	};
308 
309 	struct net_device *dev = adap->port[port_id];
310 	struct port_info *pi = netdev_priv(dev);
311 
312 	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
313 		netdev_info(dev, "port module unplugged\n");
314 	else if (pi->mod_type < ARRAY_SIZE(mod_str))
315 		netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
316 	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
317 		netdev_info(dev, "%s: unsupported port module inserted\n",
318 			    dev->name);
319 	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
320 		netdev_info(dev, "%s: unknown port module inserted\n",
321 			    dev->name);
322 	else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR)
323 		netdev_info(dev, "%s: transceiver module error\n", dev->name);
324 	else
325 		netdev_info(dev, "%s: unknown module type %d inserted\n",
326 			    dev->name, pi->mod_type);
327 
328 	/* If the interface is running, then we'll need any "sticky" Link
329 	 * Parameters redone with a new Transceiver Module.
330 	 */
331 	pi->link_cfg.redo_l1cfg = netif_running(dev);
332 }
333 
334 int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
335 module_param(dbfifo_int_thresh, int, 0644);
336 MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");
337 
338 /*
339  * usecs to sleep while draining the dbfifo
340  */
341 static int dbfifo_drain_delay = 1000;
342 module_param(dbfifo_drain_delay, int, 0644);
343 MODULE_PARM_DESC(dbfifo_drain_delay,
344 		 "usecs to sleep while draining the dbfifo");
345 
346 static inline int cxgb4_set_addr_hash(struct port_info *pi)
347 {
348 	struct adapter *adap = pi->adapter;
349 	u64 vec = 0;
350 	bool ucast = false;
351 	struct hash_mac_addr *entry;
352 
	/* Calculate the hash vector for the updated list and program it.
	 * Each address selects one of 64 bins via hash_mac_addr(); the
	 * resulting 64-bit bitmap is what gets programmed into hardware.
	 */
354 	list_for_each_entry(entry, &adap->mac_hlist, list) {
355 		ucast |= is_unicast_ether_addr(entry->addr);
356 		vec |= (1ULL << hash_mac_addr(entry->addr));
357 	}
358 	return t4_set_addr_hash(adap, adap->mbox, pi->viid, ucast,
359 				vec, false);
360 }
361 
362 static int cxgb4_mac_sync(struct net_device *netdev, const u8 *mac_addr)
363 {
364 	struct port_info *pi = netdev_priv(netdev);
365 	struct adapter *adap = pi->adapter;
366 	int ret;
367 	u64 mhash = 0;
368 	u64 uhash = 0;
369 	bool free = false;
370 	bool ucast = is_unicast_ether_addr(mac_addr);
371 	const u8 *maclist[1] = {mac_addr};
372 	struct hash_mac_addr *new_entry;
373 
374 	ret = t4_alloc_mac_filt(adap, adap->mbox, pi->viid, free, 1, maclist,
375 				NULL, ucast ? &uhash : &mhash, false);
376 	if (ret < 0)
377 		goto out;
	/* A non-zero hash means the exact-match filter table was full and
	 * the address landed in the hash filter instead.  Track it on our
	 * hash address list so that, at the end, we can recalculate the
	 * hash vector for the whole list and program it.
	 */
382 	if (uhash || mhash) {
383 		new_entry = kzalloc(sizeof(*new_entry), GFP_ATOMIC);
384 		if (!new_entry)
385 			return -ENOMEM;
386 		ether_addr_copy(new_entry->addr, mac_addr);
387 		list_add_tail(&new_entry->list, &adap->mac_hlist);
388 		ret = cxgb4_set_addr_hash(pi);
389 	}
390 out:
391 	return ret < 0 ? ret : 0;
392 }
393 
394 static int cxgb4_mac_unsync(struct net_device *netdev, const u8 *mac_addr)
395 {
396 	struct port_info *pi = netdev_priv(netdev);
397 	struct adapter *adap = pi->adapter;
398 	int ret;
399 	const u8 *maclist[1] = {mac_addr};
400 	struct hash_mac_addr *entry, *tmp;
401 
402 	/* If the MAC address to be removed is in the hash addr
403 	 * list, delete it from the list and update hash vector
404 	 */
405 	list_for_each_entry_safe(entry, tmp, &adap->mac_hlist, list) {
406 		if (ether_addr_equal(entry->addr, mac_addr)) {
407 			list_del(&entry->list);
408 			kfree(entry);
409 			return cxgb4_set_addr_hash(pi);
410 		}
411 	}
412 
413 	ret = t4_free_mac_filt(adap, adap->mbox, pi->viid, 1, maclist, false);
414 	return ret < 0 ? -EINVAL : 0;
415 }
416 
/*
 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
421 static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
422 {
423 	struct port_info *pi = netdev_priv(dev);
424 	struct adapter *adapter = pi->adapter;
425 
426 	__dev_uc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);
427 	__dev_mc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);
428 
429 	return t4_set_rxmode(adapter, adapter->mbox, pi->viid, mtu,
430 			     (dev->flags & IFF_PROMISC) ? 1 : 0,
431 			     (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
432 			     sleep_ok);
433 }
434 
435 /**
436  *	link_start - enable a port
437  *	@dev: the port to enable
438  *
439  *	Performs the MAC and PHY actions needed to enable a port.
440  */
441 static int link_start(struct net_device *dev)
442 {
443 	int ret;
444 	struct port_info *pi = netdev_priv(dev);
445 	unsigned int mb = pi->adapter->pf;
446 
447 	/*
448 	 * We do not set address filters and promiscuity here, the stack does
449 	 * that step explicitly.
450 	 */
451 	ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
452 			    !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
453 	if (ret == 0) {
454 		ret = t4_change_mac(pi->adapter, mb, pi->viid,
455 				    pi->xact_addr_filt, dev->dev_addr, true,
456 				    true);
457 		if (ret >= 0) {
458 			pi->xact_addr_filt = ret;
459 			ret = 0;
460 		}
461 	}
462 	if (ret == 0)
463 		ret = t4_link_l1cfg(pi->adapter, mb, pi->tx_chan,
464 				    &pi->link_cfg);
465 	if (ret == 0) {
466 		local_bh_disable();
467 		ret = t4_enable_pi_params(pi->adapter, mb, pi, true,
468 					  true, CXGB4_DCB_ENABLED);
469 		local_bh_enable();
470 	}
471 
472 	return ret;
473 }
474 
475 #ifdef CONFIG_CHELSIO_T4_DCB
476 /* Handle a Data Center Bridging update message from the firmware. */
477 static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
478 {
479 	int port = FW_PORT_CMD_PORTID_G(ntohl(pcmd->op_to_portid));
480 	struct net_device *dev = adap->port[adap->chan_map[port]];
481 	int old_dcb_enabled = cxgb4_dcb_enabled(dev);
482 	int new_dcb_enabled;
483 
484 	cxgb4_dcb_handle_fw_update(adap, pcmd);
485 	new_dcb_enabled = cxgb4_dcb_enabled(dev);
486 
487 	/* If the DCB has become enabled or disabled on the port then we're
488 	 * going to need to set up/tear down DCB Priority parameters for the
489 	 * TX Queues associated with the port.
490 	 */
491 	if (new_dcb_enabled != old_dcb_enabled)
492 		dcb_tx_queue_prio_enable(dev, new_dcb_enabled);
493 }
494 #endif /* CONFIG_CHELSIO_T4_DCB */
495 
496 /* Response queue handler for the FW event queue.
497  */
498 static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
499 			  const struct pkt_gl *gl)
500 {
501 	u8 opcode = ((const struct rss_header *)rsp)->opcode;
502 
503 	rsp++;                                          /* skip RSS header */
504 
505 	/* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
506 	 */
507 	if (unlikely(opcode == CPL_FW4_MSG &&
508 	   ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
509 		rsp++;
510 		opcode = ((const struct rss_header *)rsp)->opcode;
511 		rsp++;
512 		if (opcode != CPL_SGE_EGR_UPDATE) {
			dev_err(q->adap->pdev_dev,
				"unexpected FW4/CPL %#x on FW event queue\n",
				opcode);
515 			goto out;
516 		}
517 	}
518 
519 	if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
520 		const struct cpl_sge_egr_update *p = (void *)rsp;
521 		unsigned int qid = EGR_QID_G(ntohl(p->opcode_qid));
522 		struct sge_txq *txq;
523 
524 		txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
525 		txq->restarts++;
526 		if (txq->q_type == CXGB4_TXQ_ETH) {
527 			struct sge_eth_txq *eq;
528 
529 			eq = container_of(txq, struct sge_eth_txq, q);
530 			netif_tx_wake_queue(eq->txq);
531 		} else {
532 			struct sge_uld_txq *oq;
533 
534 			oq = container_of(txq, struct sge_uld_txq, q);
535 			tasklet_schedule(&oq->qresume_tsk);
536 		}
537 	} else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
538 		const struct cpl_fw6_msg *p = (void *)rsp;
539 
540 #ifdef CONFIG_CHELSIO_T4_DCB
541 		const struct fw_port_cmd *pcmd = (const void *)p->data;
542 		unsigned int cmd = FW_CMD_OP_G(ntohl(pcmd->op_to_portid));
543 		unsigned int action =
544 			FW_PORT_CMD_ACTION_G(ntohl(pcmd->action_to_len16));
545 
546 		if (cmd == FW_PORT_CMD &&
547 		    (action == FW_PORT_ACTION_GET_PORT_INFO ||
548 		     action == FW_PORT_ACTION_GET_PORT_INFO32)) {
549 			int port = FW_PORT_CMD_PORTID_G(
550 					be32_to_cpu(pcmd->op_to_portid));
551 			struct net_device *dev;
552 			int dcbxdis, state_input;
553 
554 			dev = q->adap->port[q->adap->chan_map[port]];
555 			dcbxdis = (action == FW_PORT_ACTION_GET_PORT_INFO
556 			  ? !!(pcmd->u.info.dcbxdis_pkd & FW_PORT_CMD_DCBXDIS_F)
557 			  : !!(be32_to_cpu(pcmd->u.info32.lstatus32_to_cbllen32)
558 			       & FW_PORT_CMD_DCBXDIS32_F));
559 			state_input = (dcbxdis
560 				       ? CXGB4_DCB_INPUT_FW_DISABLED
561 				       : CXGB4_DCB_INPUT_FW_ENABLED);
562 
563 			cxgb4_dcb_state_fsm(dev, state_input);
564 		}
565 
566 		if (cmd == FW_PORT_CMD &&
567 		    action == FW_PORT_ACTION_L2_DCB_CFG)
568 			dcb_rpl(q->adap, pcmd);
569 		else
570 #endif
571 			if (p->type == 0)
572 				t4_handle_fw_rpl(q->adap, p->data);
573 	} else if (opcode == CPL_L2T_WRITE_RPL) {
574 		const struct cpl_l2t_write_rpl *p = (void *)rsp;
575 
576 		do_l2t_write_rpl(q->adap, p);
577 	} else if (opcode == CPL_SMT_WRITE_RPL) {
578 		const struct cpl_smt_write_rpl *p = (void *)rsp;
579 
580 		do_smt_write_rpl(q->adap, p);
581 	} else if (opcode == CPL_SET_TCB_RPL) {
582 		const struct cpl_set_tcb_rpl *p = (void *)rsp;
583 
584 		filter_rpl(q->adap, p);
585 	} else if (opcode == CPL_ACT_OPEN_RPL) {
586 		const struct cpl_act_open_rpl *p = (void *)rsp;
587 
588 		hash_filter_rpl(q->adap, p);
589 	} else if (opcode == CPL_ABORT_RPL_RSS) {
590 		const struct cpl_abort_rpl_rss *p = (void *)rsp;
591 
592 		hash_del_filter_rpl(q->adap, p);
593 	} else if (opcode == CPL_SRQ_TABLE_RPL) {
594 		const struct cpl_srq_table_rpl *p = (void *)rsp;
595 
596 		do_srq_table_rpl(q->adap, p);
	} else {
		dev_err(q->adap->pdev_dev,
			"unexpected CPL %#x on FW event queue\n", opcode);
	}
600 out:
601 	return 0;
602 }
603 
604 static void disable_msi(struct adapter *adapter)
605 {
606 	if (adapter->flags & USING_MSIX) {
607 		pci_disable_msix(adapter->pdev);
608 		adapter->flags &= ~USING_MSIX;
609 	} else if (adapter->flags & USING_MSI) {
610 		pci_disable_msi(adapter->pdev);
611 		adapter->flags &= ~USING_MSI;
612 	}
613 }
614 
615 /*
616  * Interrupt handler for non-data events used with MSI-X.
617  */
618 static irqreturn_t t4_nondata_intr(int irq, void *cookie)
619 {
620 	struct adapter *adap = cookie;
621 	u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A));
622 
623 	if (v & PFSW_F) {
624 		adap->swintr = 1;
625 		t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A), v);
626 	}
627 	if (adap->flags & MASTER_PF)
628 		t4_slow_intr_handler(adap);
629 	return IRQ_HANDLED;
630 }
631 
632 /*
633  * Name the MSI-X interrupts.
634  */
635 static void name_msix_vecs(struct adapter *adap)
636 {
637 	int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);
638 
639 	/* non-data interrupts */
640 	snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);
641 
642 	/* FW events */
643 	snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
644 		 adap->port[0]->name);
645 
646 	/* Ethernet queues */
647 	for_each_port(adap, j) {
648 		struct net_device *d = adap->port[j];
649 		const struct port_info *pi = netdev_priv(d);
650 
651 		for (i = 0; i < pi->nqsets; i++, msi_idx++)
652 			snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
653 				 d->name, i);
654 	}
655 }
656 
657 static int request_msix_queue_irqs(struct adapter *adap)
658 {
659 	struct sge *s = &adap->sge;
660 	int err, ethqidx;
661 	int msi_index = 2;
662 
663 	err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
664 			  adap->msix_info[1].desc, &s->fw_evtq);
665 	if (err)
666 		return err;
667 
668 	for_each_ethrxq(s, ethqidx) {
669 		err = request_irq(adap->msix_info[msi_index].vec,
670 				  t4_sge_intr_msix, 0,
671 				  adap->msix_info[msi_index].desc,
672 				  &s->ethrxq[ethqidx].rspq);
673 		if (err)
674 			goto unwind;
675 		msi_index++;
676 	}
677 	return 0;
678 
679 unwind:
680 	while (--ethqidx >= 0)
681 		free_irq(adap->msix_info[--msi_index].vec,
682 			 &s->ethrxq[ethqidx].rspq);
683 	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
684 	return err;
685 }
686 
687 static void free_msix_queue_irqs(struct adapter *adap)
688 {
689 	int i, msi_index = 2;
690 	struct sge *s = &adap->sge;
691 
692 	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
693 	for_each_ethrxq(s, i)
694 		free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
695 }
696 
697 /**
698  *	cxgb4_write_rss - write the RSS table for a given port
699  *	@pi: the port
700  *	@queues: array of queue indices for RSS
701  *
702  *	Sets up the portion of the HW RSS table for the port's VI to distribute
703  *	packets to the Rx queues in @queues.
 *	Should never be called before setting up the SGE Ethernet RX queues.
705  */
706 int cxgb4_write_rss(const struct port_info *pi, const u16 *queues)
707 {
708 	u16 *rss;
709 	int i, err;
710 	struct adapter *adapter = pi->adapter;
711 	const struct sge_eth_rxq *rxq;
712 
713 	rxq = &adapter->sge.ethrxq[pi->first_qset];
714 	rss = kmalloc_array(pi->rss_size, sizeof(u16), GFP_KERNEL);
715 	if (!rss)
716 		return -ENOMEM;
717 
718 	/* map the queue indices to queue ids */
719 	for (i = 0; i < pi->rss_size; i++, queues++)
720 		rss[i] = rxq[*queues].rspq.abs_id;
721 
722 	err = t4_config_rss_range(adapter, adapter->pf, pi->viid, 0,
723 				  pi->rss_size, rss, pi->rss_size);
724 	/* If Tunnel All Lookup isn't specified in the global RSS
725 	 * Configuration, then we need to specify a default Ingress
726 	 * Queue for any ingress packets which aren't hashed.  We'll
727 	 * use our first ingress queue ...
728 	 */
729 	if (!err)
730 		err = t4_config_vi_rss(adapter, adapter->mbox, pi->viid,
731 				       FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F |
732 				       FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F |
733 				       FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F |
734 				       FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F |
735 				       FW_RSS_VI_CONFIG_CMD_UDPEN_F,
736 				       rss[0]);
737 	kfree(rss);
738 	return err;
739 }
740 
741 /**
742  *	setup_rss - configure RSS
743  *	@adap: the adapter
744  *
745  *	Sets up RSS for each port.
746  */
747 static int setup_rss(struct adapter *adap)
748 {
749 	int i, j, err;
750 
751 	for_each_port(adap, i) {
752 		const struct port_info *pi = adap2pinfo(adap, i);
753 
754 		/* Fill default values with equal distribution */
755 		for (j = 0; j < pi->rss_size; j++)
756 			pi->rss[j] = j % pi->nqsets;
757 
758 		err = cxgb4_write_rss(pi, pi->rss);
759 		if (err)
760 			return err;
761 	}
762 	return 0;
763 }
764 
765 /*
766  * Return the channel of the ingress queue with the given qid.
767  */
768 static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
769 {
770 	qid -= p->ingr_start;
771 	return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
772 }
773 
774 /*
775  * Wait until all NAPI handlers are descheduled.
776  */
777 static void quiesce_rx(struct adapter *adap)
778 {
779 	int i;
780 
781 	for (i = 0; i < adap->sge.ingr_sz; i++) {
782 		struct sge_rspq *q = adap->sge.ingr_map[i];
783 
784 		if (q && q->handler)
785 			napi_disable(&q->napi);
786 	}
787 }
788 
789 /* Disable interrupt and napi handler */
790 static void disable_interrupts(struct adapter *adap)
791 {
792 	if (adap->flags & FULL_INIT_DONE) {
793 		t4_intr_disable(adap);
794 		if (adap->flags & USING_MSIX) {
795 			free_msix_queue_irqs(adap);
796 			free_irq(adap->msix_info[0].vec, adap);
797 		} else {
798 			free_irq(adap->pdev->irq, adap);
799 		}
800 		quiesce_rx(adap);
801 	}
802 }
803 
804 /*
805  * Enable NAPI scheduling and interrupt generation for all Rx queues.
806  */
807 static void enable_rx(struct adapter *adap)
808 {
809 	int i;
810 
811 	for (i = 0; i < adap->sge.ingr_sz; i++) {
812 		struct sge_rspq *q = adap->sge.ingr_map[i];
813 
814 		if (!q)
815 			continue;
816 		if (q->handler)
817 			napi_enable(&q->napi);
818 
819 		/* 0-increment GTS to start the timer and enable interrupts */
820 		t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
821 			     SEINTARM_V(q->intr_params) |
822 			     INGRESSQID_V(q->cntxt_id));
823 	}
824 }
825 
827 static int setup_fw_sge_queues(struct adapter *adap)
828 {
829 	struct sge *s = &adap->sge;
830 	int err = 0;
831 
832 	bitmap_zero(s->starving_fl, s->egr_sz);
833 	bitmap_zero(s->txq_maperr, s->egr_sz);
834 
835 	if (adap->flags & USING_MSIX)
836 		adap->msi_idx = 1;         /* vector 0 is for non-queue interrupts */
837 	else {
838 		err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
839 				       NULL, NULL, NULL, -1);
840 		if (err)
841 			return err;
842 		adap->msi_idx = -((int)s->intrq.abs_id + 1);
843 	}
844 
845 	err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
846 			       adap->msi_idx, NULL, fwevtq_handler, NULL, -1);
847 	return err;
848 }
849 
850 /**
851  *	setup_sge_queues - configure SGE Tx/Rx/response queues
852  *	@adap: the adapter
853  *
854  *	Determines how many sets of SGE queues to use and initializes them.
855  *	We support multiple queue sets per port if we have MSI-X, otherwise
856  *	just one queue set per port.
857  */
858 static int setup_sge_queues(struct adapter *adap)
859 {
860 	int err, i, j;
861 	struct sge *s = &adap->sge;
862 	struct sge_uld_rxq_info *rxq_info = NULL;
863 	unsigned int cmplqid = 0;
864 
865 	if (is_uld(adap))
866 		rxq_info = s->uld_rxq_info[CXGB4_ULD_RDMA];
867 
868 	for_each_port(adap, i) {
869 		struct net_device *dev = adap->port[i];
870 		struct port_info *pi = netdev_priv(dev);
871 		struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
872 		struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];
873 
874 		for (j = 0; j < pi->nqsets; j++, q++) {
875 			if (adap->msi_idx > 0)
876 				adap->msi_idx++;
877 			err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
878 					       adap->msi_idx, &q->fl,
879 					       t4_ethrx_handler,
880 					       NULL,
881 					       t4_get_tp_ch_map(adap,
882 								pi->tx_chan));
883 			if (err)
884 				goto freeout;
885 			q->rspq.idx = j;
886 			memset(&q->stats, 0, sizeof(q->stats));
887 		}
888 		for (j = 0; j < pi->nqsets; j++, t++) {
889 			err = t4_sge_alloc_eth_txq(adap, t, dev,
890 					netdev_get_tx_queue(dev, j),
891 					s->fw_evtq.cntxt_id);
892 			if (err)
893 				goto freeout;
894 		}
895 	}
896 
897 	for_each_port(adap, i) {
898 		/* Note that cmplqid below is 0 if we don't
899 		 * have RDMA queues, and that's the right value.
900 		 */
901 		if (rxq_info)
902 			cmplqid	= rxq_info->uldrxq[i].rspq.cntxt_id;
903 
904 		err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
905 					    s->fw_evtq.cntxt_id, cmplqid);
906 		if (err)
907 			goto freeout;
908 	}
909 
910 	if (!is_t4(adap->params.chip)) {
		err = t4_sge_alloc_eth_txq(adap, &s->ptptxq, adap->port[0],
					   netdev_get_tx_queue(adap->port[0], 0),
					   s->fw_evtq.cntxt_id);
914 		if (err)
915 			goto freeout;
916 	}
917 
918 	t4_write_reg(adap, is_t4(adap->params.chip) ?
919 				MPS_TRC_RSS_CONTROL_A :
920 				MPS_T5_TRC_RSS_CONTROL_A,
921 		     RSSCONTROL_V(netdev2pinfo(adap->port[0])->tx_chan) |
922 		     QUEUENUMBER_V(s->ethrxq[0].rspq.abs_id));
923 	return 0;
924 freeout:
925 	dev_err(adap->pdev_dev, "Can't allocate queues, err=%d\n", -err);
926 	t4_free_sge_resources(adap);
927 	return err;
928 }
929 
930 static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
931 			     struct net_device *sb_dev,
932 			     select_queue_fallback_t fallback)
933 {
934 	int txq;
935 
936 #ifdef CONFIG_CHELSIO_T4_DCB
937 	/* If a Data Center Bridging has been successfully negotiated on this
938 	 * link then we'll use the skb's priority to map it to a TX Queue.
939 	 * The skb's priority is determined via the VLAN Tag Priority Code
940 	 * Point field.
941 	 */
942 	if (cxgb4_dcb_enabled(dev) && !is_kdump_kernel()) {
943 		u16 vlan_tci;
944 		int err;
945 
946 		err = vlan_get_tag(skb, &vlan_tci);
947 		if (unlikely(err)) {
948 			if (net_ratelimit())
949 				netdev_warn(dev,
950 					    "TX Packet without VLAN Tag on DCB Link\n");
951 			txq = 0;
952 		} else {
953 			txq = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
954 #ifdef CONFIG_CHELSIO_T4_FCOE
955 			if (skb->protocol == htons(ETH_P_FCOE))
956 				txq = skb->priority & 0x7;
957 #endif /* CONFIG_CHELSIO_T4_FCOE */
958 		}
959 		return txq;
960 	}
961 #endif /* CONFIG_CHELSIO_T4_DCB */
962 
963 	if (select_queue) {
964 		txq = (skb_rx_queue_recorded(skb)
965 			? skb_get_rx_queue(skb)
966 			: smp_processor_id());
967 
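		/* Cheap modulo: fold txq into [0, real_num_tx_queues) by
		 * repeated subtraction; the recorded RX queue or CPU id is
		 * rarely more than one multiple out of range.
		 */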
968 		while (unlikely(txq >= dev->real_num_tx_queues))
969 			txq -= dev->real_num_tx_queues;
970 
971 		return txq;
972 	}
973 
974 	return fallback(dev, skb, NULL) % dev->real_num_tx_queues;
975 }
976 
977 static int closest_timer(const struct sge *s, int time)
978 {
979 	int i, delta, match = 0, min_delta = INT_MAX;
980 
981 	for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
982 		delta = time - s->timer_val[i];
983 		if (delta < 0)
984 			delta = -delta;
985 		if (delta < min_delta) {
986 			min_delta = delta;
987 			match = i;
988 		}
989 	}
990 	return match;
991 }
992 
993 static int closest_thres(const struct sge *s, int thres)
994 {
995 	int i, delta, match = 0, min_delta = INT_MAX;
996 
997 	for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
998 		delta = thres - s->counter_val[i];
999 		if (delta < 0)
1000 			delta = -delta;
1001 		if (delta < min_delta) {
1002 			min_delta = delta;
1003 			match = i;
1004 		}
1005 	}
1006 	return match;
1007 }
1008 
1009 /**
1010  *	cxgb4_set_rspq_intr_params - set a queue's interrupt holdoff parameters
1011  *	@q: the Rx queue
1012  *	@us: the hold-off time in us, or 0 to disable timer
1013  *	@cnt: the hold-off packet count, or 0 to disable counter
1014  *
1015  *	Sets an Rx queue's interrupt hold-off time and packet count.  At least
1016  *	one of the two needs to be enabled for the queue to generate interrupts.
1017  */
1018 int cxgb4_set_rspq_intr_params(struct sge_rspq *q,
1019 			       unsigned int us, unsigned int cnt)
1020 {
1021 	struct adapter *adap = q->adap;
1022 
1023 	if ((us | cnt) == 0)
1024 		cnt = 1;
1025 
1026 	if (cnt) {
1027 		int err;
1028 		u32 v, new_idx;
1029 
1030 		new_idx = closest_thres(&adap->sge, cnt);
1031 		if (q->desc && q->pktcnt_idx != new_idx) {
1032 			/* the queue has already been created, update it */
1033 			v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
1034 			    FW_PARAMS_PARAM_X_V(
1035 					FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
1036 			    FW_PARAMS_PARAM_YZ_V(q->cntxt_id);
1037 			err = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
1038 					    &v, &new_idx);
1039 			if (err)
1040 				return err;
1041 		}
1042 		q->pktcnt_idx = new_idx;
1043 	}
1044 
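	/* A timer value of 0 maps to index 6 (SGE_TIMER_RSTRT_CNTR in the
	 * SGE timer encoding), which disables the hold-off timer and leaves
	 * interrupt generation to the packet-count threshold alone.
	 */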
1045 	us = us == 0 ? 6 : closest_timer(&adap->sge, us);
1046 	q->intr_params = QINTR_TIMER_IDX_V(us) | QINTR_CNT_EN_V(cnt > 0);
1047 	return 0;
1048 }
1049 
1050 static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
1051 {
1052 	const struct port_info *pi = netdev_priv(dev);
1053 	netdev_features_t changed = dev->features ^ features;
1054 	int err;
1055 
1056 	if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
1057 		return 0;
1058 
1059 	err = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, -1,
1060 			    -1, -1, -1,
1061 			    !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
1062 	if (unlikely(err))
1063 		dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
1064 	return err;
1065 }
1066 
1067 static int setup_debugfs(struct adapter *adap)
1068 {
1069 	if (IS_ERR_OR_NULL(adap->debugfs_root))
1070 		return -1;
1071 
1072 #ifdef CONFIG_DEBUG_FS
1073 	t4_setup_debugfs(adap);
1074 #endif
1075 	return 0;
1076 }
1077 
1078 /*
1079  * upper-layer driver support
1080  */
1081 
1082 /*
1083  * Allocate an active-open TID and set it to the supplied value.
1084  */
1085 int cxgb4_alloc_atid(struct tid_info *t, void *data)
1086 {
1087 	int atid = -1;
1088 
1089 	spin_lock_bh(&t->atid_lock);
1090 	if (t->afree) {
1091 		union aopen_entry *p = t->afree;
1092 
1093 		atid = (p - t->atid_tab) + t->atid_base;
1094 		t->afree = p->next;
1095 		p->data = data;
1096 		t->atids_in_use++;
1097 	}
1098 	spin_unlock_bh(&t->atid_lock);
1099 	return atid;
1100 }
1101 EXPORT_SYMBOL(cxgb4_alloc_atid);
1102 
1103 /*
1104  * Release an active-open TID.
1105  */
1106 void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
1107 {
1108 	union aopen_entry *p = &t->atid_tab[atid - t->atid_base];
1109 
1110 	spin_lock_bh(&t->atid_lock);
1111 	p->next = t->afree;
1112 	t->afree = p;
1113 	t->atids_in_use--;
1114 	spin_unlock_bh(&t->atid_lock);
1115 }
1116 EXPORT_SYMBOL(cxgb4_free_atid);
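
/* A (hypothetical) ULD pairs the two calls above roughly as follows:
 *
 *	atid = cxgb4_alloc_atid(t, my_conn);
 *	if (atid < 0)
 *		return -ENOMEM;		(atid table exhausted)
 *	... send the active-open request carrying atid ...
 *	cxgb4_free_atid(t, atid);
 *
 * where "my_conn" is the caller's per-connection state, retrieved later
 * via lookup_atid().
 */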
1117 
1118 /*
1119  * Allocate a server TID and set it to the supplied value.
1120  */
1121 int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
1122 {
1123 	int stid;
1124 
1125 	spin_lock_bh(&t->stid_lock);
1126 	if (family == PF_INET) {
1127 		stid = find_first_zero_bit(t->stid_bmap, t->nstids);
1128 		if (stid < t->nstids)
1129 			__set_bit(stid, t->stid_bmap);
1130 		else
1131 			stid = -1;
1132 	} else {
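		/* An IPv6 server occupies a naturally aligned pair of
		 * stids: an order-1 region of the bitmap.
		 */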
1133 		stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 1);
1134 		if (stid < 0)
1135 			stid = -1;
1136 	}
1137 	if (stid >= 0) {
1138 		t->stid_tab[stid].data = data;
1139 		stid += t->stid_base;
		/* IPv6 requires a max of 520 bits or 16 cells in the TCAM,
		 * which is equivalent to 4 TIDs.  With CLIP enabled it
		 * needs 2 TIDs.
		 */
1144 		if (family == PF_INET6) {
1145 			t->stids_in_use += 2;
1146 			t->v6_stids_in_use += 2;
1147 		} else {
1148 			t->stids_in_use++;
1149 		}
1150 	}
1151 	spin_unlock_bh(&t->stid_lock);
1152 	return stid;
1153 }
1154 EXPORT_SYMBOL(cxgb4_alloc_stid);
1155 
1156 /* Allocate a server filter TID and set it to the supplied value.
1157  */
1158 int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
1159 {
1160 	int stid;
1161 
1162 	spin_lock_bh(&t->stid_lock);
1163 	if (family == PF_INET) {
1164 		stid = find_next_zero_bit(t->stid_bmap,
1165 				t->nstids + t->nsftids, t->nstids);
1166 		if (stid < (t->nstids + t->nsftids))
1167 			__set_bit(stid, t->stid_bmap);
1168 		else
1169 			stid = -1;
1170 	} else {
1171 		stid = -1;
1172 	}
1173 	if (stid >= 0) {
1174 		t->stid_tab[stid].data = data;
1175 		stid -= t->nstids;
1176 		stid += t->sftid_base;
1177 		t->sftids_in_use++;
1178 	}
1179 	spin_unlock_bh(&t->stid_lock);
1180 	return stid;
1181 }
1182 EXPORT_SYMBOL(cxgb4_alloc_sftid);
1183 
1184 /* Release a server TID.
1185  */
1186 void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
1187 {
1188 	/* Is it a server filter TID? */
1189 	if (t->nsftids && (stid >= t->sftid_base)) {
1190 		stid -= t->sftid_base;
1191 		stid += t->nstids;
1192 	} else {
1193 		stid -= t->stid_base;
1194 	}
1195 
1196 	spin_lock_bh(&t->stid_lock);
1197 	if (family == PF_INET)
1198 		__clear_bit(stid, t->stid_bmap);
1199 	else
1200 		bitmap_release_region(t->stid_bmap, stid, 1);
1201 	t->stid_tab[stid].data = NULL;
1202 	if (stid < t->nstids) {
1203 		if (family == PF_INET6) {
1204 			t->stids_in_use -= 2;
1205 			t->v6_stids_in_use -= 2;
1206 		} else {
1207 			t->stids_in_use--;
1208 		}
1209 	} else {
1210 		t->sftids_in_use--;
1211 	}
1212 
1213 	spin_unlock_bh(&t->stid_lock);
1214 }
1215 EXPORT_SYMBOL(cxgb4_free_stid);
1216 
1217 /*
1218  * Populate a TID_RELEASE WR.  Caller must properly size the skb.
1219  */
1220 static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
1221 			   unsigned int tid)
1222 {
1223 	struct cpl_tid_release *req;
1224 
1225 	set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
1226 	req = __skb_put(skb, sizeof(*req));
1227 	INIT_TP_WR(req, tid);
1228 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
1229 }
1230 
1231 /*
1232  * Queue a TID release request and if necessary schedule a work queue to
1233  * process it.
1234  */
1235 static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
1236 				    unsigned int tid)
1237 {
1238 	void **p = &t->tid_tab[tid];
1239 	struct adapter *adap = container_of(t, struct adapter, tids);
1240 
1241 	spin_lock_bh(&adap->tid_release_lock);
1242 	*p = adap->tid_release_head;
	/* Low 2 bits encode the Tx channel number; tid_tab entries are
	 * pointer-aligned, so those bits are otherwise zero.
	 */
1244 	adap->tid_release_head = (void **)((uintptr_t)p | chan);
1245 	if (!adap->tid_release_task_busy) {
1246 		adap->tid_release_task_busy = true;
1247 		queue_work(adap->workq, &adap->tid_release_task);
1248 	}
1249 	spin_unlock_bh(&adap->tid_release_lock);
1250 }
1251 
1252 /*
1253  * Process the list of pending TID release requests.
1254  */
1255 static void process_tid_release_list(struct work_struct *work)
1256 {
1257 	struct sk_buff *skb;
1258 	struct adapter *adap;
1259 
1260 	adap = container_of(work, struct adapter, tid_release_task);
1261 
1262 	spin_lock_bh(&adap->tid_release_lock);
1263 	while (adap->tid_release_head) {
1264 		void **p = adap->tid_release_head;
1265 		unsigned int chan = (uintptr_t)p & 3;
1266 		p = (void *)p - chan;
1267 
1268 		adap->tid_release_head = *p;
1269 		*p = NULL;
1270 		spin_unlock_bh(&adap->tid_release_lock);
1271 
1272 		while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
1273 					 GFP_KERNEL)))
1274 			schedule_timeout_uninterruptible(1);
1275 
1276 		mk_tid_release(skb, chan, p - adap->tids.tid_tab);
1277 		t4_ofld_send(adap, skb);
1278 		spin_lock_bh(&adap->tid_release_lock);
1279 	}
1280 	adap->tid_release_task_busy = false;
1281 	spin_unlock_bh(&adap->tid_release_lock);
1282 }
1283 
1284 /*
1285  * Release a TID and inform HW.  If we are unable to allocate the release
1286  * message we defer to a work queue.
1287  */
1288 void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid,
1289 		      unsigned short family)
1290 {
1291 	struct sk_buff *skb;
1292 	struct adapter *adap = container_of(t, struct adapter, tids);
1293 
1294 	WARN_ON(tid >= t->ntids);
1295 
1296 	if (t->tid_tab[tid]) {
1297 		t->tid_tab[tid] = NULL;
1298 		atomic_dec(&t->conns_in_use);
1299 		if (t->hash_base && (tid >= t->hash_base)) {
1300 			if (family == AF_INET6)
1301 				atomic_sub(2, &t->hash_tids_in_use);
1302 			else
1303 				atomic_dec(&t->hash_tids_in_use);
1304 		} else {
1305 			if (family == AF_INET6)
1306 				atomic_sub(2, &t->tids_in_use);
1307 			else
1308 				atomic_dec(&t->tids_in_use);
1309 		}
1310 	}
1311 
1312 	skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
1313 	if (likely(skb)) {
1314 		mk_tid_release(skb, chan, tid);
1315 		t4_ofld_send(adap, skb);
1316 	} else
1317 		cxgb4_queue_tid_release(t, chan, tid);
1318 }
1319 EXPORT_SYMBOL(cxgb4_remove_tid);
1320 
1321 /*
1322  * Allocate and initialize the TID tables.  Returns 0 on success.
1323  */
1324 static int tid_init(struct tid_info *t)
1325 {
1326 	struct adapter *adap = container_of(t, struct adapter, tids);
1327 	unsigned int max_ftids = t->nftids + t->nsftids;
1328 	unsigned int natids = t->natids;
1329 	unsigned int stid_bmap_size;
1330 	unsigned int ftid_bmap_size;
1331 	size_t size;
1332 
1333 	stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
1334 	ftid_bmap_size = BITS_TO_LONGS(t->nftids);
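
	/* All the tables below are carved out of a single contiguous
	 * allocation, laid out in the order: tid_tab, atid_tab, stid_tab
	 * (servers followed by server filters), stid_bmap, ftid_tab,
	 * ftid_bmap.
	 */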
1335 	size = t->ntids * sizeof(*t->tid_tab) +
1336 	       natids * sizeof(*t->atid_tab) +
1337 	       t->nstids * sizeof(*t->stid_tab) +
1338 	       t->nsftids * sizeof(*t->stid_tab) +
1339 	       stid_bmap_size * sizeof(long) +
1340 	       max_ftids * sizeof(*t->ftid_tab) +
1341 	       ftid_bmap_size * sizeof(long);
1342 
1343 	t->tid_tab = kvzalloc(size, GFP_KERNEL);
1344 	if (!t->tid_tab)
1345 		return -ENOMEM;
1346 
1347 	t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
1348 	t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
1349 	t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
1350 	t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
1351 	t->ftid_bmap = (unsigned long *)&t->ftid_tab[max_ftids];
1352 	spin_lock_init(&t->stid_lock);
1353 	spin_lock_init(&t->atid_lock);
1354 	spin_lock_init(&t->ftid_lock);
1355 
1356 	t->stids_in_use = 0;
1357 	t->v6_stids_in_use = 0;
1358 	t->sftids_in_use = 0;
1359 	t->afree = NULL;
1360 	t->atids_in_use = 0;
1361 	atomic_set(&t->tids_in_use, 0);
1362 	atomic_set(&t->conns_in_use, 0);
1363 	atomic_set(&t->hash_tids_in_use, 0);
1364 
1365 	/* Setup the free list for atid_tab and clear the stid bitmap. */
1366 	if (natids) {
1367 		while (--natids)
1368 			t->atid_tab[natids - 1].next = &t->atid_tab[natids];
1369 		t->afree = t->atid_tab;
1370 	}
1371 
1372 	if (is_offload(adap)) {
1373 		bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
1374 		/* Reserve stid 0 for T4/T5 adapters */
1375 		if (!t->stid_base &&
1376 		    CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
1377 			__set_bit(0, t->stid_bmap);
1378 	}
1379 
1380 	bitmap_zero(t->ftid_bmap, t->nftids);
1381 	return 0;
1382 }
1383 
1384 /**
1385  *	cxgb4_create_server - create an IP server
1386  *	@dev: the device
1387  *	@stid: the server TID
1388  *	@sip: local IP address to bind server to
 *	@sport: the server's TCP port
 *	@vlan: the VLAN tag (unused by this function)
1390  *	@queue: queue to direct messages from this server to
1391  *
1392  *	Create an IP server for the given port and address.
1393  *	Returns <0 on error and one of the %NET_XMIT_* values on success.
1394  */
1395 int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
1396 			__be32 sip, __be16 sport, __be16 vlan,
1397 			unsigned int queue)
1398 {
1399 	unsigned int chan;
1400 	struct sk_buff *skb;
1401 	struct adapter *adap;
1402 	struct cpl_pass_open_req *req;
1403 	int ret;
1404 
1405 	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
1406 	if (!skb)
1407 		return -ENOMEM;
1408 
1409 	adap = netdev2adap(dev);
1410 	req = __skb_put(skb, sizeof(*req));
1411 	INIT_TP_WR(req, 0);
1412 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
1413 	req->local_port = sport;
1414 	req->peer_port = htons(0);
1415 	req->local_ip = sip;
1416 	req->peer_ip = htonl(0);
1417 	chan = rxq_to_chan(&adap->sge, queue);
1418 	req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
1419 	req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
1420 				SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
1421 	ret = t4_mgmt_tx(adap, skb);
1422 	return net_xmit_eval(ret);
1423 }
1424 EXPORT_SYMBOL(cxgb4_create_server);
1425 
/**
 *	cxgb4_create_server6 - create an IPv6 server
1427  *	@dev: the device
1428  *	@stid: the server TID
1429  *	@sip: local IPv6 address to bind server to
1430  *	@sport: the server's TCP port
1431  *	@queue: queue to direct messages from this server to
1432  *
1433  *	Create an IPv6 server for the given port and address.
1434  *	Returns <0 on error and one of the %NET_XMIT_* values on success.
1435  */
1436 int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
1437 			 const struct in6_addr *sip, __be16 sport,
1438 			 unsigned int queue)
1439 {
1440 	unsigned int chan;
1441 	struct sk_buff *skb;
1442 	struct adapter *adap;
1443 	struct cpl_pass_open_req6 *req;
1444 	int ret;
1445 
1446 	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
1447 	if (!skb)
1448 		return -ENOMEM;
1449 
1450 	adap = netdev2adap(dev);
1451 	req = __skb_put(skb, sizeof(*req));
1452 	INIT_TP_WR(req, 0);
1453 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
1454 	req->local_port = sport;
1455 	req->peer_port = htons(0);
1456 	req->local_ip_hi = *(__be64 *)(sip->s6_addr);
1457 	req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
1458 	req->peer_ip_hi = cpu_to_be64(0);
1459 	req->peer_ip_lo = cpu_to_be64(0);
1460 	chan = rxq_to_chan(&adap->sge, queue);
1461 	req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
1462 	req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
1463 				SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
1464 	ret = t4_mgmt_tx(adap, skb);
1465 	return net_xmit_eval(ret);
1466 }
1467 EXPORT_SYMBOL(cxgb4_create_server6);
1468 
1469 int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
1470 			unsigned int queue, bool ipv6)
1471 {
1472 	struct sk_buff *skb;
1473 	struct adapter *adap;
1474 	struct cpl_close_listsvr_req *req;
1475 	int ret;
1476 
1477 	adap = netdev2adap(dev);
1478 
1479 	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
1480 	if (!skb)
1481 		return -ENOMEM;
1482 
1483 	req = __skb_put(skb, sizeof(*req));
1484 	INIT_TP_WR(req, 0);
1485 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
1486 	req->reply_ctrl = htons(NO_REPLY_V(0) | (ipv6 ? LISTSVR_IPV6_V(1) :
1487 				LISTSVR_IPV6_V(0)) | QUEUENO_V(queue));
1488 	ret = t4_mgmt_tx(adap, skb);
1489 	return net_xmit_eval(ret);
1490 }
1491 EXPORT_SYMBOL(cxgb4_remove_server);
1492 
1493 /**
1494  *	cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
1495  *	@mtus: the HW MTU table
1496  *	@mtu: the target MTU
1497  *	@idx: index of selected entry in the MTU table
1498  *
1499  *	Returns the index and the value in the HW MTU table that is closest to
1500  *	but does not exceed @mtu, unless @mtu is smaller than any value in the
1501  *	table, in which case that smallest available value is selected.
1502  */
1503 unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
1504 			    unsigned int *idx)
1505 {
1506 	unsigned int i = 0;
1507 
1508 	while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
1509 		++i;
1510 	if (idx)
1511 		*idx = i;
1512 	return mtus[i];
1513 }
1514 EXPORT_SYMBOL(cxgb4_best_mtu);
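
/* For illustration (table values hypothetical): if the MTU table contains
 * ..., 1280, 1488, 1500, ..., then cxgb4_best_mtu(mtus, 1400, &idx)
 * returns 1280, the largest entry that does not exceed 1400.
 */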
1515 
1516 /**
1517  *     cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned
1518  *     @mtus: the HW MTU table
1519  *     @header_size: Header Size
1520  *     @data_size_max: maximum Data Segment Size
1521  *     @data_size_align: desired Data Segment Size Alignment (2^N)
1522  *     @mtu_idxp: HW MTU Table Index return value pointer (possibly NULL)
1523  *
1524  *     Similar to cxgb4_best_mtu() but instead of searching the Hardware
1525  *     MTU Table based solely on a Maximum MTU parameter, we break that
1526  *     parameter up into a Header Size and Maximum Data Segment Size, and
1527  *     provide a desired Data Segment Size Alignment.  If we find an MTU in
1528  *     the Hardware MTU Table which will result in a Data Segment Size with
1529  *     the requested alignment _and_ that MTU isn't "too far" from the
1530  *     closest MTU, then we'll return that rather than the closest MTU.
1531  */
1532 unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
1533 				    unsigned short header_size,
1534 				    unsigned short data_size_max,
1535 				    unsigned short data_size_align,
1536 				    unsigned int *mtu_idxp)
1537 {
1538 	unsigned short max_mtu = header_size + data_size_max;
1539 	unsigned short data_size_align_mask = data_size_align - 1;
1540 	int mtu_idx, aligned_mtu_idx;
1541 
1542 	/* Scan the MTU Table till we find an MTU which is larger than our
1543 	 * Maximum MTU or we reach the end of the table.  Along the way,
1544 	 * record the last MTU found, if any, which will result in a Data
1545 	 * Segment Length matching the requested alignment.
1546 	 */
1547 	for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) {
1548 		unsigned short data_size = mtus[mtu_idx] - header_size;
1549 
1550 		/* If this MTU minus the Header Size would result in a
1551 		 * Data Segment Size of the desired alignment, remember it.
1552 		 */
1553 		if ((data_size & data_size_align_mask) == 0)
1554 			aligned_mtu_idx = mtu_idx;
1555 
1556 		/* If we're not at the end of the Hardware MTU Table and the
1557 		 * next element is larger than our Maximum MTU, drop out of
1558 		 * the loop.
1559 		 */
		if (mtu_idx + 1 < NMTUS && mtus[mtu_idx + 1] > max_mtu)
1561 			break;
1562 	}
1563 
1564 	/* If we fell out of the loop because we ran to the end of the table,
1565 	 * then we just have to use the last [largest] entry.
1566 	 */
1567 	if (mtu_idx == NMTUS)
1568 		mtu_idx--;
1569 
1570 	/* If we found an MTU which resulted in the requested Data Segment
1571 	 * Length alignment and that's "not far" from the largest MTU which is
1572 	 * less than or equal to the maximum MTU, then use that.
1573 	 */
1574 	if (aligned_mtu_idx >= 0 &&
1575 	    mtu_idx - aligned_mtu_idx <= 1)
1576 		mtu_idx = aligned_mtu_idx;
1577 
1578 	/* If the caller has passed in an MTU Index pointer, pass the
1579 	 * MTU Index back.  Return the MTU value.
1580 	 */
1581 	if (mtu_idxp)
1582 		*mtu_idxp = mtu_idx;
1583 	return mtus[mtu_idx];
1584 }
1585 EXPORT_SYMBOL(cxgb4_best_aligned_mtu);
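
/* Worked example (illustrative table values): with a 40-byte TCP/IPv4
 * header, data_size_max = 1460 and data_size_align = 8, a table containing
 * ..., 1488, 1500, ... yields 1488: its 1448-byte data segment is 8-byte
 * aligned while 1500's 1460-byte segment is not, and 1488 is within one
 * entry of the closest MTU (1500).
 */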
1586 
1587 /**
 *	cxgb4_tp_smt_idx - Get the Source MAC Table index for this VI
1589  *	@chip: chip type
1590  *	@viid: VI id of the given port
1591  *
1592  *	Return the SMT index for this VI.
1593  */
1594 unsigned int cxgb4_tp_smt_idx(enum chip_type chip, unsigned int viid)
1595 {
1596 	/* In T4/T5, SMT contains 256 SMAC entries organized in
1597 	 * 128 rows of 2 entries each.
1598 	 * In T6, SMT contains 256 SMAC entries in 256 rows.
1599 	 * TODO: The below code needs to be updated when we add support
1600 	 * for 256 VFs.
1601 	 */
1602 	if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5)
1603 		return ((viid & 0x7f) << 1);
1604 	else
1605 		return (viid & 0x7f);
1606 }
1607 EXPORT_SYMBOL(cxgb4_tp_smt_idx);
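
/* E.g. VI id 0x3 maps to SMT index 0x6 on T4/T5 (two entries per row) and
 * to index 0x3 on T6 (one entry per row).
 */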
1608 
1609 /**
1610  *	cxgb4_port_chan - get the HW channel of a port
1611  *	@dev: the net device for the port
1612  *
1613  *	Return the HW Tx channel of the given port.
1614  */
1615 unsigned int cxgb4_port_chan(const struct net_device *dev)
1616 {
1617 	return netdev2pinfo(dev)->tx_chan;
1618 }
1619 EXPORT_SYMBOL(cxgb4_port_chan);
1620 
1621 unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
1622 {
1623 	struct adapter *adap = netdev2adap(dev);
1624 	u32 v1, v2, lp_count, hp_count;
1625 
1626 	v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
1627 	v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
1628 	if (is_t4(adap->params.chip)) {
1629 		lp_count = LP_COUNT_G(v1);
1630 		hp_count = HP_COUNT_G(v1);
1631 	} else {
1632 		lp_count = LP_COUNT_T5_G(v1);
1633 		hp_count = HP_COUNT_T5_G(v2);
1634 	}
1635 	return lpfifo ? lp_count : hp_count;
1636 }
1637 EXPORT_SYMBOL(cxgb4_dbfifo_count);
1638 
1639 /**
1640  *	cxgb4_port_viid - get the VI id of a port
1641  *	@dev: the net device for the port
1642  *
1643  *	Return the VI id of the given port.
1644  */
1645 unsigned int cxgb4_port_viid(const struct net_device *dev)
1646 {
1647 	return netdev2pinfo(dev)->viid;
1648 }
1649 EXPORT_SYMBOL(cxgb4_port_viid);
1650 
1651 /**
1652  *	cxgb4_port_idx - get the index of a port
1653  *	@dev: the net device for the port
1654  *
1655  *	Return the index of the given port.
1656  */
1657 unsigned int cxgb4_port_idx(const struct net_device *dev)
1658 {
1659 	return netdev2pinfo(dev)->port_id;
1660 }
1661 EXPORT_SYMBOL(cxgb4_port_idx);
1662 
1663 void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
1664 			 struct tp_tcp_stats *v6)
1665 {
1666 	struct adapter *adap = pci_get_drvdata(pdev);
1667 
1668 	spin_lock(&adap->stats_lock);
1669 	t4_tp_get_tcp_stats(adap, v4, v6, false);
1670 	spin_unlock(&adap->stats_lock);
1671 }
1672 EXPORT_SYMBOL(cxgb4_get_tcp_stats);
1673 
1674 void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
1675 		      const unsigned int *pgsz_order)
1676 {
1677 	struct adapter *adap = netdev2adap(dev);
1678 
1679 	t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK_A, tag_mask);
1680 	t4_write_reg(adap, ULP_RX_ISCSI_PSZ_A, HPZ0_V(pgsz_order[0]) |
1681 		     HPZ1_V(pgsz_order[1]) | HPZ2_V(pgsz_order[2]) |
1682 		     HPZ3_V(pgsz_order[3]));
1683 }
1684 EXPORT_SYMBOL(cxgb4_iscsi_init);
1685 
1686 int cxgb4_flush_eq_cache(struct net_device *dev)
1687 {
1688 	struct adapter *adap = netdev2adap(dev);
1689 
1690 	return t4_sge_ctxt_flush(adap, adap->mbox, CTXT_EGRESS);
1691 }
1692 EXPORT_SYMBOL(cxgb4_flush_eq_cache);
1693 
1694 static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
1695 {
1696 	u32 addr = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A) + 24 * qid + 8;
1697 	__be64 indices;
1698 	int ret;
1699 
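	/* Each egress-queue context is 24 bytes; the hardware cidx/pidx
	 * fields sit in the 8-byte word at offset 8, at bit offsets 25
	 * and 9 respectively.
	 */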
1700 	spin_lock(&adap->win0_lock);
1701 	ret = t4_memory_rw(adap, 0, MEM_EDC0, addr,
1702 			   sizeof(indices), (__be32 *)&indices,
1703 			   T4_MEMORY_READ);
1704 	spin_unlock(&adap->win0_lock);
1705 	if (!ret) {
1706 		*cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
1707 		*pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
1708 	}
1709 	return ret;
1710 }
1711 
1712 int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
1713 			u16 size)
1714 {
1715 	struct adapter *adap = netdev2adap(dev);
1716 	u16 hw_pidx, hw_cidx;
1717 	int ret;
1718 
1719 	ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
1720 	if (ret)
1721 		goto out;
1722 
1723 	if (pidx != hw_pidx) {
1724 		u16 delta;
1725 		u32 val;
1726 
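		/* Compute how far the software pidx has advanced past the
		 * hardware's copy, accounting for ring wraparound.
		 */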
1727 		if (pidx >= hw_pidx)
1728 			delta = pidx - hw_pidx;
1729 		else
1730 			delta = size - hw_pidx + pidx;
1731 
1732 		if (is_t4(adap->params.chip))
1733 			val = PIDX_V(delta);
1734 		else
1735 			val = PIDX_T5_V(delta);
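		/* Ensure the descriptor updates are visible to the device
		 * before ringing the doorbell.
		 */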
1736 		wmb();
1737 		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
1738 			     QID_V(qid) | val);
1739 	}
1740 out:
1741 	return ret;
1742 }
1743 EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
1744 
1745 int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
1746 {
1747 	u32 edc0_size, edc1_size, mc0_size, mc1_size, size;
1748 	u32 edc0_end, edc1_end, mc0_end, mc1_end;
1749 	u32 offset, memtype, memaddr;
1750 	struct adapter *adap;
1751 	u32 hma_size = 0;
1752 	int ret;
1753 
1754 	adap = netdev2adap(dev);
1755 
1756 	offset = ((stag >> 8) * 32) + adap->vres.stag.start;
1757 
1758 	/* Figure out where the offset lands in the Memory Type/Address scheme.
1759 	 * This code assumes that the memory is laid out starting at offset 0
1760 	 * with no breaks as: EDC0, EDC1, MC0, MC1. All cards have both EDC0
1761 	 * and EDC1.  Some cards will have neither MC0 nor MC1, most cards have
1762 	 * MC0, and some have both MC0 and MC1.
1763 	 */
1764 	size = t4_read_reg(adap, MA_EDRAM0_BAR_A);
1765 	edc0_size = EDRAM0_SIZE_G(size) << 20;
1766 	size = t4_read_reg(adap, MA_EDRAM1_BAR_A);
1767 	edc1_size = EDRAM1_SIZE_G(size) << 20;
1768 	size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
1769 	mc0_size = EXT_MEM0_SIZE_G(size) << 20;
1770 
1771 	if (t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A) & HMA_MUX_F) {
1772 		size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
1773 		hma_size = EXT_MEM1_SIZE_G(size) << 20;
1774 	}
1775 	edc0_end = edc0_size;
1776 	edc1_end = edc0_end + edc1_size;
1777 	mc0_end = edc1_end + mc0_size;
1778 
1779 	if (offset < edc0_end) {
1780 		memtype = MEM_EDC0;
1781 		memaddr = offset;
1782 	} else if (offset < edc1_end) {
1783 		memtype = MEM_EDC1;
1784 		memaddr = offset - edc0_end;
1785 	} else {
1786 		if (hma_size && (offset < (edc1_end + hma_size))) {
1787 			memtype = MEM_HMA;
1788 			memaddr = offset - edc1_end;
1789 		} else if (offset < mc0_end) {
1790 			memtype = MEM_MC0;
1791 			memaddr = offset - edc1_end;
1792 		} else if (is_t5(adap->params.chip)) {
1793 			size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
1794 			mc1_size = EXT_MEM1_SIZE_G(size) << 20;
1795 			mc1_end = mc0_end + mc1_size;
1796 			if (offset < mc1_end) {
1797 				memtype = MEM_MC1;
1798 				memaddr = offset - mc0_end;
1799 			} else {
1800 				/* offset beyond the end of any memory */
1801 				goto err;
1802 			}
1803 		} else {
			/* T4/T6 have only a single memory channel */
1805 			goto err;
1806 		}
1807 	}
1808 
1809 	spin_lock(&adap->win0_lock);
1810 	ret = t4_memory_rw(adap, 0, memtype, memaddr, 32, tpte, T4_MEMORY_READ);
1811 	spin_unlock(&adap->win0_lock);
1812 	return ret;
1813 
1814 err:
1815 	dev_err(adap->pdev_dev, "stag %#x, offset %#x out of range\n",
1816 		stag, offset);
1817 	return -EINVAL;
1818 }
1819 EXPORT_SYMBOL(cxgb4_read_tpte);
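
/* For illustration, suppose the MA registers report 256MB of EDC0 and
 * 256MB of EDC1: a TPTE offset of 300MB then lands in EDC1 at address
 * 300MB - 256MB = 44MB, while an offset of 600MB is looked up in MC0
 * (or HMA, when host memory is muxed in) at 600MB - 512MB = 88MB.
 */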
1820 
1821 u64 cxgb4_read_sge_timestamp(struct net_device *dev)
1822 {
1823 	u32 hi, lo;
1824 	struct adapter *adap;
1825 
1826 	adap = netdev2adap(dev);
1827 	lo = t4_read_reg(adap, SGE_TIMESTAMP_LO_A);
1828 	hi = TSVAL_G(t4_read_reg(adap, SGE_TIMESTAMP_HI_A));
1829 
1830 	return ((u64)hi << 32) | (u64)lo;
1831 }
1832 EXPORT_SYMBOL(cxgb4_read_sge_timestamp);
1833 
1834 int cxgb4_bar2_sge_qregs(struct net_device *dev,
1835 			 unsigned int qid,
1836 			 enum cxgb4_bar2_qtype qtype,
1837 			 int user,
1838 			 u64 *pbar2_qoffset,
1839 			 unsigned int *pbar2_qid)
1840 {
1841 	return t4_bar2_sge_qregs(netdev2adap(dev),
1842 				 qid,
1843 				 (qtype == CXGB4_BAR2_QTYPE_EGRESS
1844 				  ? T4_BAR2_QTYPE_EGRESS
1845 				  : T4_BAR2_QTYPE_INGRESS),
1846 				 user,
1847 				 pbar2_qoffset,
1848 				 pbar2_qid);
1849 }
1850 EXPORT_SYMBOL(cxgb4_bar2_sge_qregs);
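
/* Usage sketch (illustrative): a ULD that rings its own egress-queue
 * doorbells through BAR2 typically looks up the queue's offset once and
 * then writes the doorbell register directly:
 *
 *	u64 bar2_qoffset;
 *	unsigned int bar2_qid;
 *
 *	ret = cxgb4_bar2_sge_qregs(netdev, cntxt_id,
 *				   CXGB4_BAR2_QTYPE_EGRESS, 0,
 *				   &bar2_qoffset, &bar2_qid);
 *	if (!ret)
 *		writel(QID_V(bar2_qid) | PIDX_T5_V(inc),
 *		       bar2_base + bar2_qoffset + SGE_UDB_KDOORBELL);
 *
 * Here cntxt_id, inc and bar2_base are placeholders; compare the T5
 * doorbell-drop recovery in process_db_drop() below.
 */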
1851 
1852 static struct pci_driver cxgb4_driver;
1853 
1854 static void check_neigh_update(struct neighbour *neigh)
1855 {
1856 	const struct device *parent;
1857 	const struct net_device *netdev = neigh->dev;
1858 
1859 	if (is_vlan_dev(netdev))
1860 		netdev = vlan_dev_real_dev(netdev);
1861 	parent = netdev->dev.parent;
1862 	if (parent && parent->driver == &cxgb4_driver.driver)
1863 		t4_l2t_update(dev_get_drvdata(parent), neigh);
1864 }
1865 
1866 static int netevent_cb(struct notifier_block *nb, unsigned long event,
1867 		       void *data)
1868 {
1869 	switch (event) {
1870 	case NETEVENT_NEIGH_UPDATE:
1871 		check_neigh_update(data);
1872 		break;
1873 	case NETEVENT_REDIRECT:
1874 	default:
1875 		break;
1876 	}
1877 	return 0;
1878 }
1879 
1880 static bool netevent_registered;
1881 static struct notifier_block cxgb4_netevent_nb = {
1882 	.notifier_call = netevent_cb
1883 };
1884 
1885 static void drain_db_fifo(struct adapter *adap, int usecs)
1886 {
1887 	u32 v1, v2, lp_count, hp_count;
1888 
1889 	do {
1890 		v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
1891 		v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
1892 		if (is_t4(adap->params.chip)) {
1893 			lp_count = LP_COUNT_G(v1);
1894 			hp_count = HP_COUNT_G(v1);
1895 		} else {
1896 			lp_count = LP_COUNT_T5_G(v1);
1897 			hp_count = HP_COUNT_T5_G(v2);
1898 		}
1899 
1900 		if (lp_count == 0 && hp_count == 0)
1901 			break;
1902 		set_current_state(TASK_UNINTERRUPTIBLE);
1903 		schedule_timeout(usecs_to_jiffies(usecs));
1904 	} while (1);
1905 }
1906 
1907 static void disable_txq_db(struct sge_txq *q)
1908 {
1909 	unsigned long flags;
1910 
1911 	spin_lock_irqsave(&q->db_lock, flags);
1912 	q->db_disabled = 1;
1913 	spin_unlock_irqrestore(&q->db_lock, flags);
1914 }
1915 
1916 static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
1917 {
1918 	spin_lock_irq(&q->db_lock);
1919 	if (q->db_pidx_inc) {
1920 		/* Make sure that all writes to the TX descriptors
1921 		 * are committed before we tell HW about them.
1922 		 */
1923 		wmb();
1924 		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
1925 			     QID_V(q->cntxt_id) | PIDX_V(q->db_pidx_inc));
1926 		q->db_pidx_inc = 0;
1927 	}
1928 	q->db_disabled = 0;
1929 	spin_unlock_irq(&q->db_lock);
1930 }
1931 
1932 static void disable_dbs(struct adapter *adap)
1933 {
1934 	int i;
1935 
1936 	for_each_ethrxq(&adap->sge, i)
1937 		disable_txq_db(&adap->sge.ethtxq[i].q);
1938 	if (is_offload(adap)) {
1939 		struct sge_uld_txq_info *txq_info =
1940 			adap->sge.uld_txq_info[CXGB4_TX_OFLD];
1941 
1942 		if (txq_info) {
1943 			for_each_ofldtxq(&adap->sge, i) {
1944 				struct sge_uld_txq *txq = &txq_info->uldtxq[i];
1945 
1946 				disable_txq_db(&txq->q);
1947 			}
1948 		}
1949 	}
1950 	for_each_port(adap, i)
1951 		disable_txq_db(&adap->sge.ctrlq[i].q);
1952 }
1953 
1954 static void enable_dbs(struct adapter *adap)
1955 {
1956 	int i;
1957 
1958 	for_each_ethrxq(&adap->sge, i)
1959 		enable_txq_db(adap, &adap->sge.ethtxq[i].q);
1960 	if (is_offload(adap)) {
1961 		struct sge_uld_txq_info *txq_info =
1962 			adap->sge.uld_txq_info[CXGB4_TX_OFLD];
1963 
1964 		if (txq_info) {
1965 			for_each_ofldtxq(&adap->sge, i) {
1966 				struct sge_uld_txq *txq = &txq_info->uldtxq[i];
1967 
1968 				enable_txq_db(adap, &txq->q);
1969 			}
1970 		}
1971 	}
1972 	for_each_port(adap, i)
1973 		enable_txq_db(adap, &adap->sge.ctrlq[i].q);
1974 }
1975 
1976 static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
1977 {
1978 	enum cxgb4_uld type = CXGB4_ULD_RDMA;
1979 
1980 	if (adap->uld && adap->uld[type].handle)
1981 		adap->uld[type].control(adap->uld[type].handle, cmd);
1982 }
1983 
1984 static void process_db_full(struct work_struct *work)
1985 {
1986 	struct adapter *adap;
1987 
1988 	adap = container_of(work, struct adapter, db_full_task);
1989 
1990 	drain_db_fifo(adap, dbfifo_drain_delay);
1991 	enable_dbs(adap);
1992 	notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
1993 	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
1994 		t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
1995 				 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F,
1996 				 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F);
1997 	else
1998 		t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
1999 				 DBFIFO_LP_INT_F, DBFIFO_LP_INT_F);
2000 }
2001 
2002 static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
2003 {
2004 	u16 hw_pidx, hw_cidx;
2005 	int ret;
2006 
2007 	spin_lock_irq(&q->db_lock);
2008 	ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
2009 	if (ret)
2010 		goto out;
2011 	if (q->db_pidx != hw_pidx) {
2012 		u16 delta;
2013 		u32 val;
2014 
2015 		if (q->db_pidx >= hw_pidx)
2016 			delta = q->db_pidx - hw_pidx;
2017 		else
2018 			delta = q->size - hw_pidx + q->db_pidx;
2019 
2020 		if (is_t4(adap->params.chip))
2021 			val = PIDX_V(delta);
2022 		else
2023 			val = PIDX_T5_V(delta);
2024 		wmb();
2025 		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
2026 			     QID_V(q->cntxt_id) | val);
2027 	}
2028 out:
2029 	q->db_disabled = 0;
2030 	q->db_pidx_inc = 0;
2031 	spin_unlock_irq(&q->db_lock);
2032 	if (ret)
2033 		CH_WARN(adap, "DB drop recovery failed.\n");
2034 }
2035 
2036 static void recover_all_queues(struct adapter *adap)
2037 {
2038 	int i;
2039 
2040 	for_each_ethrxq(&adap->sge, i)
2041 		sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
2042 	if (is_offload(adap)) {
2043 		struct sge_uld_txq_info *txq_info =
2044 			adap->sge.uld_txq_info[CXGB4_TX_OFLD];
2045 		if (txq_info) {
2046 			for_each_ofldtxq(&adap->sge, i) {
2047 				struct sge_uld_txq *txq = &txq_info->uldtxq[i];
2048 
2049 				sync_txq_pidx(adap, &txq->q);
2050 			}
2051 		}
2052 	}
2053 	for_each_port(adap, i)
2054 		sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
2055 }
2056 
2057 static void process_db_drop(struct work_struct *work)
2058 {
2059 	struct adapter *adap;
2060 
2061 	adap = container_of(work, struct adapter, db_drop_task);
2062 
2063 	if (is_t4(adap->params.chip)) {
2064 		drain_db_fifo(adap, dbfifo_drain_delay);
2065 		notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
2066 		drain_db_fifo(adap, dbfifo_drain_delay);
2067 		recover_all_queues(adap);
2068 		drain_db_fifo(adap, dbfifo_drain_delay);
2069 		enable_dbs(adap);
2070 		notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
2071 	} else if (is_t5(adap->params.chip)) {
2072 		u32 dropped_db = t4_read_reg(adap, 0x010ac);
2073 		u16 qid = (dropped_db >> 15) & 0x1ffff;
2074 		u16 pidx_inc = dropped_db & 0x1fff;
2075 		u64 bar2_qoffset;
2076 		unsigned int bar2_qid;
2077 		int ret;
2078 
2079 		ret = t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS,
2080 					0, &bar2_qoffset, &bar2_qid);
2081 		if (ret)
			dev_err(adap->pdev_dev,
				"doorbell drop recovery: qid=%d, pidx_inc=%d\n",
				qid, pidx_inc);
2084 		else
2085 			writel(PIDX_T5_V(pidx_inc) | QID_V(bar2_qid),
2086 			       adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL);
2087 
2088 		/* Re-enable BAR2 WC */
		t4_set_reg_field(adap, 0x10b0, 1 << 15, 1 << 15);
2090 	}
2091 
2092 	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
2093 		t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0);
2094 }
2095 
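/* Entry points for doorbell FIFO events.  They quiesce doorbells as
 * needed and defer the actual drain/replay work above to the adapter
 * workqueue, since that work can sleep.
 */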
2096 void t4_db_full(struct adapter *adap)
2097 {
2098 	if (is_t4(adap->params.chip)) {
2099 		disable_dbs(adap);
2100 		notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
2101 		t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
2102 				 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F, 0);
2103 		queue_work(adap->workq, &adap->db_full_task);
2104 	}
2105 }
2106 
2107 void t4_db_dropped(struct adapter *adap)
2108 {
2109 	if (is_t4(adap->params.chip)) {
2110 		disable_dbs(adap);
2111 		notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
2112 	}
2113 	queue_work(adap->workq, &adap->db_drop_task);
2114 }
2115 
2116 void t4_register_netevent_notifier(void)
2117 {
2118 	if (!netevent_registered) {
2119 		register_netevent_notifier(&cxgb4_netevent_nb);
2120 		netevent_registered = true;
2121 	}
2122 }
2123 
2124 static void detach_ulds(struct adapter *adap)
2125 {
2126 	unsigned int i;
2127 
2128 	mutex_lock(&uld_mutex);
2129 	list_del(&adap->list_node);
2130 
2131 	for (i = 0; i < CXGB4_ULD_MAX; i++)
2132 		if (adap->uld && adap->uld[i].handle)
2133 			adap->uld[i].state_change(adap->uld[i].handle,
2134 					     CXGB4_STATE_DETACH);
2135 
2136 	if (netevent_registered && list_empty(&adapter_list)) {
2137 		unregister_netevent_notifier(&cxgb4_netevent_nb);
2138 		netevent_registered = false;
2139 	}
2140 	mutex_unlock(&uld_mutex);
2141 }
2142 
2143 static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
2144 {
2145 	unsigned int i;
2146 
2147 	mutex_lock(&uld_mutex);
2148 	for (i = 0; i < CXGB4_ULD_MAX; i++)
2149 		if (adap->uld && adap->uld[i].handle)
2150 			adap->uld[i].state_change(adap->uld[i].handle,
2151 						  new_state);
2152 	mutex_unlock(&uld_mutex);
2153 }
2154 
2155 #if IS_ENABLED(CONFIG_IPV6)
2156 static int cxgb4_inet6addr_handler(struct notifier_block *this,
2157 				   unsigned long event, void *data)
2158 {
2159 	struct inet6_ifaddr *ifa = data;
2160 	struct net_device *event_dev = ifa->idev->dev;
2161 	const struct device *parent = NULL;
2162 #if IS_ENABLED(CONFIG_BONDING)
2163 	struct adapter *adap;
2164 #endif
2165 	if (is_vlan_dev(event_dev))
2166 		event_dev = vlan_dev_real_dev(event_dev);
2167 #if IS_ENABLED(CONFIG_BONDING)
2168 	if (event_dev->flags & IFF_MASTER) {
2169 		list_for_each_entry(adap, &adapter_list, list_node) {
2170 			switch (event) {
2171 			case NETDEV_UP:
2172 				cxgb4_clip_get(adap->port[0],
2173 					       (const u32 *)ifa, 1);
2174 				break;
2175 			case NETDEV_DOWN:
2176 				cxgb4_clip_release(adap->port[0],
2177 						   (const u32 *)ifa, 1);
2178 				break;
2179 			default:
2180 				break;
2181 			}
2182 		}
2183 		return NOTIFY_OK;
2184 	}
2185 #endif
2186 
2187 	if (event_dev)
2188 		parent = event_dev->dev.parent;
2189 
2190 	if (parent && parent->driver == &cxgb4_driver.driver) {
2191 		switch (event) {
2192 		case NETDEV_UP:
2193 			cxgb4_clip_get(event_dev, (const u32 *)ifa, 1);
2194 			break;
2195 		case NETDEV_DOWN:
2196 			cxgb4_clip_release(event_dev, (const u32 *)ifa, 1);
2197 			break;
2198 		default:
2199 			break;
2200 		}
2201 	}
2202 	return NOTIFY_OK;
2203 }
2204 
2205 static bool inet6addr_registered;
2206 static struct notifier_block cxgb4_inet6addr_notifier = {
2207 	.notifier_call = cxgb4_inet6addr_handler
2208 };
2209 
2210 static void update_clip(const struct adapter *adap)
2211 {
2212 	int i;
2213 	struct net_device *dev;
2214 	int ret;
2215 
2216 	rcu_read_lock();
2217 
2218 	for (i = 0; i < MAX_NPORTS; i++) {
2219 		dev = adap->port[i];
2220 		ret = 0;
2221 
2222 		if (dev)
2223 			ret = cxgb4_update_root_dev_clip(dev);
2224 
2225 		if (ret < 0)
2226 			break;
2227 	}
2228 	rcu_read_unlock();
2229 }
2230 #endif /* IS_ENABLED(CONFIG_IPV6) */
2231 
2232 /**
2233  *	cxgb_up - enable the adapter
2234  *	@adap: adapter being enabled
2235  *
2236  *	Called when the first port is enabled, this function performs the
2237  *	actions necessary to make an adapter operational, such as completing
2238  *	the initialization of HW modules, and enabling interrupts.
2239  *
2240  *	Must be called with the rtnl lock held.
2241  */
2242 static int cxgb_up(struct adapter *adap)
2243 {
2244 	int err;
2245 
2246 	mutex_lock(&uld_mutex);
2247 	err = setup_sge_queues(adap);
2248 	if (err)
2249 		goto rel_lock;
2250 	err = setup_rss(adap);
2251 	if (err)
2252 		goto freeq;
2253 
2254 	if (adap->flags & USING_MSIX) {
2255 		name_msix_vecs(adap);
2256 		err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
2257 				  adap->msix_info[0].desc, adap);
2258 		if (err)
2259 			goto irq_err;
2260 		err = request_msix_queue_irqs(adap);
2261 		if (err) {
2262 			free_irq(adap->msix_info[0].vec, adap);
2263 			goto irq_err;
2264 		}
2265 	} else {
2266 		err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
2267 				  (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
2268 				  adap->port[0]->name, adap);
2269 		if (err)
2270 			goto irq_err;
2271 	}
2272 
2273 	enable_rx(adap);
2274 	t4_sge_start(adap);
2275 	t4_intr_enable(adap);
2276 	adap->flags |= FULL_INIT_DONE;
2277 	mutex_unlock(&uld_mutex);
2278 
2279 	notify_ulds(adap, CXGB4_STATE_UP);
2280 #if IS_ENABLED(CONFIG_IPV6)
2281 	update_clip(adap);
2282 #endif
	/* Initialize hash mac addr list */
2284 	INIT_LIST_HEAD(&adap->mac_hlist);
2285 	return err;
2286 
2287  irq_err:
2288 	dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
2289  freeq:
2290 	t4_free_sge_resources(adap);
2291  rel_lock:
2292 	mutex_unlock(&uld_mutex);
2293 	return err;
2294 }
2295 
2296 static void cxgb_down(struct adapter *adapter)
2297 {
2298 	struct hash_mac_addr *entry, *tmp;
2299 
2300 	cancel_work_sync(&adapter->tid_release_task);
2301 	cancel_work_sync(&adapter->db_full_task);
2302 	cancel_work_sync(&adapter->db_drop_task);
2303 	adapter->tid_release_task_busy = false;
2304 	adapter->tid_release_head = NULL;
2305 
2306 	t4_sge_stop(adapter);
2307 	t4_free_sge_resources(adapter);
2308 
2309 	list_for_each_entry_safe(entry, tmp, &adapter->mac_hlist, list) {
2310 		list_del(&entry->list);
2311 		kfree(entry);
2312 	}
2313 
2314 	adapter->flags &= ~FULL_INIT_DONE;
2315 }
2316 
2317 /*
2318  * net_device operations
2319  */
2320 static int cxgb_open(struct net_device *dev)
2321 {
2322 	int err;
2323 	struct port_info *pi = netdev_priv(dev);
2324 	struct adapter *adapter = pi->adapter;
2325 
2326 	netif_carrier_off(dev);
2327 
2328 	if (!(adapter->flags & FULL_INIT_DONE)) {
2329 		err = cxgb_up(adapter);
2330 		if (err < 0)
2331 			return err;
2332 	}
2333 
2334 	/* It's possible that the basic port information could have
2335 	 * changed since we first read it.
2336 	 */
2337 	err = t4_update_port_info(pi);
2338 	if (err < 0)
2339 		return err;
2340 
2341 	err = link_start(dev);
2342 	if (!err)
2343 		netif_tx_start_all_queues(dev);
2344 	return err;
2345 }
2346 
2347 static int cxgb_close(struct net_device *dev)
2348 {
2349 	struct port_info *pi = netdev_priv(dev);
2350 	struct adapter *adapter = pi->adapter;
2351 	int ret;
2352 
2353 	netif_tx_stop_all_queues(dev);
2354 	netif_carrier_off(dev);
2355 	ret = t4_enable_pi_params(adapter, adapter->pf, pi,
2356 				  false, false, false);
2357 #ifdef CONFIG_CHELSIO_T4_DCB
2358 	cxgb4_dcb_reset(dev);
2359 	dcb_tx_queue_prio_enable(dev, false);
2360 #endif
2361 	return ret;
2362 }
2363 
2364 int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
2365 		__be32 sip, __be16 sport, __be16 vlan,
2366 		unsigned int queue, unsigned char port, unsigned char mask)
2367 {
2368 	int ret;
2369 	struct filter_entry *f;
2370 	struct adapter *adap;
2371 	int i;
2372 	u8 *val;
2373 
2374 	adap = netdev2adap(dev);
2375 
2376 	/* Adjust stid to correct filter index */
2377 	stid -= adap->tids.sftid_base;
2378 	stid += adap->tids.nftids;
2379 
2380 	/* Check to make sure the filter requested is writable ...
2381 	 */
2382 	f = &adap->tids.ftid_tab[stid];
2383 	ret = writable_filter(f);
2384 	if (ret)
2385 		return ret;
2386 
2387 	/* Clear out any old resources being used by the filter before
2388 	 * we start constructing the new filter.
2389 	 */
2390 	if (f->valid)
2391 		clear_filter(adap, f);
2392 
2393 	/* Clear out filter specifications */
2394 	memset(&f->fs, 0, sizeof(struct ch_filter_specification));
2395 	f->fs.val.lport = cpu_to_be16(sport);
2396 	f->fs.mask.lport  = ~0;
2397 	val = (u8 *)&sip;
2398 	if ((val[0] | val[1] | val[2] | val[3]) != 0) {
2399 		for (i = 0; i < 4; i++) {
2400 			f->fs.val.lip[i] = val[i];
2401 			f->fs.mask.lip[i] = ~0;
2402 		}
2403 		if (adap->params.tp.vlan_pri_map & PORT_F) {
2404 			f->fs.val.iport = port;
2405 			f->fs.mask.iport = mask;
2406 		}
2407 	}
2408 
2409 	if (adap->params.tp.vlan_pri_map & PROTOCOL_F) {
2410 		f->fs.val.proto = IPPROTO_TCP;
2411 		f->fs.mask.proto = ~0;
2412 	}
2413 
2414 	f->fs.dirsteer = 1;
2415 	f->fs.iq = queue;
2416 	/* Mark filter as locked */
2417 	f->locked = 1;
2418 	f->fs.rpttid = 1;
2419 
2420 	/* Save the actual tid. We need this to get the corresponding
2421 	 * filter entry structure in filter_rpl.
2422 	 */
2423 	f->tid = stid + adap->tids.ftid_base;
2424 	ret = set_filter_wr(adap, stid);
2425 	if (ret) {
2426 		clear_filter(adap, f);
2427 		return ret;
2428 	}
2429 
2430 	return 0;
2431 }
2432 EXPORT_SYMBOL(cxgb4_create_server_filter);
2433 
2434 int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
2435 		unsigned int queue, bool ipv6)
2436 {
2437 	struct filter_entry *f;
2438 	struct adapter *adap;
2439 
2440 	adap = netdev2adap(dev);
2441 
2442 	/* Adjust stid to correct filter index */
2443 	stid -= adap->tids.sftid_base;
2444 	stid += adap->tids.nftids;
2445 
2446 	f = &adap->tids.ftid_tab[stid];
2447 	/* Unlock the filter */
2448 	f->locked = 0;
2449 
2450 	return delete_filter(adap, stid);
2451 }
2452 EXPORT_SYMBOL(cxgb4_remove_server_filter);
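
/* Usage sketch (illustrative, values hypothetical): an offload driver
 * steering TCP connect requests for a server listening on 10.0.0.1:3260
 * to RX queue 5 might pair the two calls above as:
 *
 *	err = cxgb4_create_server_filter(netdev, stid, htonl(0x0a000001),
 *					 htons(3260), 0, 5, 0, 0);
 *	...
 *	err = cxgb4_remove_server_filter(netdev, stid, 5, false);
 */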
2453 
2454 static void cxgb_get_stats(struct net_device *dev,
2455 			   struct rtnl_link_stats64 *ns)
2456 {
2457 	struct port_stats stats;
2458 	struct port_info *p = netdev_priv(dev);
2459 	struct adapter *adapter = p->adapter;
2460 
2461 	/* Block retrieving statistics during EEH error
2462 	 * recovery. Otherwise, the recovery might fail
	 * and the PCI device will be removed permanently.
2464 	 */
2465 	spin_lock(&adapter->stats_lock);
2466 	if (!netif_device_present(dev)) {
2467 		spin_unlock(&adapter->stats_lock);
2468 		return;
2469 	}
2470 	t4_get_port_stats_offset(adapter, p->tx_chan, &stats,
2471 				 &p->stats_base);
2472 	spin_unlock(&adapter->stats_lock);
2473 
2474 	ns->tx_bytes   = stats.tx_octets;
2475 	ns->tx_packets = stats.tx_frames;
2476 	ns->rx_bytes   = stats.rx_octets;
2477 	ns->rx_packets = stats.rx_frames;
2478 	ns->multicast  = stats.rx_mcast_frames;
2479 
2480 	/* detailed rx_errors */
2481 	ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
2482 			       stats.rx_runt;
2483 	ns->rx_over_errors   = 0;
2484 	ns->rx_crc_errors    = stats.rx_fcs_err;
2485 	ns->rx_frame_errors  = stats.rx_symbol_err;
2486 	ns->rx_dropped	     = stats.rx_ovflow0 + stats.rx_ovflow1 +
2487 			       stats.rx_ovflow2 + stats.rx_ovflow3 +
2488 			       stats.rx_trunc0 + stats.rx_trunc1 +
2489 			       stats.rx_trunc2 + stats.rx_trunc3;
2490 	ns->rx_missed_errors = 0;
2491 
2492 	/* detailed tx_errors */
2493 	ns->tx_aborted_errors   = 0;
2494 	ns->tx_carrier_errors   = 0;
2495 	ns->tx_fifo_errors      = 0;
2496 	ns->tx_heartbeat_errors = 0;
2497 	ns->tx_window_errors    = 0;
2498 
2499 	ns->tx_errors = stats.tx_error_frames;
2500 	ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
2501 		ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
2502 }
2503 
2504 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2505 {
2506 	unsigned int mbox;
2507 	int ret = 0, prtad, devad;
2508 	struct port_info *pi = netdev_priv(dev);
2509 	struct adapter *adapter = pi->adapter;
2510 	struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
2511 
2512 	switch (cmd) {
2513 	case SIOCGMIIPHY:
2514 		if (pi->mdio_addr < 0)
2515 			return -EOPNOTSUPP;
2516 		data->phy_id = pi->mdio_addr;
2517 		break;
2518 	case SIOCGMIIREG:
2519 	case SIOCSMIIREG:
2520 		if (mdio_phy_id_is_c45(data->phy_id)) {
2521 			prtad = mdio_phy_id_prtad(data->phy_id);
2522 			devad = mdio_phy_id_devad(data->phy_id);
2523 		} else if (data->phy_id < 32) {
2524 			prtad = data->phy_id;
2525 			devad = 0;
2526 			data->reg_num &= 0x1f;
2527 		} else
2528 			return -EINVAL;
2529 
2530 		mbox = pi->adapter->pf;
2531 		if (cmd == SIOCGMIIREG)
2532 			ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
2533 					 data->reg_num, &data->val_out);
2534 		else
2535 			ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
2536 					 data->reg_num, data->val_in);
2537 		break;
2538 	case SIOCGHWTSTAMP:
2539 		return copy_to_user(req->ifr_data, &pi->tstamp_config,
2540 				    sizeof(pi->tstamp_config)) ?
2541 			-EFAULT : 0;
2542 	case SIOCSHWTSTAMP:
2543 		if (copy_from_user(&pi->tstamp_config, req->ifr_data,
2544 				   sizeof(pi->tstamp_config)))
2545 			return -EFAULT;
2546 
2547 		if (!is_t4(adapter->params.chip)) {
2548 			switch (pi->tstamp_config.tx_type) {
2549 			case HWTSTAMP_TX_OFF:
2550 			case HWTSTAMP_TX_ON:
2551 				break;
2552 			default:
2553 				return -ERANGE;
2554 			}
2555 
2556 			switch (pi->tstamp_config.rx_filter) {
2557 			case HWTSTAMP_FILTER_NONE:
2558 				pi->rxtstamp = false;
2559 				break;
2560 			case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2561 			case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2562 				cxgb4_ptprx_timestamping(pi, pi->port_id,
2563 							 PTP_TS_L4);
2564 				break;
2565 			case HWTSTAMP_FILTER_PTP_V2_EVENT:
2566 				cxgb4_ptprx_timestamping(pi, pi->port_id,
2567 							 PTP_TS_L2_L4);
2568 				break;
2569 			case HWTSTAMP_FILTER_ALL:
2570 			case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2571 			case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2572 			case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2573 			case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2574 				pi->rxtstamp = true;
2575 				break;
2576 			default:
2577 				pi->tstamp_config.rx_filter =
2578 					HWTSTAMP_FILTER_NONE;
2579 				return -ERANGE;
2580 			}
2581 
2582 			if ((pi->tstamp_config.tx_type == HWTSTAMP_TX_OFF) &&
2583 			    (pi->tstamp_config.rx_filter ==
2584 				HWTSTAMP_FILTER_NONE)) {
2585 				if (cxgb4_ptp_txtype(adapter, pi->port_id) >= 0)
2586 					pi->ptp_enable = false;
2587 			}
2588 
2589 			if (pi->tstamp_config.rx_filter !=
2590 				HWTSTAMP_FILTER_NONE) {
2591 				if (cxgb4_ptp_redirect_rx_packet(adapter,
2592 								 pi) >= 0)
2593 					pi->ptp_enable = true;
2594 			}
2595 		} else {
2596 			/* For T4 Adapters */
			switch (pi->tstamp_config.rx_filter) {
			case HWTSTAMP_FILTER_NONE:
				pi->rxtstamp = false;
				break;
			case HWTSTAMP_FILTER_ALL:
				pi->rxtstamp = true;
				break;
			default:
				pi->tstamp_config.rx_filter =
					HWTSTAMP_FILTER_NONE;
				return -ERANGE;
			}
2609 		}
2610 		return copy_to_user(req->ifr_data, &pi->tstamp_config,
2611 				    sizeof(pi->tstamp_config)) ?
2612 			-EFAULT : 0;
2613 	default:
2614 		return -EOPNOTSUPP;
2615 	}
2616 	return ret;
2617 }
2618 
2619 static void cxgb_set_rxmode(struct net_device *dev)
2620 {
2621 	/* unfortunately we can't return errors to the stack */
2622 	set_rxmode(dev, -1, false);
2623 }
2624 
2625 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2626 {
2627 	int ret;
2628 	struct port_info *pi = netdev_priv(dev);
2629 
2630 	ret = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, new_mtu, -1,
2631 			    -1, -1, -1, true);
2632 	if (!ret)
2633 		dev->mtu = new_mtu;
2634 	return ret;
2635 }
2636 
2637 #ifdef CONFIG_PCI_IOV
2638 static int cxgb4_mgmt_open(struct net_device *dev)
2639 {
2640 	/* Turn carrier off since we don't have to transmit anything on this
2641 	 * interface.
2642 	 */
2643 	netif_carrier_off(dev);
2644 	return 0;
2645 }
2646 
2647 /* Fill MAC address that will be assigned by the FW */
2648 static void cxgb4_mgmt_fill_vf_station_mac_addr(struct adapter *adap)
2649 {
2650 	u8 hw_addr[ETH_ALEN], macaddr[ETH_ALEN];
2651 	unsigned int i, vf, nvfs;
2652 	u16 a, b;
2653 	int err;
2654 	u8 *na;
2655 
2656 	adap->params.pci.vpd_cap_addr = pci_find_capability(adap->pdev,
2657 							    PCI_CAP_ID_VPD);
2658 	err = t4_get_raw_vpd_params(adap, &adap->params.vpd);
2659 	if (err)
2660 		return;
2661 
2662 	na = adap->params.vpd.na;
2663 	for (i = 0; i < ETH_ALEN; i++)
2664 		hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
2665 			      hex2val(na[2 * i + 1]));
2666 
2667 	a = (hw_addr[0] << 8) | hw_addr[1];
2668 	b = (hw_addr[1] << 8) | hw_addr[2];
2669 	a ^= b;
2670 	a |= 0x0200;    /* locally assigned Ethernet MAC address */
2671 	a &= ~0x0100;   /* not a multicast Ethernet MAC address */
2672 	macaddr[0] = a >> 8;
2673 	macaddr[1] = a & 0xff;
2674 
2675 	for (i = 2; i < 5; i++)
2676 		macaddr[i] = hw_addr[i + 1];
2677 
2678 	for (vf = 0, nvfs = pci_sriov_get_totalvfs(adap->pdev);
2679 		vf < nvfs; vf++) {
2680 		macaddr[5] = adap->pf * 16 + vf;
2681 		ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, macaddr);
2682 	}
2683 }
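
/* For illustration: with a VPD base MAC of 00:07:43:12:34:56, the
 * synthesis above gives a = 0x0007 ^ 0x0743 = 0x0744, which becomes
 * 0x0646 after setting the locally-administered bit and clearing the
 * multicast bit, so VF 0 of PF 4 is assigned 06:46:12:34:56:40.
 */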
2684 
2685 static int cxgb4_mgmt_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
2686 {
2687 	struct port_info *pi = netdev_priv(dev);
2688 	struct adapter *adap = pi->adapter;
2689 	int ret;
2690 
2691 	/* verify MAC addr is valid */
2692 	if (!is_valid_ether_addr(mac)) {
2693 		dev_err(pi->adapter->pdev_dev,
2694 			"Invalid Ethernet address %pM for VF %d\n",
2695 			mac, vf);
2696 		return -EINVAL;
2697 	}
2698 
2699 	dev_info(pi->adapter->pdev_dev,
2700 		 "Setting MAC %pM on VF %d\n", mac, vf);
2701 	ret = t4_set_vf_mac_acl(adap, vf + 1, 1, mac);
2702 	if (!ret)
2703 		ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, mac);
2704 	return ret;
2705 }
2706 
2707 static int cxgb4_mgmt_get_vf_config(struct net_device *dev,
2708 				    int vf, struct ifla_vf_info *ivi)
2709 {
2710 	struct port_info *pi = netdev_priv(dev);
2711 	struct adapter *adap = pi->adapter;
2712 	struct vf_info *vfinfo;
2713 
2714 	if (vf >= adap->num_vfs)
2715 		return -EINVAL;
2716 	vfinfo = &adap->vfinfo[vf];
2717 
2718 	ivi->vf = vf;
2719 	ivi->max_tx_rate = vfinfo->tx_rate;
2720 	ivi->min_tx_rate = 0;
2721 	ether_addr_copy(ivi->mac, vfinfo->vf_mac_addr);
2722 	ivi->vlan = vfinfo->vlan;
2723 	return 0;
2724 }
2725 
2726 static int cxgb4_mgmt_get_phys_port_id(struct net_device *dev,
2727 				       struct netdev_phys_item_id *ppid)
2728 {
2729 	struct port_info *pi = netdev_priv(dev);
2730 	unsigned int phy_port_id;
2731 
2732 	phy_port_id = pi->adapter->adap_idx * 10 + pi->port_id;
2733 	ppid->id_len = sizeof(phy_port_id);
2734 	memcpy(ppid->id, &phy_port_id, ppid->id_len);
2735 	return 0;
2736 }
2737 
2738 static int cxgb4_mgmt_set_vf_rate(struct net_device *dev, int vf,
2739 				  int min_tx_rate, int max_tx_rate)
2740 {
2741 	struct port_info *pi = netdev_priv(dev);
2742 	struct adapter *adap = pi->adapter;
2743 	unsigned int link_ok, speed, mtu;
2744 	u32 fw_pfvf, fw_class;
2745 	int class_id = vf;
2746 	int ret;
2747 	u16 pktsize;
2748 
2749 	if (vf >= adap->num_vfs)
2750 		return -EINVAL;
2751 
2752 	if (min_tx_rate) {
2753 		dev_err(adap->pdev_dev,
			"Min tx rate (%d) for VF %d is invalid; only 0 is supported.\n",
2755 			min_tx_rate, vf);
2756 		return -EINVAL;
2757 	}
2758 
2759 	if (max_tx_rate == 0) {
		/* unbind VF from any Traffic Class */
2761 		fw_pfvf =
2762 		    (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
2763 		     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH));
2764 		fw_class = 0xffffffff;
2765 		ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1,
2766 				    &fw_pfvf, &fw_class);
2767 		if (ret) {
2768 			dev_err(adap->pdev_dev,
2769 				"Err %d in unbinding PF %d VF %d from TX Rate Limiting\n",
2770 				ret, adap->pf, vf);
2771 			return -EINVAL;
2772 		}
2773 		dev_info(adap->pdev_dev,
2774 			 "PF %d VF %d is unbound from TX Rate Limiting\n",
2775 			 adap->pf, vf);
2776 		adap->vfinfo[vf].tx_rate = 0;
2777 		return 0;
2778 	}
2779 
2780 	ret = t4_get_link_params(pi, &link_ok, &speed, &mtu);
2781 	if (ret != FW_SUCCESS) {
2782 		dev_err(adap->pdev_dev,
2783 			"Failed to get link information for VF %d\n", vf);
2784 		return -EINVAL;
2785 	}
2786 
2787 	if (!link_ok) {
2788 		dev_err(adap->pdev_dev, "Link down for VF %d\n", vf);
2789 		return -EINVAL;
2790 	}
2791 
2792 	if (max_tx_rate > speed) {
2793 		dev_err(adap->pdev_dev,
			"Max tx rate %d for VF %d can't be > link-speed %u\n",
2795 			max_tx_rate, vf, speed);
2796 		return -EINVAL;
2797 	}
2798 
2799 	pktsize = mtu;
	/* subtract ethhdr size and 4 bytes of CRC, since the f/w appends them */
2801 	pktsize = pktsize - sizeof(struct ethhdr) - 4;
2802 	/* subtract ipv4 hdr size, tcp hdr size to get typical IPv4 MSS size */
2803 	pktsize = pktsize - sizeof(struct iphdr) - sizeof(struct tcphdr);
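	/* e.g. a 1500-byte MTU yields pktsize = 1500 - 14 (ethhdr) -
	 * 4 (CRC) - 20 (iphdr) - 20 (tcphdr) = 1442 bytes, a typical
	 * IPv4 MSS.
	 */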
2804 	/* configure Traffic Class for rate-limiting */
2805 	ret = t4_sched_params(adap, SCHED_CLASS_TYPE_PACKET,
2806 			      SCHED_CLASS_LEVEL_CL_RL,
2807 			      SCHED_CLASS_MODE_CLASS,
2808 			      SCHED_CLASS_RATEUNIT_BITS,
2809 			      SCHED_CLASS_RATEMODE_ABS,
2810 			      pi->tx_chan, class_id, 0,
2811 			      max_tx_rate * 1000, 0, pktsize);
2812 	if (ret) {
2813 		dev_err(adap->pdev_dev, "Err %d for Traffic Class config\n",
2814 			ret);
2815 		return -EINVAL;
2816 	}
2817 	dev_info(adap->pdev_dev,
2818 		 "Class %d with MSS %u configured with rate %u\n",
2819 		 class_id, pktsize, max_tx_rate);
2820 
2821 	/* bind VF to configured Traffic Class */
2822 	fw_pfvf = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
2823 		   FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH));
2824 	fw_class = class_id;
2825 	ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1, &fw_pfvf,
2826 			    &fw_class);
2827 	if (ret) {
2828 		dev_err(adap->pdev_dev,
2829 			"Err %d in binding PF %d VF %d to Traffic Class %d\n",
2830 			ret, adap->pf, vf, class_id);
2831 		return -EINVAL;
2832 	}
2833 	dev_info(adap->pdev_dev, "PF %d VF %d is bound to Class %d\n",
2834 		 adap->pf, vf, class_id);
2835 	adap->vfinfo[vf].tx_rate = max_tx_rate;
2836 	return 0;
2837 }
2838 
2839 static int cxgb4_mgmt_set_vf_vlan(struct net_device *dev, int vf,
2840 				  u16 vlan, u8 qos, __be16 vlan_proto)
2841 {
2842 	struct port_info *pi = netdev_priv(dev);
2843 	struct adapter *adap = pi->adapter;
2844 	int ret;
2845 
2846 	if (vf >= adap->num_vfs || vlan > 4095 || qos > 7)
2847 		return -EINVAL;
2848 
2849 	if (vlan_proto != htons(ETH_P_8021Q) || qos != 0)
2850 		return -EPROTONOSUPPORT;
2851 
2852 	ret = t4_set_vlan_acl(adap, adap->mbox, vf + 1, vlan);
2853 	if (!ret) {
2854 		adap->vfinfo[vf].vlan = vlan;
2855 		return 0;
2856 	}
2857 
2858 	dev_err(adap->pdev_dev, "Err %d %s VLAN ACL for PF/VF %d/%d\n",
2859 		ret, (vlan ? "setting" : "clearing"), adap->pf, vf);
2860 	return ret;
2861 }
2862 #endif /* CONFIG_PCI_IOV */
2863 
2864 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2865 {
2866 	int ret;
2867 	struct sockaddr *addr = p;
2868 	struct port_info *pi = netdev_priv(dev);
2869 
2870 	if (!is_valid_ether_addr(addr->sa_data))
2871 		return -EADDRNOTAVAIL;
2872 
2873 	ret = t4_change_mac(pi->adapter, pi->adapter->pf, pi->viid,
2874 			    pi->xact_addr_filt, addr->sa_data, true, true);
2875 	if (ret < 0)
2876 		return ret;
2877 
2878 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2879 	pi->xact_addr_filt = ret;
2880 	return 0;
2881 }
2882 
2883 #ifdef CONFIG_NET_POLL_CONTROLLER
2884 static void cxgb_netpoll(struct net_device *dev)
2885 {
2886 	struct port_info *pi = netdev_priv(dev);
2887 	struct adapter *adap = pi->adapter;
2888 
2889 	if (adap->flags & USING_MSIX) {
2890 		int i;
2891 		struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
2892 
2893 		for (i = pi->nqsets; i; i--, rx++)
2894 			t4_sge_intr_msix(0, &rx->rspq);
2895 	} else
2896 		t4_intr_handler(adap)(0, adap);
2897 }
2898 #endif
2899 
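/* ndo_set_tx_maxrate hook: the stack calls this when, for example, a
 * rate in Mbps is written to
 * /sys/class/net/<dev>/queues/tx-<n>/tx_maxrate; a write of 0 leaves
 * the queue unbound from any traffic class.
 */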
2900 static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
2901 {
2902 	struct port_info *pi = netdev_priv(dev);
2903 	struct adapter *adap = pi->adapter;
2904 	struct sched_class *e;
2905 	struct ch_sched_params p;
2906 	struct ch_sched_queue qe;
2907 	u32 req_rate;
2908 	int err = 0;
2909 
2910 	if (!can_sched(dev))
2911 		return -ENOTSUPP;
2912 
2913 	if (index < 0 || index > pi->nqsets - 1)
2914 		return -EINVAL;
2915 
2916 	if (!(adap->flags & FULL_INIT_DONE)) {
2917 		dev_err(adap->pdev_dev,
2918 			"Failed to rate limit on queue %d. Link Down?\n",
2919 			index);
2920 		return -EINVAL;
2921 	}
2922 
2923 	/* Convert from Mbps to Kbps */
2924 	req_rate = rate * 1000;
2925 
2926 	/* Max rate is 100 Gbps */
2927 	if (req_rate > SCHED_MAX_RATE_KBPS) {
2928 		dev_err(adap->pdev_dev,
2929 			"Invalid rate %u Mbps, Max rate is %u Mbps\n",
2930 			rate, SCHED_MAX_RATE_KBPS / 1000);
2931 		return -ERANGE;
2932 	}
2933 
2934 	/* First unbind the queue from any existing class */
2935 	memset(&qe, 0, sizeof(qe));
2936 	qe.queue = index;
2937 	qe.class = SCHED_CLS_NONE;
2938 
2939 	err = cxgb4_sched_class_unbind(dev, (void *)(&qe), SCHED_QUEUE);
2940 	if (err) {
2941 		dev_err(adap->pdev_dev,
2942 			"Unbinding Queue %d on port %d fail. Err: %d\n",
2943 			index, pi->port_id, err);
2944 		return err;
2945 	}
2946 
2947 	/* Queue already unbound */
2948 	if (!req_rate)
2949 		return 0;
2950 
2951 	/* Fetch any available unused or matching scheduling class */
2952 	memset(&p, 0, sizeof(p));
2953 	p.type = SCHED_CLASS_TYPE_PACKET;
2954 	p.u.params.level    = SCHED_CLASS_LEVEL_CL_RL;
2955 	p.u.params.mode     = SCHED_CLASS_MODE_CLASS;
2956 	p.u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS;
2957 	p.u.params.ratemode = SCHED_CLASS_RATEMODE_ABS;
2958 	p.u.params.channel  = pi->tx_chan;
2959 	p.u.params.class    = SCHED_CLS_NONE;
2960 	p.u.params.minrate  = 0;
2961 	p.u.params.maxrate  = req_rate;
2962 	p.u.params.weight   = 0;
2963 	p.u.params.pktsize  = dev->mtu;
2964 
2965 	e = cxgb4_sched_class_alloc(dev, &p);
2966 	if (!e)
2967 		return -ENOMEM;
2968 
2969 	/* Bind the queue to a scheduling class */
2970 	memset(&qe, 0, sizeof(qe));
2971 	qe.queue = index;
2972 	qe.class = e->idx;
2973 
2974 	err = cxgb4_sched_class_bind(dev, (void *)(&qe), SCHED_QUEUE);
2975 	if (err)
2976 		dev_err(adap->pdev_dev,
2977 			"Queue rate limiting failed. Err: %d\n", err);
2978 	return err;
2979 }
2980 
2981 static int cxgb_setup_tc_flower(struct net_device *dev,
2982 				struct tc_cls_flower_offload *cls_flower)
2983 {
2984 	switch (cls_flower->command) {
2985 	case TC_CLSFLOWER_REPLACE:
2986 		return cxgb4_tc_flower_replace(dev, cls_flower);
2987 	case TC_CLSFLOWER_DESTROY:
2988 		return cxgb4_tc_flower_destroy(dev, cls_flower);
2989 	case TC_CLSFLOWER_STATS:
2990 		return cxgb4_tc_flower_stats(dev, cls_flower);
2991 	default:
2992 		return -EOPNOTSUPP;
2993 	}
2994 }
2995 
2996 static int cxgb_setup_tc_cls_u32(struct net_device *dev,
2997 				 struct tc_cls_u32_offload *cls_u32)
2998 {
2999 	switch (cls_u32->command) {
3000 	case TC_CLSU32_NEW_KNODE:
3001 	case TC_CLSU32_REPLACE_KNODE:
3002 		return cxgb4_config_knode(dev, cls_u32);
3003 	case TC_CLSU32_DELETE_KNODE:
3004 		return cxgb4_delete_knode(dev, cls_u32);
3005 	default:
3006 		return -EOPNOTSUPP;
3007 	}
3008 }
3009 
3010 static int cxgb_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
3011 				  void *cb_priv)
3012 {
3013 	struct net_device *dev = cb_priv;
3014 	struct port_info *pi = netdev2pinfo(dev);
3015 	struct adapter *adap = netdev2adap(dev);
3016 
3017 	if (!(adap->flags & FULL_INIT_DONE)) {
3018 		dev_err(adap->pdev_dev,
3019 			"Failed to setup tc on port %d. Link Down?\n",
3020 			pi->port_id);
3021 		return -EINVAL;
3022 	}
3023 
3024 	if (!tc_cls_can_offload_and_chain0(dev, type_data))
3025 		return -EOPNOTSUPP;
3026 
3027 	switch (type) {
3028 	case TC_SETUP_CLSU32:
3029 		return cxgb_setup_tc_cls_u32(dev, type_data);
3030 	case TC_SETUP_CLSFLOWER:
3031 		return cxgb_setup_tc_flower(dev, type_data);
3032 	default:
3033 		return -EOPNOTSUPP;
3034 	}
3035 }
3036 
3037 static int cxgb_setup_tc_block(struct net_device *dev,
3038 			       struct tc_block_offload *f)
3039 {
3040 	struct port_info *pi = netdev2pinfo(dev);
3041 
3042 	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
3043 		return -EOPNOTSUPP;
3044 
3045 	switch (f->command) {
3046 	case TC_BLOCK_BIND:
3047 		return tcf_block_cb_register(f->block, cxgb_setup_tc_block_cb,
3048 					     pi, dev, f->extack);
3049 	case TC_BLOCK_UNBIND:
3050 		tcf_block_cb_unregister(f->block, cxgb_setup_tc_block_cb, pi);
3051 		return 0;
3052 	default:
3053 		return -EOPNOTSUPP;
3054 	}
3055 }
3056 
3057 static int cxgb_setup_tc(struct net_device *dev, enum tc_setup_type type,
3058 			 void *type_data)
3059 {
3060 	switch (type) {
3061 	case TC_SETUP_BLOCK:
3062 		return cxgb_setup_tc_block(dev, type_data);
3063 	default:
3064 		return -EOPNOTSUPP;
3065 	}
3066 }
3067 
3068 static void cxgb_del_udp_tunnel(struct net_device *netdev,
3069 				struct udp_tunnel_info *ti)
3070 {
3071 	struct port_info *pi = netdev_priv(netdev);
3072 	struct adapter *adapter = pi->adapter;
3073 	unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
3074 	u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
3075 	int ret = 0, i;
3076 
3077 	if (chip_ver < CHELSIO_T6)
3078 		return;
3079 
3080 	switch (ti->type) {
3081 	case UDP_TUNNEL_TYPE_VXLAN:
3082 		if (!adapter->vxlan_port_cnt ||
3083 		    adapter->vxlan_port != ti->port)
3084 			return; /* Invalid VxLAN destination port */
3085 
3086 		adapter->vxlan_port_cnt--;
3087 		if (adapter->vxlan_port_cnt)
3088 			return;
3089 
3090 		adapter->vxlan_port = 0;
3091 		t4_write_reg(adapter, MPS_RX_VXLAN_TYPE_A, 0);
3092 		break;
3093 	case UDP_TUNNEL_TYPE_GENEVE:
3094 		if (!adapter->geneve_port_cnt ||
3095 		    adapter->geneve_port != ti->port)
3096 			return; /* Invalid GENEVE destination port */
3097 
3098 		adapter->geneve_port_cnt--;
3099 		if (adapter->geneve_port_cnt)
3100 			return;
3101 
3102 		adapter->geneve_port = 0;
3103 		t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A, 0);
3104 		break;
3105 	default:
3106 		return;
3107 	}
3108 
3109 	/* Matchall mac entries can be deleted only after all tunnel ports
3110 	 * are brought down or removed.
3111 	 */
3112 	if (!adapter->rawf_cnt)
3113 		return;
3114 	for_each_port(adapter, i) {
3115 		pi = adap2pinfo(adapter, i);
3116 		ret = t4_free_raw_mac_filt(adapter, pi->viid,
3117 					   match_all_mac, match_all_mac,
3118 					   adapter->rawf_start +
3119 					    pi->port_id,
3120 					   1, pi->port_id, false);
3121 		if (ret < 0) {
			netdev_info(netdev, "Failed to free mac filter entry for port %d\n",
3123 				    i);
3124 			return;
3125 		}
3126 		atomic_dec(&adapter->mps_encap[adapter->rawf_start +
3127 			   pi->port_id].refcnt);
3128 	}
3129 }
3130 
3131 static void cxgb_add_udp_tunnel(struct net_device *netdev,
3132 				struct udp_tunnel_info *ti)
3133 {
3134 	struct port_info *pi = netdev_priv(netdev);
3135 	struct adapter *adapter = pi->adapter;
3136 	unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
3137 	u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
3138 	int i, ret;
3139 
3140 	if (chip_ver < CHELSIO_T6 || !adapter->rawf_cnt)
3141 		return;
3142 
3143 	switch (ti->type) {
3144 	case UDP_TUNNEL_TYPE_VXLAN:
3145 		/* Callback for adding vxlan port can be called with the same
3146 		 * port for both IPv4 and IPv6. We should not disable the
3147 		 * offloading when the same port for both protocols is added
3148 		 * and later one of them is removed.
3149 		 */
3150 		if (adapter->vxlan_port_cnt &&
3151 		    adapter->vxlan_port == ti->port) {
3152 			adapter->vxlan_port_cnt++;
3153 			return;
3154 		}
3155 
3156 		/* We will support only one VxLAN port */
3157 		if (adapter->vxlan_port_cnt) {
3158 			netdev_info(netdev, "UDP port %d already offloaded, not adding port %d\n",
3159 				    be16_to_cpu(adapter->vxlan_port),
3160 				    be16_to_cpu(ti->port));
3161 			return;
3162 		}
3163 
3164 		adapter->vxlan_port = ti->port;
3165 		adapter->vxlan_port_cnt = 1;
3166 
3167 		t4_write_reg(adapter, MPS_RX_VXLAN_TYPE_A,
3168 			     VXLAN_V(be16_to_cpu(ti->port)) | VXLAN_EN_F);
3169 		break;
3170 	case UDP_TUNNEL_TYPE_GENEVE:
3171 		if (adapter->geneve_port_cnt &&
3172 		    adapter->geneve_port == ti->port) {
3173 			adapter->geneve_port_cnt++;
3174 			return;
3175 		}
3176 
3177 		/* We will support only one GENEVE port */
3178 		if (adapter->geneve_port_cnt) {
3179 			netdev_info(netdev, "UDP port %d already offloaded, not adding port %d\n",
3180 				    be16_to_cpu(adapter->geneve_port),
3181 				    be16_to_cpu(ti->port));
3182 			return;
3183 		}
3184 
3185 		adapter->geneve_port = ti->port;
3186 		adapter->geneve_port_cnt = 1;
3187 
3188 		t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A,
3189 			     GENEVE_V(be16_to_cpu(ti->port)) | GENEVE_EN_F);
3190 		break;
3191 	default:
3192 		return;
3193 	}
3194 
3195 	/* Create a 'match all' mac filter entry for inner mac,
3196 	 * if raw mac interface is supported. Once the linux kernel provides
3197 	 * driver entry points for adding/deleting the inner mac addresses,
 * we will remove this 'match all' entry and fall back to adding
3199 	 * exact match filters.
3200 	 */
3201 	for_each_port(adapter, i) {
3202 		pi = adap2pinfo(adapter, i);
3203 
3204 		ret = t4_alloc_raw_mac_filt(adapter, pi->viid,
3205 					    match_all_mac,
3206 					    match_all_mac,
3207 					    adapter->rawf_start +
3208 					    pi->port_id,
3209 					    1, pi->port_id, false);
3210 		if (ret < 0) {
3211 			netdev_info(netdev, "Failed to allocate a mac filter entry, not adding port %d\n",
3212 				    be16_to_cpu(ti->port));
3213 			cxgb_del_udp_tunnel(netdev, ti);
3214 			return;
3215 		}
3216 		atomic_inc(&adapter->mps_encap[ret].refcnt);
3217 	}
3218 }
3219 
3220 static netdev_features_t cxgb_features_check(struct sk_buff *skb,
3221 					     struct net_device *dev,
3222 					     netdev_features_t features)
3223 {
3224 	struct port_info *pi = netdev_priv(dev);
3225 	struct adapter *adapter = pi->adapter;
3226 
3227 	if (CHELSIO_CHIP_VERSION(adapter->params.chip) < CHELSIO_T6)
3228 		return features;
3229 
3230 	/* Check if hw supports offload for this packet */
3231 	if (!skb->encapsulation || cxgb_encap_offload_supported(skb))
3232 		return features;
3233 
3234 	/* Offload is not supported for this encapsulated packet */
3235 	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
3236 }
3237 
3238 static netdev_features_t cxgb_fix_features(struct net_device *dev,
3239 					   netdev_features_t features)
3240 {
3241 	/* Disable GRO, if RX_CSUM is disabled */
3242 	if (!(features & NETIF_F_RXCSUM))
3243 		features &= ~NETIF_F_GRO;
3244 
3245 	return features;
3246 }
3247 
3248 static const struct net_device_ops cxgb4_netdev_ops = {
3249 	.ndo_open             = cxgb_open,
3250 	.ndo_stop             = cxgb_close,
3251 	.ndo_start_xmit       = t4_start_xmit,
	.ndo_select_queue     = cxgb_select_queue,
3253 	.ndo_get_stats64      = cxgb_get_stats,
3254 	.ndo_set_rx_mode      = cxgb_set_rxmode,
3255 	.ndo_set_mac_address  = cxgb_set_mac_addr,
3256 	.ndo_set_features     = cxgb_set_features,
3257 	.ndo_validate_addr    = eth_validate_addr,
3258 	.ndo_do_ioctl         = cxgb_ioctl,
3259 	.ndo_change_mtu       = cxgb_change_mtu,
3260 #ifdef CONFIG_NET_POLL_CONTROLLER
3261 	.ndo_poll_controller  = cxgb_netpoll,
3262 #endif
3263 #ifdef CONFIG_CHELSIO_T4_FCOE
3264 	.ndo_fcoe_enable      = cxgb_fcoe_enable,
3265 	.ndo_fcoe_disable     = cxgb_fcoe_disable,
3266 #endif /* CONFIG_CHELSIO_T4_FCOE */
3267 	.ndo_set_tx_maxrate   = cxgb_set_tx_maxrate,
3268 	.ndo_setup_tc         = cxgb_setup_tc,
3269 	.ndo_udp_tunnel_add   = cxgb_add_udp_tunnel,
3270 	.ndo_udp_tunnel_del   = cxgb_del_udp_tunnel,
3271 	.ndo_features_check   = cxgb_features_check,
3272 	.ndo_fix_features     = cxgb_fix_features,
3273 };
3274 
3275 #ifdef CONFIG_PCI_IOV
3276 static const struct net_device_ops cxgb4_mgmt_netdev_ops = {
3277 	.ndo_open             = cxgb4_mgmt_open,
3278 	.ndo_set_vf_mac       = cxgb4_mgmt_set_vf_mac,
3279 	.ndo_get_vf_config    = cxgb4_mgmt_get_vf_config,
3280 	.ndo_set_vf_rate      = cxgb4_mgmt_set_vf_rate,
3281 	.ndo_get_phys_port_id = cxgb4_mgmt_get_phys_port_id,
3282 	.ndo_set_vf_vlan      = cxgb4_mgmt_set_vf_vlan,
3283 };
3284 #endif
3285 
3286 static void cxgb4_mgmt_get_drvinfo(struct net_device *dev,
3287 				   struct ethtool_drvinfo *info)
3288 {
3289 	struct adapter *adapter = netdev2adap(dev);
3290 
3291 	strlcpy(info->driver, cxgb4_driver_name, sizeof(info->driver));
3292 	strlcpy(info->version, cxgb4_driver_version,
3293 		sizeof(info->version));
3294 	strlcpy(info->bus_info, pci_name(adapter->pdev),
3295 		sizeof(info->bus_info));
3296 }
3297 
3298 static const struct ethtool_ops cxgb4_mgmt_ethtool_ops = {
3299 	.get_drvinfo       = cxgb4_mgmt_get_drvinfo,
3300 };
3301 
3302 static void notify_fatal_err(struct work_struct *work)
3303 {
3304 	struct adapter *adap;
3305 
3306 	adap = container_of(work, struct adapter, fatal_err_notify_task);
3307 	notify_ulds(adap, CXGB4_STATE_FATAL_ERROR);
3308 }
3309 
3310 void t4_fatal_err(struct adapter *adap)
3311 {
3312 	int port;
3313 
3314 	if (pci_channel_offline(adap->pdev))
3315 		return;
3316 
3317 	/* Disable the SGE since ULDs are going to free resources that
3318 	 * could be exposed to the adapter.  RDMA MWs for example...
3319 	 */
3320 	t4_shutdown_adapter(adap);
3321 	for_each_port(adap, port) {
3322 		struct net_device *dev = adap->port[port];
3323 
3324 		/* If we get here in very early initialization the network
3325 		 * devices may not have been set up yet.
3326 		 */
3327 		if (!dev)
3328 			continue;
3329 
3330 		netif_tx_stop_all_queues(dev);
3331 		netif_carrier_off(dev);
3332 	}
3333 	dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
3334 	queue_work(adap->workq, &adap->fatal_err_notify_task);
3335 }
3336 
3337 static void setup_memwin(struct adapter *adap)
3338 {
3339 	u32 nic_win_base = t4_get_util_window(adap);
3340 
3341 	t4_setup_memwin(adap, nic_win_base, MEMWIN_NIC);
3342 }
3343 
3344 static void setup_memwin_rdma(struct adapter *adap)
3345 {
3346 	if (adap->vres.ocq.size) {
3347 		u32 start;
3348 		unsigned int sz_kb;
3349 
3350 		start = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_2);
3351 		start &= PCI_BASE_ADDRESS_MEM_MASK;
3352 		start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
3353 		sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
3354 		t4_write_reg(adap,
3355 			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 3),
3356 			     start | BIR_V(1) | WINDOW_V(ilog2(sz_kb)));
3357 		t4_write_reg(adap,
3358 			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3),
3359 			     adap->vres.ocq.start);
3360 		t4_read_reg(adap,
3361 			    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3));
3362 	}
3363 }
3364 
3365 /* HMA Definitions */
3366 
/* The maximum number of addresses that can be sent in a single FW cmd */
3368 #define HMA_MAX_ADDR_IN_CMD	5
3369 
3370 #define HMA_PAGE_SIZE		PAGE_SIZE
3371 
3372 #define HMA_MAX_NO_FW_ADDRESS	(16 << 10)  /* FW supports 16K addresses */
3373 
3374 #define HMA_PAGE_ORDER					\
3375 	((HMA_PAGE_SIZE < HMA_MAX_NO_FW_ADDRESS) ?	\
3376 	ilog2(HMA_MAX_NO_FW_ADDRESS / HMA_PAGE_SIZE) : 0)
3377 
3378 /* The minimum and maximum possible HMA sizes that can be specified in the FW
 * configuration (in units of MB).
3380  */
3381 #define HMA_MIN_TOTAL_SIZE	1
3382 #define HMA_MAX_TOTAL_SIZE				\
3383 	(((HMA_PAGE_SIZE << HMA_PAGE_ORDER) *		\
3384 	  HMA_MAX_NO_FW_ADDRESS) >> 20)
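
/* With the common 4KB PAGE_SIZE this works out to HMA_PAGE_ORDER = 2,
 * i.e. 16KB scatterlist chunks, and a maximum HMA region of
 * (4KB << 2) * 16K addresses = 256MB.
 */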
3385 
3386 static void adap_free_hma_mem(struct adapter *adapter)
3387 {
3388 	struct scatterlist *iter;
3389 	struct page *page;
3390 	int i;
3391 
3392 	if (!adapter->hma.sgt)
3393 		return;
3394 
3395 	if (adapter->hma.flags & HMA_DMA_MAPPED_FLAG) {
3396 		dma_unmap_sg(adapter->pdev_dev, adapter->hma.sgt->sgl,
3397 			     adapter->hma.sgt->nents, PCI_DMA_BIDIRECTIONAL);
3398 		adapter->hma.flags &= ~HMA_DMA_MAPPED_FLAG;
3399 	}
3400 
3401 	for_each_sg(adapter->hma.sgt->sgl, iter,
3402 		    adapter->hma.sgt->orig_nents, i) {
3403 		page = sg_page(iter);
3404 		if (page)
3405 			__free_pages(page, HMA_PAGE_ORDER);
3406 	}
3407 
3408 	kfree(adapter->hma.phy_addr);
3409 	sg_free_table(adapter->hma.sgt);
3410 	kfree(adapter->hma.sgt);
3411 	adapter->hma.sgt = NULL;
3412 }
3413 
3414 static int adap_config_hma(struct adapter *adapter)
3415 {
3416 	struct scatterlist *sgl, *iter;
3417 	struct sg_table *sgt;
3418 	struct page *newpage;
3419 	unsigned int i, j, k;
3420 	u32 param, hma_size;
3421 	unsigned int ncmds;
3422 	size_t page_size;
3423 	u32 page_order;
3424 	int node, ret;
3425 
3426 	/* HMA is supported only for T6+ cards.
3427 	 * Avoid initializing HMA in kdump kernels.
3428 	 */
3429 	if (is_kdump_kernel() ||
3430 	    CHELSIO_CHIP_VERSION(adapter->params.chip) < CHELSIO_T6)
3431 		return 0;
3432 
3433 	/* Get the HMA region size required by fw */
3434 	param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3435 		 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_HMA_SIZE));
3436 	ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
3437 			      1, &param, &hma_size);
	/* An error means the card has its own memory or HMA is not supported
	 * by the firmware; return without error.
3440 	 */
3441 	if (ret || !hma_size)
3442 		return 0;
3443 
3444 	if (hma_size < HMA_MIN_TOTAL_SIZE ||
3445 	    hma_size > HMA_MAX_TOTAL_SIZE) {
3446 		dev_err(adapter->pdev_dev,
			"HMA size %uMB beyond bounds (%u-%lu)MB\n",
3448 			hma_size, HMA_MIN_TOTAL_SIZE, HMA_MAX_TOTAL_SIZE);
3449 		return -EINVAL;
3450 	}
3451 
3452 	page_size = HMA_PAGE_SIZE;
3453 	page_order = HMA_PAGE_ORDER;
3454 	adapter->hma.sgt = kzalloc(sizeof(*adapter->hma.sgt), GFP_KERNEL);
3455 	if (unlikely(!adapter->hma.sgt)) {
3456 		dev_err(adapter->pdev_dev, "HMA SG table allocation failed\n");
3457 		return -ENOMEM;
3458 	}
3459 	sgt = adapter->hma.sgt;
	/* FW-returned value will be in MB */
3462 	sgt->orig_nents = (hma_size << 20) / (page_size << page_order);
3463 	if (sg_alloc_table(sgt, sgt->orig_nents, GFP_KERNEL)) {
3464 		dev_err(adapter->pdev_dev, "HMA SGL allocation failed\n");
3465 		kfree(adapter->hma.sgt);
3466 		adapter->hma.sgt = NULL;
3467 		return -ENOMEM;
3468 	}
3469 
3470 	sgl = adapter->hma.sgt->sgl;
3471 	node = dev_to_node(adapter->pdev_dev);
3472 	for_each_sg(sgl, iter, sgt->orig_nents, i) {
3473 		newpage = alloc_pages_node(node, __GFP_NOWARN | GFP_KERNEL |
3474 					   __GFP_ZERO, page_order);
3475 		if (!newpage) {
3476 			dev_err(adapter->pdev_dev,
3477 				"Not enough memory for HMA page allocation\n");
3478 			ret = -ENOMEM;
3479 			goto free_hma;
3480 		}
3481 		sg_set_page(iter, newpage, page_size << page_order, 0);
3482 	}
3483 
3484 	sgt->nents = dma_map_sg(adapter->pdev_dev, sgl, sgt->orig_nents,
3485 				DMA_BIDIRECTIONAL);
3486 	if (!sgt->nents) {
3487 		dev_err(adapter->pdev_dev,
			"Not enough memory for HMA DMA mapping\n");
3489 		ret = -ENOMEM;
3490 		goto free_hma;
3491 	}
3492 	adapter->hma.flags |= HMA_DMA_MAPPED_FLAG;
3493 
3494 	adapter->hma.phy_addr = kcalloc(sgt->nents, sizeof(dma_addr_t),
3495 					GFP_KERNEL);
	if (unlikely(!adapter->hma.phy_addr)) {
		ret = -ENOMEM;
		goto free_hma;
	}
3498 
	/* Record the DMA address of each mapped chunk for the FW commands */
	for_each_sg(sgl, iter, sgt->nents, i)
		adapter->hma.phy_addr[i] = sg_dma_address(iter);
3503 
3504 	ncmds = DIV_ROUND_UP(sgt->nents, HMA_MAX_ADDR_IN_CMD);
3505 	/* Pass on the addresses to firmware */
3506 	for (i = 0, k = 0; i < ncmds; i++, k += HMA_MAX_ADDR_IN_CMD) {
3507 		struct fw_hma_cmd hma_cmd;
3508 		u8 naddr = HMA_MAX_ADDR_IN_CMD;
3509 		u8 soc = 0, eoc = 0;
3510 		u8 hma_mode = 1; /* Presently we support only Page table mode */
3511 
3512 		soc = (i == 0) ? 1 : 0;
3513 		eoc = (i == ncmds - 1) ? 1 : 0;
3514 
3515 		/* For last cmd, set naddr corresponding to remaining
3516 		 * addresses
3517 		 */
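		/* e.g. with 32 mapped entries: ncmds = 7 and the final
		 * command carries 32 % 5 = 2 addresses.
		 */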
3518 		if (i == ncmds - 1) {
3519 			naddr = sgt->nents % HMA_MAX_ADDR_IN_CMD;
3520 			naddr = naddr ? naddr : HMA_MAX_ADDR_IN_CMD;
3521 		}
3522 		memset(&hma_cmd, 0, sizeof(hma_cmd));
3523 		hma_cmd.op_pkd = htonl(FW_CMD_OP_V(FW_HMA_CMD) |
3524 				       FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
3525 		hma_cmd.retval_len16 = htonl(FW_LEN16(hma_cmd));
3526 
3527 		hma_cmd.mode_to_pcie_params =
3528 			htonl(FW_HMA_CMD_MODE_V(hma_mode) |
3529 			      FW_HMA_CMD_SOC_V(soc) | FW_HMA_CMD_EOC_V(eoc));
3530 
		/* HMA cmd size specified in MB */
3532 		hma_cmd.naddr_size =
3533 			htonl(FW_HMA_CMD_SIZE_V(hma_size) |
3534 			      FW_HMA_CMD_NADDR_V(naddr));
3535 
3536 		/* Total Page size specified in units of 4K */
3537 		hma_cmd.addr_size_pkd =
3538 			htonl(FW_HMA_CMD_ADDR_SIZE_V
3539 				((page_size << page_order) >> 12));
3540 
3541 		/* Fill the 5 addresses */
3542 		for (j = 0; j < naddr; j++) {
3543 			hma_cmd.phy_address[j] =
3544 				cpu_to_be64(adapter->hma.phy_addr[j + k]);
3545 		}
3546 		ret = t4_wr_mbox(adapter, adapter->mbox, &hma_cmd,
3547 				 sizeof(hma_cmd), &hma_cmd);
3548 		if (ret) {
3549 			dev_err(adapter->pdev_dev,
3550 				"HMA FW command failed with err %d\n", ret);
3551 			goto free_hma;
3552 		}
3553 	}
3554 
3555 	if (!ret)
3556 		dev_info(adapter->pdev_dev,
3557 			 "Reserved %uMB host memory for HMA\n", hma_size);
3558 	return ret;
3559 
3560 free_hma:
3561 	adap_free_hma_mem(adapter);
3562 	return ret;
3563 }
3564 
3565 static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
3566 {
3567 	u32 v;
3568 	int ret;
3569 
	/* Now that we've successfully configured and initialized the adapter,
	 * we can ask the Firmware what resources it has provisioned for us.
3572 	 */
3573 	ret = t4_get_pfres(adap);
3574 	if (ret) {
3575 		dev_err(adap->pdev_dev,
3576 			"Unable to retrieve resource provisioning information\n");
3577 		return ret;
3578 	}
3579 
3580 	/* get device capabilities */
3581 	memset(c, 0, sizeof(*c));
3582 	c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
3583 			       FW_CMD_REQUEST_F | FW_CMD_READ_F);
3584 	c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
3585 	ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), c);
3586 	if (ret < 0)
3587 		return ret;
3588 
3589 	c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
3590 			       FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
3591 	ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), NULL);
3592 	if (ret < 0)
3593 		return ret;
3594 
3595 	ret = t4_config_glbl_rss(adap, adap->pf,
3596 				 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
3597 				 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F |
3598 				 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F);
3599 	if (ret < 0)
3600 		return ret;
3601 
3602 	ret = t4_cfg_pfvf(adap, adap->mbox, adap->pf, 0, adap->sge.egr_sz, 64,
3603 			  MAX_INGQ, 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF,
3604 			  FW_CMD_CAP_PF);
3605 	if (ret < 0)
3606 		return ret;
3607 
3608 	t4_sge_init(adap);
3609 
3610 	/* tweak some settings */
3611 	t4_write_reg(adap, TP_SHIFT_CNT_A, 0x64f8849);
3612 	t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(PAGE_SHIFT - 12));
3613 	t4_write_reg(adap, TP_PIO_ADDR_A, TP_INGRESS_CONFIG_A);
3614 	v = t4_read_reg(adap, TP_PIO_DATA_A);
3615 	t4_write_reg(adap, TP_PIO_DATA_A, v & ~CSUM_HAS_PSEUDO_HDR_F);
3616 
3617 	/* first 4 Tx modulation queues point to consecutive Tx channels */
3618 	adap->params.tp.tx_modq_map = 0xE4;
3619 	t4_write_reg(adap, TP_TX_MOD_QUEUE_REQ_MAP_A,
3620 		     TX_MOD_QUEUE_REQ_MAP_V(adap->params.tp.tx_modq_map));
3621 
3622 	/* associate each Tx modulation queue with consecutive Tx channels */
3623 	v = 0x84218421;
3624 	t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
3625 			  &v, 1, TP_TX_SCHED_HDR_A);
3626 	t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
3627 			  &v, 1, TP_TX_SCHED_FIFO_A);
3628 	t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
3629 			  &v, 1, TP_TX_SCHED_PCMD_A);
3630 
3631 #define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
3632 	if (is_offload(adap)) {
3633 		t4_write_reg(adap, TP_TX_MOD_QUEUE_WEIGHT0_A,
3634 			     TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3635 			     TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3636 			     TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3637 			     TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
3638 		t4_write_reg(adap, TP_TX_MOD_CHANNEL_WEIGHT_A,
3639 			     TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3640 			     TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3641 			     TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3642 			     TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
3643 	}
3644 
3645 	/* get basic stuff going */
3646 	return t4_early_init(adap, adap->pf);
3647 }
3648 
3649 /*
3650  * Max # of ATIDs.  The absolute HW max is 16K but we keep it lower.
3651  */
3652 #define MAX_ATIDS 8192U
3653 
3654 /*
3655  * Phase 0 of initialization: contact FW, obtain config, perform basic init.
3656  *
3657  * If the firmware we're dealing with has Configuration File support, then
3658  * we use that to perform all configuration
3659  */
3660 
3661 /*
3662  * Tweak configuration based on module parameters, etc.  Most of these have
3663  * defaults assigned to them by Firmware Configuration Files (if we're using
3664  * them) but need to be explicitly set if we're using hard-coded
3665  * initialization.  But even in the case of using Firmware Configuration
3666  * Files, we'd like to expose the ability to change these via module
3667  * parameters so these are essentially common tweaks/settings for
3668  * Configuration Files and hard-coded initialization ...
3669  */
3670 static int adap_init0_tweaks(struct adapter *adapter)
3671 {
3672 	/*
3673 	 * Fix up various Host-Dependent Parameters like Page Size, Cache
3674 	 * Line Size, etc.  The firmware default is for a 4KB Page Size and
3675 	 * 64B Cache Line Size ...
3676 	 */
3677 	t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);
3678 
3679 	/*
3680 	 * Process module parameters which affect early initialization.
3681 	 */
3682 	if (rx_dma_offset != 2 && rx_dma_offset != 0) {
3683 		dev_err(&adapter->pdev->dev,
3684 			"Ignoring illegal rx_dma_offset=%d, using 2\n",
3685 			rx_dma_offset);
3686 		rx_dma_offset = 2;
3687 	}
3688 	t4_set_reg_field(adapter, SGE_CONTROL_A,
3689 			 PKTSHIFT_V(PKTSHIFT_M),
3690 			 PKTSHIFT_V(rx_dma_offset));
3691 
3692 	/*
3693 	 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
3694 	 * adds the pseudo header itself.
3695 	 */
3696 	t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG_A,
3697 			       CSUM_HAS_PSEUDO_HDR_F, 0);
3698 
3699 	return 0;
3700 }
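
/* Usage sketch for the tweak above (assuming rx_dma_offset is the module
 * parameter declared earlier in this file): the RX payload offset can be
 * changed at module load time, e.g.
 *
 *	# modprobe cxgb4 rx_dma_offset=0
 *
 * Any value other than 0 or 2 is rejected with an error message and
 * replaced by the default of 2.
 */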
3701 
/* 10Gb/s-BT PHY Support.  Chip-external 10Gb/s-BT PHYs are complex chips
 * unto themselves, and they contain their own firmware to perform their
 * tasks ...
3705  */
3706 static int phy_aq1202_version(const u8 *phy_fw_data,
3707 			      size_t phy_fw_size)
3708 {
3709 	int offset;
3710 
	/* At offset 0x8 is the primary image's starting offset,
	 * which is 3 bytes wide.
	 *
	 * At offset 0xa of the primary image is the offset of the
	 * DRAM segment, which is 3 bytes wide.
	 *
	 * The FW version is at offset 0x27e of the DRAM segment and
	 * is 2 bytes wide.
3719 	 */
3720 	#define be16(__p) (((__p)[0] << 8) | (__p)[1])
3721 	#define le16(__p) ((__p)[0] | ((__p)[1] << 8))
3722 	#define le24(__p) (le16(__p) | ((__p)[2] << 16))
3723 
3724 	offset = le24(phy_fw_data + 0x8) << 12;
3725 	offset = le24(phy_fw_data + offset + 0xa);
3726 	return be16(phy_fw_data + offset + 0x27e);
3727 
3728 	#undef be16
3729 	#undef le16
3730 	#undef le24
3731 }
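
/* Worked example of the offset arithmetic above, using hypothetical
 * image bytes (24-bit fields are little-endian):
 *
 *	bytes at 0x8 = 02 00 00          -> le24 = 0x2, << 12 = 0x2000
 *	bytes at 0x2000 + 0xa = 00 30 00 -> le24 = 0x3000
 *	be16 at 0x3000 + 0x27e           -> the 2-byte FW version
 */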
3732 
3733 static struct info_10gbt_phy_fw {
3734 	unsigned int phy_fw_id;		/* PCI Device ID */
3735 	char *phy_fw_file;		/* /lib/firmware/ PHY Firmware file */
3736 	int (*phy_fw_version)(const u8 *phy_fw_data, size_t phy_fw_size);
3737 	int phy_flash;			/* Has FLASH for PHY Firmware */
3738 } phy_info_array[] = {
3739 	{
3740 		PHY_AQ1202_DEVICEID,
3741 		PHY_AQ1202_FIRMWARE,
3742 		phy_aq1202_version,
3743 		1,
3744 	},
3745 	{
3746 		PHY_BCM84834_DEVICEID,
3747 		PHY_BCM84834_FIRMWARE,
3748 		NULL,
3749 		0,
3750 	},
3751 	{ 0, NULL, NULL },
3752 };
3753 
3754 static struct info_10gbt_phy_fw *find_phy_info(int devid)
3755 {
3756 	int i;
3757 
3758 	for (i = 0; i < ARRAY_SIZE(phy_info_array); i++) {
3759 		if (phy_info_array[i].phy_fw_id == devid)
3760 			return &phy_info_array[i];
3761 	}
3762 	return NULL;
3763 }
3764 
3765 /* Handle updating of chip-external 10Gb/s-BT PHY firmware.  This needs to
3766  * happen after the FW_RESET_CMD but before the FW_INITIALIZE_CMD.  On error
3767  * we return a negative error number.  If we transfer new firmware we return 1
3768  * (from t4_load_phy_fw()).  If we don't do anything we return 0.
3769  */
3770 static int adap_init0_phy(struct adapter *adap)
3771 {
3772 	const struct firmware *phyf;
3773 	int ret;
3774 	struct info_10gbt_phy_fw *phy_info;
3775 
3776 	/* Use the device ID to determine which PHY file to flash.
3777 	 */
3778 	phy_info = find_phy_info(adap->pdev->device);
3779 	if (!phy_info) {
3780 		dev_warn(adap->pdev_dev,
3781 			 "No PHY Firmware file found for this PHY\n");
3782 		return -EOPNOTSUPP;
3783 	}
3784 
3785 	/* If we have a T4 PHY firmware file under /lib/firmware/cxgb4/, then
3786 	 * use that. The adapter firmware provides us with a memory buffer
3787 	 * where we can load a PHY firmware file from the host if we want to
	 * override the PHY firmware file in FLASH.
3789 	 */
3790 	ret = request_firmware_direct(&phyf, phy_info->phy_fw_file,
3791 				      adap->pdev_dev);
3792 	if (ret < 0) {
3793 		/* For adapters without FLASH attached to PHY for their
3794 		 * firmware, it's obviously a fatal error if we can't get the
3795 		 * firmware to the adapter.  For adapters with PHY firmware
3796 		 * FLASH storage, it's worth a warning if we can't find the
3797 		 * PHY Firmware but we'll neuter the error ...
3798 		 */
3799 		dev_err(adap->pdev_dev, "unable to find PHY Firmware image "
3800 			"/lib/firmware/%s, error %d\n",
3801 			phy_info->phy_fw_file, -ret);
3802 		if (phy_info->phy_flash) {
3803 			int cur_phy_fw_ver = 0;
3804 
3805 			t4_phy_fw_ver(adap, &cur_phy_fw_ver);
			dev_warn(adap->pdev_dev, "continuing with on-adapter "
3807 				 "FLASH copy, version %#x\n", cur_phy_fw_ver);
3808 			ret = 0;
3809 		}
3810 
3811 		return ret;
3812 	}
3813 
3814 	/* Load PHY Firmware onto adapter.
3815 	 */
3816 	ret = t4_load_phy_fw(adap, MEMWIN_NIC, &adap->win0_lock,
3817 			     phy_info->phy_fw_version,
3818 			     (u8 *)phyf->data, phyf->size);
3819 	if (ret < 0)
3820 		dev_err(adap->pdev_dev, "PHY Firmware transfer error %d\n",
3821 			-ret);
3822 	else if (ret > 0) {
3823 		int new_phy_fw_ver = 0;
3824 
3825 		if (phy_info->phy_fw_version)
3826 			new_phy_fw_ver = phy_info->phy_fw_version(phyf->data,
3827 								  phyf->size);
3828 		dev_info(adap->pdev_dev, "Successfully transferred PHY "
3829 			 "Firmware /lib/firmware/%s, version %#x\n",
3830 			 phy_info->phy_fw_file, new_phy_fw_ver);
3831 	}
3832 
3833 	release_firmware(phyf);
3834 
3835 	return ret;
3836 }
3837 
3838 /*
3839  * Attempt to initialize the adapter via a Firmware Configuration File.
3840  */
3841 static int adap_init0_config(struct adapter *adapter, int reset)
3842 {
3843 	struct fw_caps_config_cmd caps_cmd;
3844 	const struct firmware *cf;
3845 	unsigned long mtype = 0, maddr = 0;
3846 	u32 finiver, finicsum, cfcsum;
3847 	int ret;
3848 	int config_issued = 0;
3849 	char *fw_config_file, fw_config_file_path[256];
3850 	char *config_name = NULL;
3851 
3852 	/*
3853 	 * Reset device if necessary.
3854 	 */
3855 	if (reset) {
3856 		ret = t4_fw_reset(adapter, adapter->mbox,
3857 				  PIORSTMODE_F | PIORST_F);
3858 		if (ret < 0)
3859 			goto bye;
3860 	}
3861 
3862 	/* If this is a 10Gb/s-BT adapter make sure the chip-external
3863 	 * 10Gb/s-BT PHYs have up-to-date firmware.  Note that this step needs
3864 	 * to be performed after any global adapter RESET above since some
3865 	 * PHYs only have local RAM copies of the PHY firmware.
3866 	 */
3867 	if (is_10gbt_device(adapter->pdev->device)) {
3868 		ret = adap_init0_phy(adapter);
3869 		if (ret < 0)
3870 			goto bye;
3871 	}
3872 	/*
3873 	 * If we have a T4 configuration file under /lib/firmware/cxgb4/,
3874 	 * then use that.  Otherwise, use the configuration file stored
3875 	 * in the adapter flash ...
3876 	 */
3877 	switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
3878 	case CHELSIO_T4:
3879 		fw_config_file = FW4_CFNAME;
3880 		break;
3881 	case CHELSIO_T5:
3882 		fw_config_file = FW5_CFNAME;
3883 		break;
3884 	case CHELSIO_T6:
3885 		fw_config_file = FW6_CFNAME;
3886 		break;
3887 	default:
3888 		dev_err(adapter->pdev_dev, "Device %d is not supported\n",
3889 		       adapter->pdev->device);
3890 		ret = -EINVAL;
3891 		goto bye;
3892 	}
3893 
3894 	ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
3895 	if (ret < 0) {
3896 		config_name = "On FLASH";
3897 		mtype = FW_MEMTYPE_CF_FLASH;
3898 		maddr = t4_flash_cfg_addr(adapter);
3899 	} else {
3900 		u32 params[7], val[7];
3901 
3902 		sprintf(fw_config_file_path,
3903 			"/lib/firmware/%s", fw_config_file);
3904 		config_name = fw_config_file_path;
3905 
3906 		if (cf->size >= FLASH_CFG_MAX_SIZE)
3907 			ret = -ENOMEM;
3908 		else {
3909 			params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3910 			     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
3911 			ret = t4_query_params(adapter, adapter->mbox,
3912 					      adapter->pf, 0, 1, params, val);
3913 			if (ret == 0) {
3914 				/*
3915 				 * For t4_memory_rw() below addresses and
3916 				 * sizes have to be in terms of multiples of 4
3917 				 * bytes.  So, if the Configuration File isn't
3918 				 * a multiple of 4 bytes in length we'll have
3919 				 * to write that out separately since we can't
3920 				 * guarantee that the bytes following the
3921 				 * residual byte in the buffer returned by
3922 				 * request_firmware() are zeroed out ...
3923 				 */
3924 				size_t resid = cf->size & 0x3;
3925 				size_t size = cf->size & ~0x3;
3926 				__be32 *data = (__be32 *)cf->data;
3927 
3928 				mtype = FW_PARAMS_PARAM_Y_G(val[0]);
3929 				maddr = FW_PARAMS_PARAM_Z_G(val[0]) << 16;
3930 
3931 				spin_lock(&adapter->win0_lock);
3932 				ret = t4_memory_rw(adapter, 0, mtype, maddr,
3933 						   size, data, T4_MEMORY_WRITE);
3934 				if (ret == 0 && resid != 0) {
3935 					union {
3936 						__be32 word;
3937 						char buf[4];
3938 					} last;
3939 					int i;
3940 
3941 					last.word = data[size >> 2];
3942 					for (i = resid; i < 4; i++)
3943 						last.buf[i] = 0;
3944 					ret = t4_memory_rw(adapter, 0, mtype,
3945 							   maddr + size,
3946 							   4, &last.word,
3947 							   T4_MEMORY_WRITE);
3948 				}
3949 				spin_unlock(&adapter->win0_lock);
3950 			}
3951 		}
3952 
3953 		release_firmware(cf);
3954 		if (ret)
3955 			goto bye;
3956 	}
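
	/* Padding sketch for the residual-write logic above (hypothetical
	 * sizes): a 1027-byte Configuration File gives size = 1024 and
	 * resid = 3; the first t4_memory_rw() writes bytes 0..1023, then
	 * the final word is built from data[256] with buf[3] zeroed and
	 * written as a single aligned 4-byte transfer at maddr + 1024.
	 */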
3957 
3958 	/*
3959 	 * Issue a Capability Configuration command to the firmware to get it
3960 	 * to parse the Configuration File.  We don't use t4_fw_config_file()
3961 	 * because we want the ability to modify various features after we've
3962 	 * processed the configuration file ...
3963 	 */
3964 	memset(&caps_cmd, 0, sizeof(caps_cmd));
3965 	caps_cmd.op_to_write =
3966 		htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
3967 		      FW_CMD_REQUEST_F |
3968 		      FW_CMD_READ_F);
3969 	caps_cmd.cfvalid_to_len16 =
3970 		htonl(FW_CAPS_CONFIG_CMD_CFVALID_F |
3971 		      FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) |
3972 		      FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) |
3973 		      FW_LEN16(caps_cmd));
3974 	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
3975 			 &caps_cmd);
3976 
3977 	/* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
3978 	 * Configuration File in FLASH), our last gasp effort is to use the
3979 	 * Firmware Configuration File which is embedded in the firmware.  A
3980 	 * very few early versions of the firmware didn't have one embedded
3981 	 * but we can ignore those.
3982 	 */
3983 	if (ret == -ENOENT) {
3984 		memset(&caps_cmd, 0, sizeof(caps_cmd));
3985 		caps_cmd.op_to_write =
3986 			htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
3987 					FW_CMD_REQUEST_F |
3988 					FW_CMD_READ_F);
3989 		caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
3990 		ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
3991 				sizeof(caps_cmd), &caps_cmd);
3992 		config_name = "Firmware Default";
3993 	}
3994 
3995 	config_issued = 1;
3996 	if (ret < 0)
3997 		goto bye;
3998 
3999 	finiver = ntohl(caps_cmd.finiver);
4000 	finicsum = ntohl(caps_cmd.finicsum);
4001 	cfcsum = ntohl(caps_cmd.cfcsum);
4002 	if (finicsum != cfcsum)
		dev_warn(adapter->pdev_dev, "Configuration File checksum "
4004 			 "mismatch: [fini] csum=%#x, computed csum=%#x\n",
4005 			 finicsum, cfcsum);
4006 
4007 	/*
4008 	 * And now tell the firmware to use the configuration we just loaded.
4009 	 */
4010 	caps_cmd.op_to_write =
4011 		htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
4012 		      FW_CMD_REQUEST_F |
4013 		      FW_CMD_WRITE_F);
4014 	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
4015 	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4016 			 NULL);
4017 	if (ret < 0)
4018 		goto bye;
4019 
4020 	/*
4021 	 * Tweak configuration based on system architecture, module
4022 	 * parameters, etc.
4023 	 */
4024 	ret = adap_init0_tweaks(adapter);
4025 	if (ret < 0)
4026 		goto bye;
4027 
4028 	/* We will proceed even if HMA init fails. */
4029 	ret = adap_config_hma(adapter);
4030 	if (ret)
4031 		dev_err(adapter->pdev_dev,
4032 			"HMA configuration failed with error %d\n", ret);
4033 
4034 	/*
4035 	 * And finally tell the firmware to initialize itself using the
4036 	 * parameters from the Configuration File.
4037 	 */
4038 	ret = t4_fw_initialize(adapter, adapter->mbox);
4039 	if (ret < 0)
4040 		goto bye;
4041 
4042 	/* Emit Firmware Configuration File information and return
4043 	 * successfully.
4044 	 */
	dev_info(adapter->pdev_dev, "Successfully configured using Firmware "
4046 		 "Configuration File \"%s\", version %#x, computed checksum %#x\n",
4047 		 config_name, finiver, cfcsum);
4048 	return 0;
4049 
4050 	/*
4051 	 * Something bad happened.  Return the error ...  (If the "error"
4052 	 * is that there's no Configuration File on the adapter we don't
4053 	 * want to issue a warning since this is fairly common.)
4054 	 */
4055 bye:
4056 	if (config_issued && ret != -ENOENT)
4057 		dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n",
4058 			 config_name, -ret);
4059 	return ret;
4060 }
4061 
4062 static struct fw_info fw_info_array[] = {
4063 	{
4064 		.chip = CHELSIO_T4,
4065 		.fs_name = FW4_CFNAME,
4066 		.fw_mod_name = FW4_FNAME,
4067 		.fw_hdr = {
4068 			.chip = FW_HDR_CHIP_T4,
4069 			.fw_ver = __cpu_to_be32(FW_VERSION(T4)),
4070 			.intfver_nic = FW_INTFVER(T4, NIC),
4071 			.intfver_vnic = FW_INTFVER(T4, VNIC),
4072 			.intfver_ri = FW_INTFVER(T4, RI),
4073 			.intfver_iscsi = FW_INTFVER(T4, ISCSI),
4074 			.intfver_fcoe = FW_INTFVER(T4, FCOE),
4075 		},
4076 	}, {
4077 		.chip = CHELSIO_T5,
4078 		.fs_name = FW5_CFNAME,
4079 		.fw_mod_name = FW5_FNAME,
4080 		.fw_hdr = {
4081 			.chip = FW_HDR_CHIP_T5,
4082 			.fw_ver = __cpu_to_be32(FW_VERSION(T5)),
4083 			.intfver_nic = FW_INTFVER(T5, NIC),
4084 			.intfver_vnic = FW_INTFVER(T5, VNIC),
4085 			.intfver_ri = FW_INTFVER(T5, RI),
4086 			.intfver_iscsi = FW_INTFVER(T5, ISCSI),
4087 			.intfver_fcoe = FW_INTFVER(T5, FCOE),
4088 		},
4089 	}, {
4090 		.chip = CHELSIO_T6,
4091 		.fs_name = FW6_CFNAME,
4092 		.fw_mod_name = FW6_FNAME,
4093 		.fw_hdr = {
4094 			.chip = FW_HDR_CHIP_T6,
4095 			.fw_ver = __cpu_to_be32(FW_VERSION(T6)),
4096 			.intfver_nic = FW_INTFVER(T6, NIC),
4097 			.intfver_vnic = FW_INTFVER(T6, VNIC),
4098 			.intfver_ofld = FW_INTFVER(T6, OFLD),
4099 			.intfver_ri = FW_INTFVER(T6, RI),
4100 			.intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU),
4101 			.intfver_iscsi = FW_INTFVER(T6, ISCSI),
4102 			.intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU),
4103 			.intfver_fcoe = FW_INTFVER(T6, FCOE),
4104 		},
	},
};
4108 
4109 static struct fw_info *find_fw_info(int chip)
4110 {
4111 	int i;
4112 
4113 	for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
4114 		if (fw_info_array[i].chip == chip)
4115 			return &fw_info_array[i];
4116 	}
4117 	return NULL;
4118 }
4119 
4120 /*
4121  * Phase 0 of initialization: contact FW, obtain config, perform basic init.
4122  */
4123 static int adap_init0(struct adapter *adap)
4124 {
4125 	int ret;
4126 	u32 v, port_vec;
4127 	enum dev_state state;
4128 	u32 params[7], val[7];
4129 	struct fw_caps_config_cmd caps_cmd;
4130 	int reset = 1;
4131 
4132 	/* Grab Firmware Device Log parameters as early as possible so we have
4133 	 * access to it for debugging, etc.
4134 	 */
4135 	ret = t4_init_devlog_params(adap);
4136 	if (ret < 0)
4137 		return ret;
4138 
4139 	/* Contact FW, advertising Master capability */
4140 	ret = t4_fw_hello(adap, adap->mbox, adap->mbox,
4141 			  is_kdump_kernel() ? MASTER_MUST : MASTER_MAY, &state);
4142 	if (ret < 0) {
4143 		dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
4144 			ret);
4145 		return ret;
4146 	}
4147 	if (ret == adap->mbox)
4148 		adap->flags |= MASTER_PF;
4149 
4150 	/*
4151 	 * If we're the Master PF Driver and the device is uninitialized,
4152 	 * then let's consider upgrading the firmware ...  (We always want
4153 	 * to check the firmware version number in order to A. get it for
4154 	 * later reporting and B. to warn if the currently loaded firmware
4155 	 * is excessively mismatched relative to the driver.)
4156 	 */
4157 
4158 	t4_get_version_info(adap);
4159 	ret = t4_check_fw_version(adap);
4160 	/* If firmware is too old (not supported by driver) force an update. */
4161 	if (ret)
4162 		state = DEV_STATE_UNINIT;
4163 	if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
4164 		struct fw_info *fw_info;
4165 		struct fw_hdr *card_fw;
4166 		const struct firmware *fw;
4167 		const u8 *fw_data = NULL;
4168 		unsigned int fw_size = 0;
4169 
4170 		/* This is the firmware whose headers the driver was compiled
4171 		 * against
4172 		 */
4173 		fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip));
4174 		if (fw_info == NULL) {
4175 			dev_err(adap->pdev_dev,
4176 				"unable to get firmware info for chip %d.\n",
4177 				CHELSIO_CHIP_VERSION(adap->params.chip));
4178 			return -EINVAL;
4179 		}
4180 
4181 		/* allocate memory to read the header of the firmware on the
4182 		 * card
4183 		 */
4184 		card_fw = kvzalloc(sizeof(*card_fw), GFP_KERNEL);
4185 		if (!card_fw) {
4186 			ret = -ENOMEM;
4187 			goto bye;
4188 		}
4189 
		/* Get FW from /lib/firmware/ */
4191 		ret = request_firmware(&fw, fw_info->fw_mod_name,
4192 				       adap->pdev_dev);
4193 		if (ret < 0) {
4194 			dev_err(adap->pdev_dev,
4195 				"unable to load firmware image %s, error %d\n",
4196 				fw_info->fw_mod_name, ret);
4197 		} else {
4198 			fw_data = fw->data;
4199 			fw_size = fw->size;
4200 		}
4201 
4202 		/* upgrade FW logic */
4203 		ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw,
4204 				 state, &reset);
4205 
4206 		/* Cleaning up */
4207 		release_firmware(fw);
4208 		kvfree(card_fw);
4209 
4210 		if (ret < 0)
4211 			goto bye;
4212 	}
4213 
	/* If the firmware is initialized already, emit a simple note to that
4215 	 * effect. Otherwise, it's time to try initializing the adapter.
4216 	 */
4217 	if (state == DEV_STATE_INIT) {
4218 		ret = adap_config_hma(adap);
4219 		if (ret)
4220 			dev_err(adap->pdev_dev,
4221 				"HMA configuration failed with error %d\n",
4222 				ret);
		dev_info(adap->pdev_dev, "Coming up as %s: "
4224 			 "Adapter already initialized\n",
4225 			 adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
4226 	} else {
		dev_info(adap->pdev_dev, "Coming up as MASTER: "
4228 			 "Initializing adapter\n");
4229 
4230 		/* Find out whether we're dealing with a version of the
4231 		 * firmware which has configuration file support.
4232 		 */
4233 		params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
4234 			     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
4235 		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
4236 				      params, val);
4237 
4238 		/* If the firmware doesn't support Configuration Files,
4239 		 * return an error.
4240 		 */
4241 		if (ret < 0) {
4242 			dev_err(adap->pdev_dev, "firmware doesn't support "
4243 				"Firmware Configuration Files\n");
4244 			goto bye;
4245 		}
4246 
4247 		/* The firmware provides us with a memory buffer where we can
4248 		 * load a Configuration File from the host if we want to
4249 		 * override the Configuration File in flash.
4250 		 */
4251 		ret = adap_init0_config(adap, reset);
4252 		if (ret == -ENOENT) {
4253 			dev_err(adap->pdev_dev, "no Configuration File "
4254 				"present on adapter.\n");
4255 			goto bye;
4256 		}
4257 		if (ret < 0) {
4258 			dev_err(adap->pdev_dev, "could not initialize "
4259 				"adapter, error %d\n", -ret);
4260 			goto bye;
4261 		}
4262 	}
4263 
4264 	/* Now that we've successfully configured and initialized the adapter
4265 	 * (or found it already initialized), we can ask the Firmware what
4266 	 * resources it has provisioned for us.
4267 	 */
4268 	ret = t4_get_pfres(adap);
4269 	if (ret) {
4270 		dev_err(adap->pdev_dev,
4271 			"Unable to retrieve resource provisioning information\n");
4272 		goto bye;
4273 	}
4274 
4275 	/* Grab VPD parameters.  This should be done after we establish a
4276 	 * connection to the firmware since some of the VPD parameters
4277 	 * (notably the Core Clock frequency) are retrieved via requests to
4278 	 * the firmware.  On the other hand, we need these fairly early on
4279 	 * so we do this right after getting ahold of the firmware.
4280 	 *
4281 	 * We need to do this after initializing the adapter because someone
4282 	 * could have FLASHed a new VPD which won't be read by the firmware
4283 	 * until we do the RESET ...
4284 	 */
4285 	ret = t4_get_vpd_params(adap, &adap->params.vpd);
4286 	if (ret < 0)
4287 		goto bye;
4288 
4289 	/* Find out what ports are available to us.  Note that we need to do
	 * this before the later configuration steps which need nports
4291 	 * and portvec ...
4292 	 */
	v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
	    FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
4296 	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec);
4297 	if (ret < 0)
4298 		goto bye;
4299 
4300 	adap->params.nports = hweight32(port_vec);
4301 	adap->params.portvec = port_vec;
4302 
4303 	/* Give the SGE code a chance to pull in anything that it needs ...
4304 	 * Note that this must be called after we retrieve our VPD parameters
4305 	 * in order to know how to convert core ticks to seconds, etc.
4306 	 */
4307 	ret = t4_sge_init(adap);
4308 	if (ret < 0)
4309 		goto bye;
4310 
4311 	if (is_bypass_device(adap->pdev->device))
4312 		adap->params.bypass = 1;
4313 
4314 	/*
4315 	 * Grab some of our basic fundamental operating parameters.
4316 	 */
4317 #define FW_PARAM_DEV(param) \
4318 	(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | \
4319 	FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_##param))
4320 
4321 #define FW_PARAM_PFVF(param) \
4322 	FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \
4323 	FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param)|  \
4324 	FW_PARAMS_PARAM_Y_V(0) | \
4325 	FW_PARAMS_PARAM_Z_V(0)
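
	/* Expansion sketch: FW_PARAM_PFVF(L2T_START) builds a 32-bit
	 * parameter id from mnemonic FW_PARAMS_MNEM_PFVF, index
	 * FW_PARAMS_PARAM_PFVF_L2T_START and Y/Z components of zero,
	 * ready to be passed in the params[] array of t4_query_params().
	 */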
4326 
4327 	params[0] = FW_PARAM_PFVF(EQ_START);
4328 	params[1] = FW_PARAM_PFVF(L2T_START);
4329 	params[2] = FW_PARAM_PFVF(L2T_END);
4330 	params[3] = FW_PARAM_PFVF(FILTER_START);
4331 	params[4] = FW_PARAM_PFVF(FILTER_END);
4332 	params[5] = FW_PARAM_PFVF(IQFLINT_START);
4333 	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params, val);
4334 	if (ret < 0)
4335 		goto bye;
4336 	adap->sge.egr_start = val[0];
4337 	adap->l2t_start = val[1];
4338 	adap->l2t_end = val[2];
4339 	adap->tids.ftid_base = val[3];
4340 	adap->tids.nftids = val[4] - val[3] + 1;
4341 	adap->sge.ingr_start = val[5];
4342 
4343 	if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
4344 		/* Read the raw mps entries. In T6, the last 2 tcam entries
4345 		 * are reserved for raw mac addresses (rawf = 2, one per port).
4346 		 */
4347 		params[0] = FW_PARAM_PFVF(RAWF_START);
4348 		params[1] = FW_PARAM_PFVF(RAWF_END);
4349 		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
4350 				      params, val);
4351 		if (ret == 0) {
4352 			adap->rawf_start = val[0];
4353 			adap->rawf_cnt = val[1] - val[0] + 1;
4354 		}
4355 	}
4356 
4357 	/* qids (ingress/egress) returned from firmware can be anywhere
4358 	 * in the range from EQ(IQFLINT)_START to EQ(IQFLINT)_END.
	 * Hence the driver needs to allocate memory for this range to
4360 	 * store the queue info. Get the highest IQFLINT/EQ index returned
4361 	 * in FW_EQ_*_CMD.alloc command.
4362 	 */
4363 	params[0] = FW_PARAM_PFVF(EQ_END);
4364 	params[1] = FW_PARAM_PFVF(IQFLINT_END);
4365 	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
4366 	if (ret < 0)
4367 		goto bye;
4368 	adap->sge.egr_sz = val[0] - adap->sge.egr_start + 1;
4369 	adap->sge.ingr_sz = val[1] - adap->sge.ingr_start + 1;
4370 
4371 	adap->sge.egr_map = kcalloc(adap->sge.egr_sz,
4372 				    sizeof(*adap->sge.egr_map), GFP_KERNEL);
4373 	if (!adap->sge.egr_map) {
4374 		ret = -ENOMEM;
4375 		goto bye;
4376 	}
4377 
4378 	adap->sge.ingr_map = kcalloc(adap->sge.ingr_sz,
4379 				     sizeof(*adap->sge.ingr_map), GFP_KERNEL);
4380 	if (!adap->sge.ingr_map) {
4381 		ret = -ENOMEM;
4382 		goto bye;
4383 	}
4384 
	/* Allocate the memory for the various egress queue bitmaps,
	 * i.e. starving_fl, txq_maperr and blocked_fl.
4387 	 */
4388 	adap->sge.starving_fl =	kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
4389 					sizeof(long), GFP_KERNEL);
4390 	if (!adap->sge.starving_fl) {
4391 		ret = -ENOMEM;
4392 		goto bye;
4393 	}
4394 
4395 	adap->sge.txq_maperr = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
4396 				       sizeof(long), GFP_KERNEL);
4397 	if (!adap->sge.txq_maperr) {
4398 		ret = -ENOMEM;
4399 		goto bye;
4400 	}
4401 
4402 #ifdef CONFIG_DEBUG_FS
4403 	adap->sge.blocked_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
4404 				       sizeof(long), GFP_KERNEL);
4405 	if (!adap->sge.blocked_fl) {
4406 		ret = -ENOMEM;
4407 		goto bye;
4408 	}
4409 #endif
4410 
4411 	params[0] = FW_PARAM_PFVF(CLIP_START);
4412 	params[1] = FW_PARAM_PFVF(CLIP_END);
4413 	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
4414 	if (ret < 0)
4415 		goto bye;
4416 	adap->clipt_start = val[0];
4417 	adap->clipt_end = val[1];
4418 
	/* We don't yet have a PARAMs call to retrieve the number of Traffic
4420 	 * Classes supported by the hardware/firmware so we hard code it here
4421 	 * for now.
4422 	 */
4423 	adap->params.nsched_cls = is_t4(adap->params.chip) ? 15 : 16;
4424 
4425 	/* query params related to active filter region */
4426 	params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
4427 	params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
4428 	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
	/* If the Active Filter region is non-empty, we enable establishing
	 * offload connections through firmware work requests.
	 */
	if (ret >= 0 && val[0] != val[1]) {
4433 		adap->flags |= FW_OFLD_CONN;
4434 		adap->tids.aftid_base = val[0];
4435 		adap->tids.aftid_end = val[1];
4436 	}
4437 
4438 	/* If we're running on newer firmware, let it know that we're
4439 	 * prepared to deal with encapsulated CPL messages.  Older
4440 	 * firmware won't understand this and we'll just get
4441 	 * unencapsulated messages ...
4442 	 */
4443 	params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
4444 	val[0] = 1;
4445 	(void)t4_set_params(adap, adap->mbox, adap->pf, 0, 1, params, val);
4446 
4447 	/*
4448 	 * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
4449 	 * capability.  Earlier versions of the firmware didn't have the
4450 	 * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no
4451 	 * permission to use ULPTX MEMWRITE DSGL.
4452 	 */
4453 	if (is_t4(adap->params.chip)) {
4454 		adap->params.ulptx_memwrite_dsgl = false;
4455 	} else {
4456 		params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
4457 		ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
4458 				      1, params, val);
4459 		adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
4460 	}
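
	/* Probe-pattern sketch: the capability probes here all share the
	 * same shape, treating a t4_query_params() failure as "feature
	 * not supported" rather than as a fatal error, e.g. (with a
	 * hypothetical parameter name):
	 *
	 *	params[0] = FW_PARAM_DEV(SOME_FEATURE);
	 *	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
	 *			      params, val);
	 *	feature_supported = (ret == 0 && val[0] != 0);
	 */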
4461 
4462 	/* See if FW supports FW_RI_FR_NSMR_TPTE_WR work request */
4463 	params[0] = FW_PARAM_DEV(RI_FR_NSMR_TPTE_WR);
4464 	ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
4465 			      1, params, val);
4466 	adap->params.fr_nsmr_tpte_wr_support = (ret == 0 && val[0] != 0);
4467 
4468 	/* See if FW supports FW_FILTER2 work request */
4469 	if (is_t4(adap->params.chip)) {
4470 		adap->params.filter2_wr_support = 0;
4471 	} else {
4472 		params[0] = FW_PARAM_DEV(FILTER2_WR);
4473 		ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
4474 				      1, params, val);
4475 		adap->params.filter2_wr_support = (ret == 0 && val[0] != 0);
4476 	}
4477 
4478 	/*
4479 	 * Get device capabilities so we can determine what resources we need
4480 	 * to manage.
4481 	 */
4482 	memset(&caps_cmd, 0, sizeof(caps_cmd));
4483 	caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
4484 				     FW_CMD_REQUEST_F | FW_CMD_READ_F);
4485 	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
4486 	ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
4487 			 &caps_cmd);
4488 	if (ret < 0)
4489 		goto bye;
4490 
4491 	if (caps_cmd.ofldcaps ||
4492 	    (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_HASHFILTER))) {
4493 		/* query offload-related parameters */
4494 		params[0] = FW_PARAM_DEV(NTID);
4495 		params[1] = FW_PARAM_PFVF(SERVER_START);
4496 		params[2] = FW_PARAM_PFVF(SERVER_END);
4497 		params[3] = FW_PARAM_PFVF(TDDP_START);
4498 		params[4] = FW_PARAM_PFVF(TDDP_END);
4499 		params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
4500 		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
4501 				      params, val);
4502 		if (ret < 0)
4503 			goto bye;
4504 		adap->tids.ntids = val[0];
4505 		adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
4506 		adap->tids.stid_base = val[1];
4507 		adap->tids.nstids = val[2] - val[1] + 1;
		/*
		 * Set up the server filter region.  Divide the available
		 * filter region into two parts: regular filters get 1/3rd
		 * and server filters get the remaining 2/3rds.  This split
		 * is only enabled if the workaround path is enabled.
		 * 1. Regular filters.
		 * 2. Server filters: these are special filters used to
		 *    redirect SYN packets to the offload queue.
		 */
4517 		if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
4518 			adap->tids.sftid_base = adap->tids.ftid_base +
4519 					DIV_ROUND_UP(adap->tids.nftids, 3);
4520 			adap->tids.nsftids = adap->tids.nftids -
4521 					 DIV_ROUND_UP(adap->tids.nftids, 3);
4522 			adap->tids.nftids = adap->tids.sftid_base -
4523 						adap->tids.ftid_base;
4524 		}
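
		/* Split sketch (hypothetical numbers): with nftids = 96,
		 * DIV_ROUND_UP(96, 3) = 32, so the region becomes
		 * sftid_base = ftid_base + 32, nsftids = 96 - 32 = 64 and
		 * nftids = 32, i.e. a 1/3 : 2/3 regular/server split.
		 */
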
4525 		adap->vres.ddp.start = val[3];
4526 		adap->vres.ddp.size = val[4] - val[3] + 1;
4527 		adap->params.ofldq_wr_cred = val[5];
4528 
4529 		if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_HASHFILTER)) {
4530 			ret = init_hash_filter(adap);
4531 			if (ret < 0)
4532 				goto bye;
4533 		} else {
4534 			adap->params.offload = 1;
4535 			adap->num_ofld_uld += 1;
4536 		}
4537 	}
4538 	if (caps_cmd.rdmacaps) {
4539 		params[0] = FW_PARAM_PFVF(STAG_START);
4540 		params[1] = FW_PARAM_PFVF(STAG_END);
4541 		params[2] = FW_PARAM_PFVF(RQ_START);
4542 		params[3] = FW_PARAM_PFVF(RQ_END);
4543 		params[4] = FW_PARAM_PFVF(PBL_START);
4544 		params[5] = FW_PARAM_PFVF(PBL_END);
4545 		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
4546 				      params, val);
4547 		if (ret < 0)
4548 			goto bye;
4549 		adap->vres.stag.start = val[0];
4550 		adap->vres.stag.size = val[1] - val[0] + 1;
4551 		adap->vres.rq.start = val[2];
4552 		adap->vres.rq.size = val[3] - val[2] + 1;
4553 		adap->vres.pbl.start = val[4];
4554 		adap->vres.pbl.size = val[5] - val[4] + 1;
4555 
4556 		params[0] = FW_PARAM_PFVF(SRQ_START);
4557 		params[1] = FW_PARAM_PFVF(SRQ_END);
4558 		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
4559 				      params, val);
4560 		if (!ret) {
4561 			adap->vres.srq.start = val[0];
4562 			adap->vres.srq.size = val[1] - val[0] + 1;
4563 		}
4564 		if (adap->vres.srq.size) {
4565 			adap->srq = t4_init_srq(adap->vres.srq.size);
4566 			if (!adap->srq)
4567 				dev_warn(&adap->pdev->dev, "could not allocate SRQ, continuing\n");
4568 		}
4569 
4570 		params[0] = FW_PARAM_PFVF(SQRQ_START);
4571 		params[1] = FW_PARAM_PFVF(SQRQ_END);
4572 		params[2] = FW_PARAM_PFVF(CQ_START);
4573 		params[3] = FW_PARAM_PFVF(CQ_END);
4574 		params[4] = FW_PARAM_PFVF(OCQ_START);
4575 		params[5] = FW_PARAM_PFVF(OCQ_END);
4576 		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params,
4577 				      val);
4578 		if (ret < 0)
4579 			goto bye;
4580 		adap->vres.qp.start = val[0];
4581 		adap->vres.qp.size = val[1] - val[0] + 1;
4582 		adap->vres.cq.start = val[2];
4583 		adap->vres.cq.size = val[3] - val[2] + 1;
4584 		adap->vres.ocq.start = val[4];
4585 		adap->vres.ocq.size = val[5] - val[4] + 1;
4586 
4587 		params[0] = FW_PARAM_DEV(MAXORDIRD_QP);
4588 		params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER);
4589 		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params,
4590 				      val);
4591 		if (ret < 0) {
4592 			adap->params.max_ordird_qp = 8;
4593 			adap->params.max_ird_adapter = 32 * adap->tids.ntids;
4594 			ret = 0;
4595 		} else {
4596 			adap->params.max_ordird_qp = val[0];
4597 			adap->params.max_ird_adapter = val[1];
4598 		}
4599 		dev_info(adap->pdev_dev,
4600 			 "max_ordird_qp %d max_ird_adapter %d\n",
4601 			 adap->params.max_ordird_qp,
4602 			 adap->params.max_ird_adapter);
4603 
4604 		/* Enable write_with_immediate if FW supports it */
4605 		params[0] = FW_PARAM_DEV(RDMA_WRITE_WITH_IMM);
4606 		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params,
4607 				      val);
4608 		adap->params.write_w_imm_support = (ret == 0 && val[0] != 0);
4609 
4610 		/* Enable write_cmpl if FW supports it */
4611 		params[0] = FW_PARAM_DEV(RI_WRITE_CMPL_WR);
4612 		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params,
4613 				      val);
4614 		adap->params.write_cmpl_support = (ret == 0 && val[0] != 0);
4615 		adap->num_ofld_uld += 2;
4616 	}
4617 	if (caps_cmd.iscsicaps) {
4618 		params[0] = FW_PARAM_PFVF(ISCSI_START);
4619 		params[1] = FW_PARAM_PFVF(ISCSI_END);
4620 		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
4621 				      params, val);
4622 		if (ret < 0)
4623 			goto bye;
4624 		adap->vres.iscsi.start = val[0];
4625 		adap->vres.iscsi.size = val[1] - val[0] + 1;
		/* LIO target and cxgb4i initiator */
4627 		adap->num_ofld_uld += 2;
4628 	}
4629 	if (caps_cmd.cryptocaps) {
4630 		if (ntohs(caps_cmd.cryptocaps) &
4631 		    FW_CAPS_CONFIG_CRYPTO_LOOKASIDE) {
			/* Only one parameter is set up, so query just one. */
			params[0] = FW_PARAM_PFVF(NCRYPTO_LOOKASIDE);
			ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
					      1, params, val);
4635 			if (ret < 0) {
4636 				if (ret != -EINVAL)
4637 					goto bye;
4638 			} else {
4639 				adap->vres.ncrypto_fc = val[0];
4640 			}
4641 			adap->num_ofld_uld += 1;
4642 		}
4643 		if (ntohs(caps_cmd.cryptocaps) &
4644 		    FW_CAPS_CONFIG_TLS_INLINE) {
4645 			params[0] = FW_PARAM_PFVF(TLS_START);
4646 			params[1] = FW_PARAM_PFVF(TLS_END);
4647 			ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
4648 					      2, params, val);
4649 			if (ret < 0)
4650 				goto bye;
4651 			adap->vres.key.start = val[0];
4652 			adap->vres.key.size = val[1] - val[0] + 1;
4653 			adap->num_uld += 1;
4654 		}
4655 		adap->params.crypto = ntohs(caps_cmd.cryptocaps);
4656 	}
4657 #undef FW_PARAM_PFVF
4658 #undef FW_PARAM_DEV
4659 
	/* The MTU/MSS Table is initialized by now, so load its values.  If
4661 	 * we're initializing the adapter, then we'll make any modifications
4662 	 * we want to the MTU/MSS Table and also initialize the congestion
4663 	 * parameters.
4664 	 */
4665 	t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
4666 	if (state != DEV_STATE_INIT) {
4667 		int i;
4668 
4669 		/* The default MTU Table contains values 1492 and 1500.
4670 		 * However, for TCP, it's better to have two values which are
4671 		 * a multiple of 8 +/- 4 bytes apart near this popular MTU.
4672 		 * This allows us to have a TCP Data Payload which is a
4673 		 * multiple of 8 regardless of what combination of TCP Options
4674 		 * are in use (always a multiple of 4 bytes) which is
4675 		 * important for performance reasons.  For instance, if no
4676 		 * options are in use, then we have a 20-byte IP header and a
4677 		 * 20-byte TCP header.  In this case, a 1500-byte MSS would
4678 		 * result in a TCP Data Payload of 1500 - 40 == 1460 bytes
4679 		 * which is not a multiple of 8.  So using an MSS of 1488 in
4680 		 * this case results in a TCP Data Payload of 1448 bytes which
4681 		 * is a multiple of 8.  On the other hand, if 12-byte TCP Time
4682 		 * Stamps have been negotiated, then an MTU of 1500 bytes
4683 		 * results in a TCP Data Payload of 1448 bytes which, as
4684 		 * above, is a multiple of 8 bytes ...
4685 		 */
4686 		for (i = 0; i < NMTUS; i++)
4687 			if (adap->params.mtus[i] == 1492) {
4688 				adap->params.mtus[i] = 1488;
4689 				break;
4690 			}
4691 
4692 		t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
4693 			     adap->params.b_wnd);
4694 	}
4695 	t4_init_sge_params(adap);
4696 	adap->flags |= FW_OK;
4697 	t4_init_tp_params(adap, true);
4698 	return 0;
4699 
4700 	/*
	 * Something bad happened.  If a command timed out or failed with EIO,
	 * the FW is not operating within its spec or something catastrophic
	 * happened to the HW/FW, so stop issuing commands.
4704 	 */
4705 bye:
4706 	adap_free_hma_mem(adap);
4707 	kfree(adap->sge.egr_map);
4708 	kfree(adap->sge.ingr_map);
4709 	kfree(adap->sge.starving_fl);
4710 	kfree(adap->sge.txq_maperr);
4711 #ifdef CONFIG_DEBUG_FS
4712 	kfree(adap->sge.blocked_fl);
4713 #endif
4714 	if (ret != -ETIMEDOUT && ret != -EIO)
4715 		t4_fw_bye(adap, adap->mbox);
4716 	return ret;
4717 }
4718 
4719 /* EEH callbacks */
4720 
4721 static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
4722 					 pci_channel_state_t state)
4723 {
4724 	int i;
4725 	struct adapter *adap = pci_get_drvdata(pdev);
4726 
4727 	if (!adap)
4728 		goto out;
4729 
4730 	rtnl_lock();
4731 	adap->flags &= ~FW_OK;
4732 	notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
4733 	spin_lock(&adap->stats_lock);
4734 	for_each_port(adap, i) {
4735 		struct net_device *dev = adap->port[i];
4736 		if (dev) {
4737 			netif_device_detach(dev);
4738 			netif_carrier_off(dev);
4739 		}
4740 	}
4741 	spin_unlock(&adap->stats_lock);
4742 	disable_interrupts(adap);
4743 	if (adap->flags & FULL_INIT_DONE)
4744 		cxgb_down(adap);
4745 	rtnl_unlock();
4746 	if ((adap->flags & DEV_ENABLED)) {
4747 		pci_disable_device(pdev);
4748 		adap->flags &= ~DEV_ENABLED;
4749 	}
out:
	return state == pci_channel_io_perm_failure ?
4751 		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
4752 }
4753 
4754 static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
4755 {
4756 	int i, ret;
4757 	struct fw_caps_config_cmd c;
4758 	struct adapter *adap = pci_get_drvdata(pdev);
4759 
4760 	if (!adap) {
4761 		pci_restore_state(pdev);
4762 		pci_save_state(pdev);
4763 		return PCI_ERS_RESULT_RECOVERED;
4764 	}
4765 
4766 	if (!(adap->flags & DEV_ENABLED)) {
4767 		if (pci_enable_device(pdev)) {
4768 			dev_err(&pdev->dev, "Cannot reenable PCI "
4769 					    "device after reset\n");
4770 			return PCI_ERS_RESULT_DISCONNECT;
4771 		}
4772 		adap->flags |= DEV_ENABLED;
4773 	}
4774 
4775 	pci_set_master(pdev);
4776 	pci_restore_state(pdev);
4777 	pci_save_state(pdev);
4778 
4779 	if (t4_wait_dev_ready(adap->regs) < 0)
4780 		return PCI_ERS_RESULT_DISCONNECT;
4781 	if (t4_fw_hello(adap, adap->mbox, adap->pf, MASTER_MUST, NULL) < 0)
4782 		return PCI_ERS_RESULT_DISCONNECT;
4783 	adap->flags |= FW_OK;
4784 	if (adap_init1(adap, &c))
4785 		return PCI_ERS_RESULT_DISCONNECT;
4786 
4787 	for_each_port(adap, i) {
4788 		struct port_info *p = adap2pinfo(adap, i);
4789 
4790 		ret = t4_alloc_vi(adap, adap->mbox, p->tx_chan, adap->pf, 0, 1,
4791 				  NULL, NULL);
4792 		if (ret < 0)
4793 			return PCI_ERS_RESULT_DISCONNECT;
4794 		p->viid = ret;
4795 		p->xact_addr_filt = -1;
4796 	}
4797 
4798 	t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
4799 		     adap->params.b_wnd);
4800 	setup_memwin(adap);
4801 	if (cxgb_up(adap))
4802 		return PCI_ERS_RESULT_DISCONNECT;
4803 	return PCI_ERS_RESULT_RECOVERED;
4804 }
4805 
4806 static void eeh_resume(struct pci_dev *pdev)
4807 {
4808 	int i;
4809 	struct adapter *adap = pci_get_drvdata(pdev);
4810 
4811 	if (!adap)
4812 		return;
4813 
4814 	rtnl_lock();
4815 	for_each_port(adap, i) {
4816 		struct net_device *dev = adap->port[i];
4817 		if (dev) {
4818 			if (netif_running(dev)) {
4819 				link_start(dev);
4820 				cxgb_set_rxmode(dev);
4821 			}
4822 			netif_device_attach(dev);
4823 		}
4824 	}
4825 	rtnl_unlock();
4826 }
4827 
4828 static const struct pci_error_handlers cxgb4_eeh = {
4829 	.error_detected = eeh_err_detected,
4830 	.slot_reset     = eeh_slot_reset,
4831 	.resume         = eeh_resume,
4832 };
4833 
4834 /* Return true if the Link Configuration supports "High Speeds" (those greater
4835  * than 1Gb/s).
4836  */
4837 static inline bool is_x_10g_port(const struct link_config *lc)
4838 {
4839 	unsigned int speeds, high_speeds;
4840 
4841 	speeds = FW_PORT_CAP32_SPEED_V(FW_PORT_CAP32_SPEED_G(lc->pcaps));
4842 	high_speeds = speeds &
4843 			~(FW_PORT_CAP32_SPEED_100M | FW_PORT_CAP32_SPEED_1G);
4844 
4845 	return high_speeds != 0;
4846 }
4847 
4848 /*
4849  * Perform default configuration of DMA queues depending on the number and type
4850  * of ports we found and the number of available CPUs.  Most settings can be
4851  * modified by the admin prior to actual use.
4852  */
4853 static int cfg_queues(struct adapter *adap)
4854 {
4855 	struct sge *s = &adap->sge;
4856 	int i, n10g = 0, qidx = 0;
4857 	int niqflint, neq, avail_eth_qsets;
4858 	int max_eth_qsets = 32;
4859 #ifndef CONFIG_CHELSIO_T4_DCB
4860 	int q10g = 0;
4861 #endif
4862 
	/* Reduce memory usage in kdump environment by disabling all offload.
4864 	 */
4865 	if (is_kdump_kernel() || (is_uld(adap) && t4_uld_mem_alloc(adap))) {
4866 		adap->params.offload = 0;
4867 		adap->params.crypto = 0;
4868 	}
4869 
4870 	/* Calculate the number of Ethernet Queue Sets available based on
4871 	 * resources provisioned for us.  We always have an Asynchronous
4872 	 * Firmware Event Ingress Queue.  If we're operating in MSI or Legacy
4873 	 * IRQ Pin Interrupt mode, then we'll also have a Forwarded Interrupt
4874 	 * Ingress Queue.  Meanwhile, we need two Egress Queues for each
4875 	 * Queue Set: one for the Free List and one for the Ethernet TX Queue.
4876 	 *
4877 	 * Note that we should also take into account all of the various
4878 	 * Offload Queues.  But, in any situation where we're operating in
4879 	 * a Resource Constrained Provisioning environment, doing any Offload
4880 	 * at all is problematic ...
4881 	 */
4882 	niqflint = adap->params.pfres.niqflint - 1;
4883 	if (!(adap->flags & USING_MSIX))
4884 		niqflint--;
4885 	neq = adap->params.pfres.neq / 2;
4886 	avail_eth_qsets = min(niqflint, neq);
4887 
4888 	if (avail_eth_qsets > max_eth_qsets)
4889 		avail_eth_qsets = max_eth_qsets;
4890 
4891 	if (avail_eth_qsets < adap->params.nports) {
4892 		dev_err(adap->pdev_dev, "avail_eth_qsets=%d < nports=%d\n",
4893 			avail_eth_qsets, adap->params.nports);
4894 		return -ENOMEM;
4895 	}
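
	/* Provisioning sketch (hypothetical PF resources): with
	 * pfres.niqflint = 68 and pfres.neq = 132 in MSI-X mode,
	 * niqflint = 67 and neq = 66, so avail_eth_qsets =
	 * min(67, 66) = 66, which is then capped at max_eth_qsets = 32.
	 */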
4896 
4897 	/* Count the number of 10Gb/s or better ports */
4898 	for_each_port(adap, i)
4899 		n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
4900 
4901 #ifdef CONFIG_CHELSIO_T4_DCB
4902 	/* For Data Center Bridging support we need to be able to support up
4903 	 * to 8 Traffic Priorities; each of which will be assigned to its
4904 	 * own TX Queue in order to prevent Head-Of-Line Blocking.
4905 	 */
4906 	if (adap->params.nports * 8 > avail_eth_qsets) {
4907 		dev_err(adap->pdev_dev, "DCB avail_eth_qsets=%d < %d!\n",
4908 			avail_eth_qsets, adap->params.nports * 8);
4909 		return -ENOMEM;
4910 	}
4911 
4912 	for_each_port(adap, i) {
4913 		struct port_info *pi = adap2pinfo(adap, i);
4914 
4915 		pi->first_qset = qidx;
4916 		pi->nqsets = is_kdump_kernel() ? 1 : 8;
4917 		qidx += pi->nqsets;
4918 	}
4919 #else /* !CONFIG_CHELSIO_T4_DCB */
4920 	/*
	 * We default to 1 queue set per non-10G port and up to # of cores
	 * queue sets per 10G port, capped by the default RSS queue count.
4923 	 */
4924 	if (n10g)
4925 		q10g = (avail_eth_qsets - (adap->params.nports - n10g)) / n10g;
4926 	if (q10g > netif_get_num_default_rss_queues())
4927 		q10g = netif_get_num_default_rss_queues();
4928 
4929 	if (is_kdump_kernel())
4930 		q10g = 1;
4931 
4932 	for_each_port(adap, i) {
4933 		struct port_info *pi = adap2pinfo(adap, i);
4934 
4935 		pi->first_qset = qidx;
4936 		pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
4937 		qidx += pi->nqsets;
4938 	}
4939 #endif /* !CONFIG_CHELSIO_T4_DCB */
4940 
4941 	s->ethqsets = qidx;
4942 	s->max_ethqsets = qidx;   /* MSI-X may lower it later */
4943 
4944 	if (is_uld(adap)) {
4945 		/*
4946 		 * For offload we use 1 queue/channel if all ports are up to 1G,
4947 		 * otherwise we divide all available queues amongst the channels
4948 		 * capped by the number of available cores.
4949 		 */
4950 		if (n10g) {
4951 			i = min_t(int, MAX_OFLD_QSETS, num_online_cpus());
4952 			s->ofldqsets = roundup(i, adap->params.nports);
4953 		} else {
4954 			s->ofldqsets = adap->params.nports;
4955 		}
4956 	}
4957 
4958 	for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
4959 		struct sge_eth_rxq *r = &s->ethrxq[i];
4960 
4961 		init_rspq(adap, &r->rspq, 5, 10, 1024, 64);
4962 		r->fl.size = 72;
4963 	}
4964 
4965 	for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
4966 		s->ethtxq[i].q.size = 1024;
4967 
4968 	for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
4969 		s->ctrlq[i].q.size = 512;
4970 
4971 	if (!is_t4(adap->params.chip))
4972 		s->ptptxq.q.size = 8;
4973 
4974 	init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
4975 	init_rspq(adap, &s->intrq, 0, 1, 512, 64);
4976 
4977 	return 0;
4978 }
4979 
4980 /*
4981  * Reduce the number of Ethernet queues across all ports to at most n.
 * n is at least the number of ports, so each port keeps at least one queue.
4983  */
4984 static void reduce_ethqs(struct adapter *adap, int n)
4985 {
4986 	int i;
4987 	struct port_info *pi;
4988 
4989 	while (n < adap->sge.ethqsets)
4990 		for_each_port(adap, i) {
4991 			pi = adap2pinfo(adap, i);
4992 			if (pi->nqsets > 1) {
4993 				pi->nqsets--;
4994 				adap->sge.ethqsets--;
4995 				if (adap->sge.ethqsets <= n)
4996 					break;
4997 			}
4998 		}
4999 
5000 	n = 0;
5001 	for_each_port(adap, i) {
5002 		pi = adap2pinfo(adap, i);
5003 		pi->first_qset = n;
5004 		n += pi->nqsets;
5005 	}
5006 }
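
/* Reduction sketch for reduce_ethqs() (hypothetical layout): with 4 ports
 * of 3 queue sets each (12 total) and n = 6, each round-robin pass strips
 * one queue set per port until adap->sge.ethqsets reaches 6; the ports
 * end up with 1, 1, 2 and 2 queue sets, and the final loop packs them
 * contiguously at first_qset 0, 1, 2 and 4.
 */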
5007 
5008 static int get_msix_info(struct adapter *adap)
5009 {
5010 	struct uld_msix_info *msix_info;
5011 	unsigned int max_ingq = 0;
5012 
5013 	if (is_offload(adap))
5014 		max_ingq += MAX_OFLD_QSETS * adap->num_ofld_uld;
5015 	if (is_pci_uld(adap))
5016 		max_ingq += MAX_OFLD_QSETS * adap->num_uld;
5017 
5018 	if (!max_ingq)
5019 		goto out;
5020 
5021 	msix_info = kcalloc(max_ingq, sizeof(*msix_info), GFP_KERNEL);
5022 	if (!msix_info)
5023 		return -ENOMEM;
5024 
5025 	adap->msix_bmap_ulds.msix_bmap = kcalloc(BITS_TO_LONGS(max_ingq),
5026 						 sizeof(long), GFP_KERNEL);
5027 	if (!adap->msix_bmap_ulds.msix_bmap) {
5028 		kfree(msix_info);
5029 		return -ENOMEM;
5030 	}
5031 	spin_lock_init(&adap->msix_bmap_ulds.lock);
5032 	adap->msix_info_ulds = msix_info;
5033 out:
5034 	return 0;
5035 }
5036 
5037 static void free_msix_info(struct adapter *adap)
5038 {
	if (!(adap->num_uld || adap->num_ofld_uld))
5040 		return;
5041 
5042 	kfree(adap->msix_info_ulds);
5043 	kfree(adap->msix_bmap_ulds.msix_bmap);
5044 }
5045 
5046 /* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
5047 #define EXTRA_VECS 2
5048 
5049 static int enable_msix(struct adapter *adap)
5050 {
5051 	int ofld_need = 0, uld_need = 0;
5052 	int i, j, want, need, allocated;
5053 	struct sge *s = &adap->sge;
5054 	unsigned int nchan = adap->params.nports;
5055 	struct msix_entry *entries;
5056 	int max_ingq = MAX_INGQ;
5057 
5058 	if (is_pci_uld(adap))
5059 		max_ingq += (MAX_OFLD_QSETS * adap->num_uld);
5060 	if (is_offload(adap))
5061 		max_ingq += (MAX_OFLD_QSETS * adap->num_ofld_uld);
5062 	entries = kmalloc_array(max_ingq + 1, sizeof(*entries),
5063 				GFP_KERNEL);
5064 	if (!entries)
5065 		return -ENOMEM;
5066 
	/* Allocate MSI-X bookkeeping for the ULDs; disable offload on failure */
5068 	if (get_msix_info(adap)) {
5069 		adap->params.offload = 0;
5070 		adap->params.crypto = 0;
5071 	}
5072 
5073 	for (i = 0; i < max_ingq + 1; ++i)
5074 		entries[i].entry = i;
5075 
5076 	want = s->max_ethqsets + EXTRA_VECS;
5077 	if (is_offload(adap)) {
5078 		want += adap->num_ofld_uld * s->ofldqsets;
5079 		ofld_need = adap->num_ofld_uld * nchan;
5080 	}
5081 	if (is_pci_uld(adap)) {
5082 		want += adap->num_uld * s->ofldqsets;
5083 		uld_need = adap->num_uld * nchan;
5084 	}
5085 #ifdef CONFIG_CHELSIO_T4_DCB
5086 	/* For Data Center Bridging we need 8 Ethernet TX Priority Queues for
5087 	 * each port.
5088 	 */
5089 	need = 8 * adap->params.nports + EXTRA_VECS + ofld_need + uld_need;
5090 #else
5091 	need = adap->params.nports + EXTRA_VECS + ofld_need + uld_need;
5092 #endif
5093 	allocated = pci_enable_msix_range(adap->pdev, entries, need, want);
5094 	if (allocated < 0) {
5095 		dev_info(adap->pdev_dev, "not enough MSI-X vectors left,"
5096 			 " not using MSI-X\n");
5097 		kfree(entries);
5098 		return allocated;
5099 	}
5100 
5101 	/* Distribute available vectors to the various queue groups.
5102 	 * Every group gets its minimum requirement and NIC gets top
5103 	 * priority for leftovers.
5104 	 */
5105 	i = allocated - EXTRA_VECS - ofld_need - uld_need;
5106 	if (i < s->max_ethqsets) {
5107 		s->max_ethqsets = i;
5108 		if (i < s->ethqsets)
5109 			reduce_ethqs(adap, i);
5110 	}
5111 	if (is_uld(adap)) {
5112 		if (allocated < want)
5113 			s->nqs_per_uld = nchan;
5114 		else
5115 			s->nqs_per_uld = s->ofldqsets;
5116 	}
5117 
5118 	for (i = 0; i < (s->max_ethqsets + EXTRA_VECS); ++i)
5119 		adap->msix_info[i].vec = entries[i].vector;
5120 	if (is_uld(adap)) {
5121 		for (j = 0 ; i < allocated; ++i, j++) {
5122 			adap->msix_info_ulds[j].vec = entries[i].vector;
5123 			adap->msix_info_ulds[j].idx = i;
5124 		}
5125 		adap->msix_bmap_ulds.mapsize = j;
5126 	}
5127 	dev_info(adap->pdev_dev, "%d MSI-X vectors allocated, "
5128 		 "nic %d per uld %d\n",
5129 		 allocated, s->max_ethqsets, s->nqs_per_uld);
5130 
5131 	kfree(entries);
5132 	return 0;
5133 }
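
/* Allocation sketch for enable_msix() (hypothetical non-DCB, non-offload
 * 2-port NIC): with s->max_ethqsets = 16 and no ULDs, want = 16 +
 * EXTRA_VECS = 18 and need = 2 + EXTRA_VECS = 4.  If
 * pci_enable_msix_range() then returns 10 vectors, i = 10 - 2 = 8 < 16,
 * so max_ethqsets drops to 8 and reduce_ethqs() trims the per-port
 * queue sets to match.
 */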
5134 
5135 #undef EXTRA_VECS
5136 
5137 static int init_rss(struct adapter *adap)
5138 {
5139 	unsigned int i;
5140 	int err;
5141 
5142 	err = t4_init_rss_mode(adap, adap->mbox);
5143 	if (err)
5144 		return err;
5145 
5146 	for_each_port(adap, i) {
5147 		struct port_info *pi = adap2pinfo(adap, i);
5148 
5149 		pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
5150 		if (!pi->rss)
5151 			return -ENOMEM;
5152 	}
5153 	return 0;
5154 }
5155 
5156 /* Dump basic information about the adapter */
5157 static void print_adapter_info(struct adapter *adapter)
5158 {
5159 	/* Hardware/Firmware/etc. Version/Revision IDs */
5160 	t4_dump_version_info(adapter);
5161 
5162 	/* Software/Hardware configuration */
5163 	dev_info(adapter->pdev_dev, "Configuration: %sNIC %s, %s capable\n",
5164 		 is_offload(adapter) ? "R" : "",
5165 		 ((adapter->flags & USING_MSIX) ? "MSI-X" :
5166 		  (adapter->flags & USING_MSI) ? "MSI" : ""),
5167 		 is_offload(adapter) ? "Offload" : "non-Offload");
5168 }
5169 
5170 static void print_port_info(const struct net_device *dev)
5171 {
5172 	char buf[80];
5173 	char *bufp = buf;
5174 	const struct port_info *pi = netdev_priv(dev);
5175 	const struct adapter *adap = pi->adapter;
5176 
5177 	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100M)
5178 		bufp += sprintf(bufp, "100M/");
5179 	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_1G)
5180 		bufp += sprintf(bufp, "1G/");
5181 	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_10G)
5182 		bufp += sprintf(bufp, "10G/");
5183 	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_25G)
5184 		bufp += sprintf(bufp, "25G/");
5185 	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_40G)
5186 		bufp += sprintf(bufp, "40G/");
5187 	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_50G)
5188 		bufp += sprintf(bufp, "50G/");
5189 	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100G)
5190 		bufp += sprintf(bufp, "100G/");
5191 	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_200G)
5192 		bufp += sprintf(bufp, "200G/");
5193 	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_400G)
5194 		bufp += sprintf(bufp, "400G/");
5195 	if (bufp != buf)
5196 		--bufp;
5197 	sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));
5198 
5199 	netdev_info(dev, "%s: Chelsio %s (%s) %s\n",
5200 		    dev->name, adap->params.vpd.id, adap->name, buf);
5201 }
5202 
5203 /*
5204  * Free the following resources:
5205  * - memory used for tables
5206  * - MSI/MSI-X
5207  * - net devices
5208  * - resources FW is holding for us
5209  */
5210 static void free_some_resources(struct adapter *adapter)
5211 {
5212 	unsigned int i;
5213 
5214 	kvfree(adapter->mps_encap);
5215 	kvfree(adapter->smt);
5216 	kvfree(adapter->l2t);
5217 	kvfree(adapter->srq);
5218 	t4_cleanup_sched(adapter);
5219 	kvfree(adapter->tids.tid_tab);
5220 	cxgb4_cleanup_tc_flower(adapter);
5221 	cxgb4_cleanup_tc_u32(adapter);
5222 	kfree(adapter->sge.egr_map);
5223 	kfree(adapter->sge.ingr_map);
5224 	kfree(adapter->sge.starving_fl);
5225 	kfree(adapter->sge.txq_maperr);
5226 #ifdef CONFIG_DEBUG_FS
5227 	kfree(adapter->sge.blocked_fl);
5228 #endif
5229 	disable_msi(adapter);
5230 
5231 	for_each_port(adapter, i)
5232 		if (adapter->port[i]) {
5233 			struct port_info *pi = adap2pinfo(adapter, i);
5234 
5235 			if (pi->viid != 0)
5236 				t4_free_vi(adapter, adapter->mbox, adapter->pf,
5237 					   0, pi->viid);
5238 			kfree(adap2pinfo(adapter, i)->rss);
5239 			free_netdev(adapter->port[i]);
5240 		}
5241 	if (adapter->flags & FW_OK)
5242 		t4_fw_bye(adapter, adapter->pf);
5243 }
5244 
5245 #define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
5246 #define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
5247 		   NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
5248 #define SEGMENT_SIZE 128
5249 
5250 static int t4_get_chip_type(struct adapter *adap, int ver)
5251 {
5252 	u32 pl_rev = REV_G(t4_read_reg(adap, PL_REV_A));
5253 
5254 	switch (ver) {
5255 	case CHELSIO_T4:
5256 		return CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
5257 	case CHELSIO_T5:
5258 		return CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
5259 	case CHELSIO_T6:
5260 		return CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
5261 	default:
5262 		break;
5263 	}
5264 	return -EINVAL;
5265 }
5266 
5267 #ifdef CONFIG_PCI_IOV
5268 static void cxgb4_mgmt_setup(struct net_device *dev)
5269 {
5270 	dev->type = ARPHRD_NONE;
5271 	dev->mtu = 0;
5272 	dev->hard_header_len = 0;
5273 	dev->addr_len = 0;
5274 	dev->tx_queue_len = 0;
5275 	dev->flags |= IFF_NOARP;
5276 	dev->priv_flags |= IFF_NO_QUEUE;
5277 
5278 	/* Initialize the device structure. */
5279 	dev->netdev_ops = &cxgb4_mgmt_netdev_ops;
5280 	dev->ethtool_ops = &cxgb4_mgmt_ethtool_ops;
5281 }
5282 
5283 static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
5284 {
5285 	struct adapter *adap = pci_get_drvdata(pdev);
5286 	int err = 0;
5287 	int current_vfs = pci_num_vf(pdev);
5288 	u32 pcie_fw;
5289 
5290 	pcie_fw = readl(adap->regs + PCIE_FW_A);
5291 	/* Check if fw is initialized */
5292 	if (!(pcie_fw & PCIE_FW_INIT_F)) {
5293 		dev_warn(&pdev->dev, "Device not initialized\n");
5294 		return -EOPNOTSUPP;
5295 	}
5296 
	/* If any of the VFs is already assigned to a Guest OS, then
	 * SR-IOV for this adapter cannot be modified.
5299 	 */
	if (current_vfs && pci_vfs_assigned(pdev)) {
		dev_err(&pdev->dev,
			"Cannot modify SR-IOV while VFs are assigned\n");
		return current_vfs;
	}
	/* Note that the upper-level code ensures that we're never called with
	 * a non-zero "num_vfs" when we already have VFs instantiated.  But
	 * it never hurts to code defensively.
	 */
	if (num_vfs != 0 && current_vfs != 0)
		return -EBUSY;

	/* Nothing to do for no change. */
	if (num_vfs == current_vfs)
		return num_vfs;

	/* Disable SRIOV when zero is passed. */
	if (!num_vfs) {
		pci_disable_sriov(pdev);
		/* free VF Management Interface */
		unregister_netdev(adap->port[0]);
		free_netdev(adap->port[0]);
		adap->port[0] = NULL;

		/* free VF resources */
		adap->num_vfs = 0;
		kfree(adap->vfinfo);
		adap->vfinfo = NULL;
		return 0;
	}

	if (!current_vfs) {
		struct fw_pfvf_cmd port_cmd, port_rpl;
		struct net_device *netdev;
		unsigned int pmask, port;
		struct pci_dev *pbridge;
		struct port_info *pi;
		char name[IFNAMSIZ];
		u32 devcap2;
		u16 flags;
		int pos;

		/* If we want to instantiate Virtual Functions, then our
		 * parent bridge's PCI-E needs to support Alternative Routing
		 * ID (ARI) because our VFs will show up at function offset 8
		 * and above.
		 */
		pbridge = pdev->bus->self;
		pos = pci_find_capability(pbridge, PCI_CAP_ID_EXP);
		pci_read_config_word(pbridge, pos + PCI_EXP_FLAGS, &flags);
		pci_read_config_dword(pbridge, pos + PCI_EXP_DEVCAP2, &devcap2);

		if ((flags & PCI_EXP_FLAGS_VERS) < 2 ||
		    !(devcap2 & PCI_EXP_DEVCAP2_ARI)) {
			/* Our parent bridge does not support ARI so issue a
			 * warning and skip instantiating the VFs.  They
			 * won't be reachable.
			 */
			dev_warn(&pdev->dev, "Parent bridge %02x:%02x.%x doesn't support ARI; can't instantiate Virtual Functions\n",
				 pbridge->bus->number, PCI_SLOT(pbridge->devfn),
				 PCI_FUNC(pbridge->devfn));
			return -ENOTSUPP;
		}
		memset(&port_cmd, 0, sizeof(port_cmd));
		port_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) |
						 FW_CMD_REQUEST_F |
						 FW_CMD_READ_F |
						 FW_PFVF_CMD_PFN_V(adap->pf) |
						 FW_PFVF_CMD_VFN_V(0));
		port_cmd.retval_len16 = cpu_to_be32(FW_LEN16(port_cmd));
		err = t4_wr_mbox(adap, adap->mbox, &port_cmd, sizeof(port_cmd),
				 &port_rpl);
		if (err)
			return err;
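		/* The PFVF command reply carries the mask of ports this PF
		 * may access; bind the management interface to the lowest
		 * one.
		 */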
		pmask = FW_PFVF_CMD_PMASK_G(be32_to_cpu(port_rpl.type_to_neq));
		port = ffs(pmask) - 1;
		/* Allocate VF Management Interface. */
		snprintf(name, IFNAMSIZ, "mgmtpf%d,%d", adap->adap_idx,
			 adap->pf);
		netdev = alloc_netdev(sizeof(struct port_info),
				      name, NET_NAME_UNKNOWN, cxgb4_mgmt_setup);
		if (!netdev)
			return -ENOMEM;

		pi = netdev_priv(netdev);
		pi->adapter = adap;
		pi->lport = port;
		pi->tx_chan = port;
		SET_NETDEV_DEV(netdev, &pdev->dev);

		adap->port[0] = netdev;
		pi->port_id = 0;

		err = register_netdev(adap->port[0]);
		if (err) {
			pr_info("Unable to register VF mgmt netdev %s\n", name);
			free_netdev(adap->port[0]);
			adap->port[0] = NULL;
			return err;
		}
		/* Allocate and set up VF Information. */
		adap->vfinfo = kcalloc(pci_sriov_get_totalvfs(pdev),
				       sizeof(struct vf_info), GFP_KERNEL);
		if (!adap->vfinfo) {
			unregister_netdev(adap->port[0]);
			free_netdev(adap->port[0]);
			adap->port[0] = NULL;
			return -ENOMEM;
		}
		cxgb4_mgmt_fill_vf_station_mac_addr(adap);
	}
	/* Instantiate the requested number of VFs. */
	err = pci_enable_sriov(pdev, num_vfs);
	if (err) {
		pr_info("Unable to instantiate %d VFs\n", num_vfs);
		if (!current_vfs) {
			unregister_netdev(adap->port[0]);
			free_netdev(adap->port[0]);
			adap->port[0] = NULL;
			kfree(adap->vfinfo);
			adap->vfinfo = NULL;
		}
		return err;
	}

	adap->num_vfs = num_vfs;
	return num_vfs;
}
#endif /* CONFIG_PCI_IOV */

static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct adapter *adapter;
	static int adap_idx = 1;
	int s_qpp, qpp, num_seg;
	struct port_info *pi;
	bool highdma = false;
	enum chip_type chip;
	void __iomem *regs;
	int func, chip_ver;
	u16 device_id;
	int i, err;
	u32 whoami;

	printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);

	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		return err;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out_release_regions;
	}

	regs = pci_ioremap_bar(pdev, 0);
	if (!regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_disable_device;
	}

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_unmap_bar0;
	}

	adapter->regs = regs;
	err = t4_wait_dev_ready(regs);
	if (err < 0)
		goto out_free_adapter;

	/* We control everything through one PF */
	whoami = t4_read_reg(adapter, PL_WHOAMI_A);
	pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
	err = t4_get_chip_type(adapter, CHELSIO_PCI_ID_VER(device_id));
	if (err < 0) {
		/* Don't test the enum chip_type directly: its signedness is
		 * implementation-defined, so a negative return could be lost.
		 */
		dev_err(&pdev->dev, "Device %d is not supported\n", device_id);
		goto out_free_adapter;
	}
	chip = err;
	chip_ver = CHELSIO_CHIP_VERSION(chip);
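	/* T6 relocated the SOURCEPF field within PL_WHOAMI, so decode the
	 * PF number according to the chip generation.
	 */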
	func = chip_ver <= CHELSIO_T5 ?
	       SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);

	adapter->pdev = pdev;
	adapter->pdev_dev = &pdev->dev;
	adapter->name = pci_name(pdev);
	adapter->mbox = func;
	adapter->pf = func;
	adapter->params.chip = chip;
	adapter->adap_idx = adap_idx;
	adapter->msg_enable = DFLT_MSG_ENABLE;
	adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
				    (sizeof(struct mbox_cmd) *
				     T4_OS_LOG_MBOX_CMDS),
				    GFP_KERNEL);
	if (!adapter->mbox_log) {
		err = -ENOMEM;
		goto out_free_adapter;
	}
	spin_lock_init(&adapter->mbox_lock);
	INIT_LIST_HEAD(&adapter->mlist.list);
	adapter->mbox_log->size = T4_OS_LOG_MBOX_CMDS;
	pci_set_drvdata(pdev, adapter);

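	/* Only the PF named in the PCI Device ID Table entry drives the
	 * adapter; any other PF is left disabled with its state saved so
	 * that SR-IOV can still be configured through it later.
	 */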
	if (func != ent->driver_data) {
		pci_disable_device(pdev);
		pci_save_state(pdev);        /* to restore SR-IOV later */
		return 0;
	}

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		highdma = true;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for coherent allocations\n");
			goto out_free_adapter;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "no usable DMA configuration\n");
			goto out_free_adapter;
		}
	}

	pci_enable_pcie_error_reporting(pdev);
	pci_set_master(pdev);
	pci_save_state(pdev);
	adap_idx++;
	adapter->workq = create_singlethread_workqueue("cxgb4");
	if (!adapter->workq) {
		err = -ENOMEM;
		goto out_free_adapter;
	}

	/* PCI device has been enabled */
	adapter->flags |= DEV_ENABLED;
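	/* Mark every Tx channel as unmapped for now; entries are filled in
	 * as each port's net device registers successfully below.
	 */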
	memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));

	/* If possible, we use PCIe Relaxed Ordering Attribute to deliver
	 * Ingress Packet Data to Free List Buffers in order to allow for
	 * chipset performance optimizations between the Root Complex and
	 * Memory Controllers.  (Messages to the associated Ingress Queue
	 * notifying new Packet Placement in the Free List Buffers will be
	 * sent without the Relaxed Ordering Attribute, thus guaranteeing
	 * that all preceding PCIe Transaction Layer Packets will be
	 * processed first.)  But some Root Complexes have various issues
	 * with Upstream Transaction Layer Packets with the Relaxed Ordering
	 * Attribute set.  The PCI core clears the Relaxed Ordering Enable
	 * bit of devices below such Root Complexes, so we check our PCIe
	 * configuration space to see if it's flagged with advice against
	 * using Relaxed Ordering.
	 */
	if (!pcie_relaxed_ordering_enabled(pdev))
		adapter->flags |= ROOT_NO_RELAXED_ORDERING;

	spin_lock_init(&adapter->stats_lock);
	spin_lock_init(&adapter->tid_release_lock);
	spin_lock_init(&adapter->win0_lock);

	INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
	INIT_WORK(&adapter->db_full_task, process_db_full);
	INIT_WORK(&adapter->db_drop_task, process_db_drop);
	INIT_WORK(&adapter->fatal_err_notify_task, notify_fatal_err);

	err = t4_prep_adapter(adapter);
	if (err)
		goto out_free_adapter;

	if (is_kdump_kernel()) {
		/* Collect hardware state and append to /proc/vmcore */
		err = cxgb4_cudbg_vmcore_add_dump(adapter);
		if (err) {
			dev_warn(adapter->pdev_dev,
				 "Failed to collect vmcore device dump, err: %d.  Continuing\n",
				 err);
			err = 0;
		}
	}

	if (!is_t4(adapter->params.chip)) {
		s_qpp = (QUEUESPERPAGEPF0_S +
			(QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) *
			adapter->pf);
		qpp = 1 << QUEUESPERPAGEPF0_G(t4_read_reg(adapter,
		      SGE_EGRESS_QUEUES_PER_PAGE_PF_A) >> s_qpp);
		num_seg = PAGE_SIZE / SEGMENT_SIZE;

		/* Each segment is 128B.  Write coalescing is enabled only
		 * when the SGE_EGRESS_QUEUES_PER_PAGE_PF register value for
		 * the queue is less than the number of segments that fit in
		 * a page, so reject configurations that exceed it.
		 */
		if (qpp > num_seg) {
			dev_err(&pdev->dev,
				"Incorrect number of egress queues per page\n");
			err = -EINVAL;
			goto out_free_adapter;
		}
		adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
					   pci_resource_len(pdev, 2));
		if (!adapter->bar2) {
			dev_err(&pdev->dev, "cannot map device bar2 region\n");
			err = -ENOMEM;
			goto out_free_adapter;
		}
	}

	setup_memwin(adapter);
	err = adap_init0(adapter);
#ifdef CONFIG_DEBUG_FS
	bitmap_zero(adapter->sge.blocked_fl, adapter->sge.egr_sz);
#endif
	setup_memwin_rdma(adapter);
	if (err)
		goto out_unmap_bar;

	/* configure SGE_STAT_CFG_A to read WC stats */
	if (!is_t4(adapter->params.chip))
		t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7) |
			     (is_t5(adapter->params.chip) ? STATMODE_V(0) :
			      T6_STATMODE_V(0)));

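	/* Allocate and minimally initialise a net device per port; the
	 * feature flags below are refined according to the chip generation.
	 */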
	for_each_port(adapter, i) {
		netdev = alloc_etherdev_mq(sizeof(struct port_info),
					   MAX_ETH_QSETS);
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->xact_addr_filt = -1;
		pi->port_id = i;
		netdev->irq = pdev->irq;

		netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			NETIF_F_RXCSUM | NETIF_F_RXHASH |
			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
			NETIF_F_HW_TC;

		if (chip_ver > CHELSIO_T5) {
			netdev->hw_enc_features |= NETIF_F_IP_CSUM |
						   NETIF_F_IPV6_CSUM |
						   NETIF_F_RXCSUM |
						   NETIF_F_GSO_UDP_TUNNEL |
						   NETIF_F_TSO | NETIF_F_TSO6;

			netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
		}

		if (highdma)
			netdev->hw_features |= NETIF_F_HIGHDMA;
		netdev->features |= netdev->hw_features;
		netdev->vlan_features = netdev->features & VLAN_FEAT;

		netdev->priv_flags |= IFF_UNICAST_FLT;

		/* MTU range: 81 - 9600 */
		netdev->min_mtu = 81;              /* accommodate SACK */
		netdev->max_mtu = MAX_MTU;

		netdev->netdev_ops = &cxgb4_netdev_ops;
#ifdef CONFIG_CHELSIO_T4_DCB
		netdev->dcbnl_ops = &cxgb4_dcb_ops;
		cxgb4_dcb_state_init(netdev);
		cxgb4_dcb_version_init(netdev);
#endif
		cxgb4_set_ethtool_ops(netdev);
	}

	cxgb4_init_ethtool_dump(adapter);

	pci_set_drvdata(pdev, adapter);

	if (adapter->flags & FW_OK) {
		err = t4_port_init(adapter, func, func, 0);
		if (err)
			goto out_free_dev;
	} else if (adapter->params.nports == 1) {
		/* If we don't have a connection to the firmware -- possibly
		 * because of an error -- grab the raw VPD parameters so we
		 * can set the proper MAC Address on the debug network
		 * interface that we've created.
		 */
		u8 hw_addr[ETH_ALEN];
		u8 *na = adapter->params.vpd.na;

		err = t4_get_raw_vpd_params(adapter, &adapter->params.vpd);
		if (!err) {
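			/* The VPD "na" field holds the MAC address as hex
			 * ASCII, two characters per byte.
			 */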
			for (i = 0; i < ETH_ALEN; i++)
				hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
					      hex2val(na[2 * i + 1]));
			t4_set_hw_addr(adapter, 0, hw_addr);
		}
	}

	if (!(adapter->flags & FW_OK))
		goto fw_attach_fail;

	/* Configure queues and allocate tables now, they can be needed as
	 * soon as the first register_netdev completes.
	 */
	err = cfg_queues(adapter);
	if (err)
		goto out_free_dev;

	adapter->smt = t4_init_smt();
	if (!adapter->smt) {
		/* We tolerate a lack of SMT, giving up some functionality */
		dev_warn(&pdev->dev, "could not allocate SMT, continuing\n");
	}

	adapter->l2t = t4_init_l2t(adapter->l2t_start, adapter->l2t_end);
	if (!adapter->l2t) {
		/* We tolerate a lack of L2T, giving up some functionality */
		dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
		adapter->params.offload = 0;
	}

	adapter->mps_encap = kvcalloc(adapter->params.arch.mps_tcam_size,
				      sizeof(struct mps_encap_entry),
				      GFP_KERNEL);
	if (!adapter->mps_encap)
		dev_warn(&pdev->dev, "could not allocate MPS Encap entries, continuing\n");

#if IS_ENABLED(CONFIG_IPV6)
	if (chip_ver <= CHELSIO_T5 &&
	    (!(t4_read_reg(adapter, LE_DB_CONFIG_A) & ASLIPCOMPEN_F))) {
		/* CLIP functionality is not present in hardware,
		 * hence disable all offload features
		 */
		dev_warn(&pdev->dev,
			 "CLIP not enabled in hardware, continuing\n");
		adapter->params.offload = 0;
	} else {
		adapter->clipt = t4_init_clip_tbl(adapter->clipt_start,
						  adapter->clipt_end);
		if (!adapter->clipt) {
			/* We tolerate a lack of clip_table, giving up
			 * some functionality
			 */
			dev_warn(&pdev->dev,
				 "could not allocate Clip table, continuing\n");
			adapter->params.offload = 0;
		}
	}
#endif

	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		pi->sched_tbl = t4_init_sched(adapter->params.nsched_cls);
		if (!pi->sched_tbl)
			dev_warn(&pdev->dev,
				 "could not activate scheduling on port %d\n",
				 i);
	}

	if (tid_init(&adapter->tids) < 0) {
		dev_warn(&pdev->dev,
			 "could not allocate TID table, continuing\n");
		adapter->params.offload = 0;
	} else {
		adapter->tc_u32 = cxgb4_init_tc_u32(adapter);
		if (!adapter->tc_u32)
			dev_warn(&pdev->dev,
				 "could not offload tc u32, continuing\n");

		if (cxgb4_init_tc_flower(adapter))
			dev_warn(&pdev->dev,
				 "could not offload tc flower, continuing\n");
	}

	if (is_offload(adapter) || is_hashfilter(adapter)) {
		if (t4_read_reg(adapter, LE_DB_CONFIG_A) & HASHEN_F) {
			u32 hash_base, hash_reg;

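			/* The hash TID base register moved and changed
			 * scaling in T6: T5 and earlier report it multiplied
			 * by 4, T6 reports it directly.
			 */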
			if (chip_ver <= CHELSIO_T5) {
				hash_reg = LE_DB_TID_HASHBASE_A;
				hash_base = t4_read_reg(adapter, hash_reg);
				adapter->tids.hash_base = hash_base / 4;
			} else {
				hash_reg = T6_LE_DB_HASH_TID_BASE_A;
				hash_base = t4_read_reg(adapter, hash_reg);
				adapter->tids.hash_base = hash_base;
			}
		}
	}

	/* See what interrupts we'll be using: prefer MSI-X, then MSI, then
	 * legacy INTx, as permitted by the "msi" module parameter.  When
	 * falling back from MSI-X to MSI, release any MSI-X bookkeeping the
	 * failed attempt allocated.
	 */
	if (msi > 1 && enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0) {
		adapter->flags |= USING_MSI;
		if (msi > 1)
			free_msix_info(adapter);
	}

	/* check for PCI Express bandwidth capabilities */
	pcie_print_link_status(pdev);

	err = init_rss(adapter);
	if (err)
		goto out_free_dev;

	err = setup_fw_sge_queues(adapter);
	if (err) {
		dev_err(adapter->pdev_dev,
			"FW sge queue allocation failed, err %d\n", err);
		goto out_free_dev;
	}

fw_attach_fail:
	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		adapter->port[i]->dev_port = pi->lport;
		netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
		netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);

		netif_carrier_off(adapter->port[i]);

		err = register_netdev(adapter->port[i]);
		if (err)
			break;
		adapter->chan_map[pi->tx_chan] = i;
		print_port_info(adapter->port[i]);
	}
	if (i == 0) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}
	if (err) {
		dev_warn(&pdev->dev, "only %d net devices registered\n", i);
		err = 0;
	}

	if (cxgb4_debugfs_root) {
		adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
							   cxgb4_debugfs_root);
		setup_debugfs(adapter);
	}

	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
	pdev->needs_freset = 1;

	if (is_uld(adapter)) {
		mutex_lock(&uld_mutex);
		list_add_tail(&adapter->list_node, &adapter_list);
		mutex_unlock(&uld_mutex);
	}

	if (!is_t4(adapter->params.chip))
		cxgb4_ptp_init(adapter);

	if (IS_REACHABLE(CONFIG_THERMAL) &&
	    !is_t4(adapter->params.chip) && (adapter->flags & FW_OK))
		cxgb4_thermal_init(adapter);

	print_adapter_info(adapter);
	return 0;

 out_free_dev:
	t4_free_sge_resources(adapter);
	free_some_resources(adapter);
	if (adapter->flags & USING_MSIX)
		free_msix_info(adapter);
	if (adapter->num_uld || adapter->num_ofld_uld)
		t4_uld_mem_free(adapter);
 out_unmap_bar:
	if (!is_t4(adapter->params.chip))
		iounmap(adapter->bar2);
 out_free_adapter:
	if (adapter->workq)
		destroy_workqueue(adapter->workq);

	kfree(adapter->mbox_log);
	kfree(adapter);
 out_unmap_bar0:
	iounmap(regs);
 out_disable_device:
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
 out_release_regions:
	pci_release_regions(pdev);
	return err;
}

static void remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter) {
		pci_release_regions(pdev);
		return;
	}

	adapter->flags |= SHUTTING_DOWN;

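	/* Only the PF that went all the way through init_one() (PF4 for
	 * these adapters) owns the full set of resources; other PFs exist
	 * solely to manage SR-IOV and only need their VFs torn down.
	 */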
	if (adapter->pf == 4) {
		int i;

		/* Tear down per-adapter Work Queue first since it can contain
		 * references to our adapter data structure.
		 */
		destroy_workqueue(adapter->workq);

		if (is_uld(adapter)) {
			detach_ulds(adapter);
			t4_uld_clean_up(adapter);
		}

		adap_free_hma_mem(adapter);

		disable_interrupts(adapter);

		for_each_port(adapter, i)
			if (adapter->port[i]->reg_state == NETREG_REGISTERED)
				unregister_netdev(adapter->port[i]);

		debugfs_remove_recursive(adapter->debugfs_root);

		if (!is_t4(adapter->params.chip))
			cxgb4_ptp_stop(adapter);
		if (IS_REACHABLE(CONFIG_THERMAL))
			cxgb4_thermal_remove(adapter);

		/* If we allocated filters, free up state associated with any
		 * valid filters ...
		 */
		clear_all_filters(adapter);

		if (adapter->flags & FULL_INIT_DONE)
			cxgb_down(adapter);

		if (adapter->flags & USING_MSIX)
			free_msix_info(adapter);
		if (adapter->num_uld || adapter->num_ofld_uld)
			t4_uld_mem_free(adapter);
		free_some_resources(adapter);
#if IS_ENABLED(CONFIG_IPV6)
		t4_cleanup_clip_tbl(adapter);
#endif
		if (!is_t4(adapter->params.chip))
			iounmap(adapter->bar2);
	}
#ifdef CONFIG_PCI_IOV
	else {
		cxgb4_iov_configure(adapter->pdev, 0);
	}
#endif
	iounmap(adapter->regs);
	pci_disable_pcie_error_reporting(pdev);
	if (adapter->flags & DEV_ENABLED) {
		pci_disable_device(pdev);
		adapter->flags &= ~DEV_ENABLED;
	}
	pci_release_regions(pdev);
	kfree(adapter->mbox_log);
	synchronize_rcu();
	kfree(adapter);
}

/* "Shutdown" quiesces the device, stopping Ingress Packet and Interrupt
 * delivery.  This is essentially a stripped down version of the PCI remove()
 * function where we do the minimal amount of work necessary to shutdown any
 * further activity.
 */
static void shutdown_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	/* As in remove_one() above, we only want to do cleanup on PCI
	 * devices which went all the way through init_one() ...
	 */
	if (!adapter) {
		pci_release_regions(pdev);
		return;
	}

	adapter->flags |= SHUTTING_DOWN;

	if (adapter->pf == 4) {
		int i;

		for_each_port(adapter, i)
			if (adapter->port[i]->reg_state == NETREG_REGISTERED)
				cxgb_close(adapter->port[i]);

		if (is_uld(adapter)) {
			detach_ulds(adapter);
			t4_uld_clean_up(adapter);
		}

		disable_interrupts(adapter);
		disable_msi(adapter);

		t4_sge_stop(adapter);
		if (adapter->flags & FW_OK)
			t4_fw_bye(adapter, adapter->mbox);
	}
}

static struct pci_driver cxgb4_driver = {
	.name     = KBUILD_MODNAME,
	.id_table = cxgb4_pci_tbl,
	.probe    = init_one,
	.remove   = remove_one,
	.shutdown = shutdown_one,
#ifdef CONFIG_PCI_IOV
	.sriov_configure = cxgb4_iov_configure,
#endif
	.err_handler = &cxgb4_eeh,
};

static int __init cxgb4_init_module(void)
{
	int ret;

	/* Debugfs support is optional, just warn if this fails */
	cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
	if (!cxgb4_debugfs_root)
		pr_warn("could not create debugfs entry, continuing\n");

	ret = pci_register_driver(&cxgb4_driver);
	if (ret < 0)
		debugfs_remove(cxgb4_debugfs_root);

#if IS_ENABLED(CONFIG_IPV6)
	if (!inet6addr_registered) {
		register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
		inet6addr_registered = true;
	}
#endif

	return ret;
}

static void __exit cxgb4_cleanup_module(void)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (inet6addr_registered) {
		unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
		inet6addr_registered = false;
	}
#endif
	pci_unregister_driver(&cxgb4_driver);
	debugfs_remove(cxgb4_debugfs_root);  /* NULL ok */
}

module_init(cxgb4_init_module);
module_exit(cxgb4_cleanup_module);