1 /*
2  * This file is part of the Chelsio T4 Ethernet driver for Linux.
3  *
4  * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  *     Redistribution and use in source and binary forms, with or
13  *     without modification, are permitted provided that the following
14  *     conditions are met:
15  *
16  *      - Redistributions of source code must retain the above
17  *        copyright notice, this list of conditions and the following
18  *        disclaimer.
19  *
20  *      - Redistributions in binary form must reproduce the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer in the documentation and/or other materials
23  *        provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  */
34 
35 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
36 
37 #include <linux/bitmap.h>
38 #include <linux/crc32.h>
39 #include <linux/ctype.h>
40 #include <linux/debugfs.h>
41 #include <linux/err.h>
42 #include <linux/etherdevice.h>
43 #include <linux/firmware.h>
44 #include <linux/if.h>
45 #include <linux/if_vlan.h>
46 #include <linux/init.h>
47 #include <linux/log2.h>
48 #include <linux/mdio.h>
49 #include <linux/module.h>
50 #include <linux/moduleparam.h>
51 #include <linux/mutex.h>
52 #include <linux/netdevice.h>
53 #include <linux/pci.h>
54 #include <linux/aer.h>
55 #include <linux/rtnetlink.h>
56 #include <linux/sched.h>
57 #include <linux/seq_file.h>
58 #include <linux/sockios.h>
59 #include <linux/vmalloc.h>
60 #include <linux/workqueue.h>
61 #include <net/neighbour.h>
62 #include <net/netevent.h>
63 #include <net/addrconf.h>
64 #include <net/bonding.h>
65 #include <linux/uaccess.h>
66 #include <linux/crash_dump.h>
67 #include <net/udp_tunnel.h>
68 #include <net/xfrm.h>
69 #if defined(CONFIG_CHELSIO_TLS_DEVICE)
70 #include <net/tls.h>
71 #endif
72 
73 #include "cxgb4.h"
74 #include "cxgb4_filter.h"
75 #include "t4_regs.h"
76 #include "t4_values.h"
77 #include "t4_msg.h"
78 #include "t4fw_api.h"
79 #include "t4fw_version.h"
80 #include "cxgb4_dcb.h"
81 #include "srq.h"
82 #include "cxgb4_debugfs.h"
83 #include "clip_tbl.h"
84 #include "l2t.h"
85 #include "smt.h"
86 #include "sched.h"
87 #include "cxgb4_tc_u32.h"
88 #include "cxgb4_tc_flower.h"
89 #include "cxgb4_tc_mqprio.h"
90 #include "cxgb4_tc_matchall.h"
91 #include "cxgb4_ptp.h"
92 #include "cxgb4_cudbg.h"
93 
94 char cxgb4_driver_name[] = KBUILD_MODNAME;
95 
96 #define DRV_DESC "Chelsio T4/T5/T6 Network Driver"
97 
98 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
99 			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
100 			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
101 
102 /* Macros needed to support the PCI Device ID Table ...
103  */
104 #define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
105 	static const struct pci_device_id cxgb4_pci_tbl[] = {
106 #define CXGB4_UNIFIED_PF 0x4
107 
108 #define CH_PCI_DEVICE_ID_FUNCTION CXGB4_UNIFIED_PF
109 
110 /* Include PCI Device IDs for both PF4 and PF0-3 so our PCI probe() routine is
111  * called for both.
112  */
113 #define CH_PCI_DEVICE_ID_FUNCTION2 0x0
114 
115 #define CH_PCI_ID_TABLE_ENTRY(devid) \
116 		{PCI_VDEVICE(CHELSIO, (devid)), CXGB4_UNIFIED_PF}
117 
118 #define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
119 		{ 0, } \
120 	}
121 
122 #include "t4_pci_id_tbl.h"
123 
124 #define FW4_FNAME "cxgb4/t4fw.bin"
125 #define FW5_FNAME "cxgb4/t5fw.bin"
126 #define FW6_FNAME "cxgb4/t6fw.bin"
127 #define FW4_CFNAME "cxgb4/t4-config.txt"
128 #define FW5_CFNAME "cxgb4/t5-config.txt"
129 #define FW6_CFNAME "cxgb4/t6-config.txt"
130 #define PHY_AQ1202_FIRMWARE "cxgb4/aq1202_fw.cld"
131 #define PHY_BCM84834_FIRMWARE "cxgb4/bcm8483.bin"
132 #define PHY_AQ1202_DEVICEID 0x4409
133 #define PHY_BCM84834_DEVICEID 0x4486
134 
135 MODULE_DESCRIPTION(DRV_DESC);
136 MODULE_AUTHOR("Chelsio Communications");
137 MODULE_LICENSE("Dual BSD/GPL");
138 MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
139 MODULE_FIRMWARE(FW4_FNAME);
140 MODULE_FIRMWARE(FW5_FNAME);
141 MODULE_FIRMWARE(FW6_FNAME);
142 
143 /*
144  * The driver uses the best interrupt scheme available on a platform in the
145  * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
146  * of these schemes the driver may consider as follows:
147  *
148  * msi = 2: choose from among all three options
149  * msi = 1: only consider MSI and INTx interrupts
150  * msi = 0: force INTx interrupts
151  */
152 static int msi = 2;
153 
154 module_param(msi, int, 0644);
155 MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
156 
157 /*
158  * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
159  * offset by 2 bytes in order to have the IP headers line up on 4-byte
160  * boundaries.  This is a requirement for many architectures which will throw
161  * a machine check fault if an attempt is made to access one of the 4-byte IP
 * header fields on a non-4-byte boundary.  And it's a major performance issue
 * even on some architectures which allow it, like some implementations of the
 * x86 ISA.  However, some architectures don't mind this, and for some very
165  * edge-case performance sensitive applications (like forwarding large volumes
166  * of small packets), setting this DMA offset to 0 will decrease the number of
167  * PCI-E Bus transfers enough to measurably affect performance.
168  */
169 static int rx_dma_offset = 2;
170 
/* TX queue selection: determines which algorithm is used to select the TX
 * queue. Select between the kernel-provided function (select_queue=0) or the
 * driver's cxgb_select_queue() function (select_queue=1).
174  *
175  * Default: select_queue=0
176  */
177 static int select_queue;
178 module_param(select_queue, int, 0644);
179 MODULE_PARM_DESC(select_queue,
180 		 "Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method.");
181 
182 static struct dentry *cxgb4_debugfs_root;
183 
184 LIST_HEAD(adapter_list);
185 DEFINE_MUTEX(uld_mutex);
186 LIST_HEAD(uld_list);
187 
188 static int cfg_queues(struct adapter *adap);
189 
190 static void link_report(struct net_device *dev)
191 {
192 	if (!netif_carrier_ok(dev))
193 		netdev_info(dev, "link down\n");
194 	else {
195 		static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };
196 
197 		const char *s;
198 		const struct port_info *p = netdev_priv(dev);
199 
200 		switch (p->link_cfg.speed) {
201 		case 100:
202 			s = "100Mbps";
203 			break;
204 		case 1000:
205 			s = "1Gbps";
206 			break;
207 		case 10000:
208 			s = "10Gbps";
209 			break;
210 		case 25000:
211 			s = "25Gbps";
212 			break;
213 		case 40000:
214 			s = "40Gbps";
215 			break;
216 		case 50000:
217 			s = "50Gbps";
218 			break;
219 		case 100000:
220 			s = "100Gbps";
221 			break;
222 		default:
223 			pr_info("%s: unsupported speed: %d\n",
224 				dev->name, p->link_cfg.speed);
225 			return;
226 		}
227 
228 		netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
229 			    fc[p->link_cfg.fc]);
230 	}
231 }
232 
233 #ifdef CONFIG_CHELSIO_T4_DCB
234 /* Set up/tear down Data Center Bridging Priority mapping for a net device. */
235 static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
236 {
237 	struct port_info *pi = netdev_priv(dev);
238 	struct adapter *adap = pi->adapter;
239 	struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];
240 	int i;
241 
242 	/* We use a simple mapping of Port TX Queue Index to DCB
243 	 * Priority when we're enabling DCB.
244 	 */
245 	for (i = 0; i < pi->nqsets; i++, txq++) {
246 		u32 name, value;
247 		int err;
248 
249 		name = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
250 			FW_PARAMS_PARAM_X_V(
251 				FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) |
252 			FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id));
253 		value = enable ? i : 0xffffffff;
254 
255 		/* Since we can be called while atomic (from "interrupt
		 * level") we need to issue the Set Parameters Command
257 		 * without sleeping (timeout < 0).
258 		 */
259 		err = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
260 					    &name, &value,
261 					    -FW_CMD_MAX_TIMEOUT);
262 
263 		if (err)
264 			dev_err(adap->pdev_dev,
265 				"Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
266 				enable ? "set" : "unset", pi->port_id, i, -err);
267 		else
268 			txq->dcb_prio = enable ? value : 0;
269 	}
270 }
271 
272 int cxgb4_dcb_enabled(const struct net_device *dev)
273 {
274 	struct port_info *pi = netdev_priv(dev);
275 
276 	if (!pi->dcb.enabled)
277 		return 0;
278 
279 	return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) ||
280 		(pi->dcb.state == CXGB4_DCB_STATE_HOST));
281 }
282 #endif /* CONFIG_CHELSIO_T4_DCB */
283 
284 void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
285 {
286 	struct net_device *dev = adapter->port[port_id];
287 
288 	/* Skip changes from disabled ports. */
289 	if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
290 		if (link_stat)
291 			netif_carrier_on(dev);
292 		else {
293 #ifdef CONFIG_CHELSIO_T4_DCB
294 			if (cxgb4_dcb_enabled(dev)) {
295 				cxgb4_dcb_reset(dev);
296 				dcb_tx_queue_prio_enable(dev, false);
297 			}
298 #endif /* CONFIG_CHELSIO_T4_DCB */
299 			netif_carrier_off(dev);
300 		}
301 
302 		link_report(dev);
303 	}
304 }
305 
306 void t4_os_portmod_changed(struct adapter *adap, int port_id)
307 {
308 	static const char *mod_str[] = {
309 		NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
310 	};
311 
312 	struct net_device *dev = adap->port[port_id];
313 	struct port_info *pi = netdev_priv(dev);
314 
315 	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
316 		netdev_info(dev, "port module unplugged\n");
317 	else if (pi->mod_type < ARRAY_SIZE(mod_str))
318 		netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
319 	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
320 		netdev_info(dev, "%s: unsupported port module inserted\n",
321 			    dev->name);
322 	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
323 		netdev_info(dev, "%s: unknown port module inserted\n",
324 			    dev->name);
325 	else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR)
326 		netdev_info(dev, "%s: transceiver module error\n", dev->name);
327 	else
328 		netdev_info(dev, "%s: unknown module type %d inserted\n",
329 			    dev->name, pi->mod_type);
330 
331 	/* If the interface is running, then we'll need any "sticky" Link
332 	 * Parameters redone with a new Transceiver Module.
333 	 */
334 	pi->link_cfg.redo_l1cfg = netif_running(dev);
335 }
336 
337 int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
338 module_param(dbfifo_int_thresh, int, 0644);
339 MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");
340 
341 /*
342  * usecs to sleep while draining the dbfifo
343  */
344 static int dbfifo_drain_delay = 1000;
345 module_param(dbfifo_drain_delay, int, 0644);
346 MODULE_PARM_DESC(dbfifo_drain_delay,
347 		 "usecs to sleep while draining the dbfifo");
348 
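/*
 * Recompute the hash filter vector from the adapter's hash MAC address list
 * and program it for the port's VI.
 */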
349 static inline int cxgb4_set_addr_hash(struct port_info *pi)
350 {
351 	struct adapter *adap = pi->adapter;
352 	u64 vec = 0;
353 	bool ucast = false;
354 	struct hash_mac_addr *entry;
355 
356 	/* Calculate the hash vector for the updated list and program it */
357 	list_for_each_entry(entry, &adap->mac_hlist, list) {
358 		ucast |= is_unicast_ether_addr(entry->addr);
359 		vec |= (1ULL << hash_mac_addr(entry->addr));
360 	}
361 	return t4_set_addr_hash(adap, adap->mbox, pi->viid, ucast,
362 				vec, false);
363 }
364 
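/*
 * __dev_uc_sync()/__dev_mc_sync() sync callback: try to install @mac_addr as
 * an exact-match MAC filter; if it ends up in the hash region instead, record
 * it in the adapter's hash address list and reprogram the hash vector.
 */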
365 static int cxgb4_mac_sync(struct net_device *netdev, const u8 *mac_addr)
366 {
367 	struct port_info *pi = netdev_priv(netdev);
368 	struct adapter *adap = pi->adapter;
369 	int ret;
370 	u64 mhash = 0;
371 	u64 uhash = 0;
	/* idx stores the indices of the allocated filters; its size should be
	 * adjusted to match the number of MAC addresses for which we allocate
	 * filters.
	 */
376 
377 	u16 idx[1] = {};
378 	bool free = false;
379 	bool ucast = is_unicast_ether_addr(mac_addr);
380 	const u8 *maclist[1] = {mac_addr};
381 	struct hash_mac_addr *new_entry;
382 
383 	ret = cxgb4_alloc_mac_filt(adap, pi->viid, free, 1, maclist,
384 				   idx, ucast ? &uhash : &mhash, false);
385 	if (ret < 0)
386 		goto out;
	/* If hash != 0, then add the address to the hash addr list
	 * so that at the end we will calculate the hash for the
	 * whole list and program it.
	 */
391 	if (uhash || mhash) {
392 		new_entry = kzalloc(sizeof(*new_entry), GFP_ATOMIC);
393 		if (!new_entry)
394 			return -ENOMEM;
395 		ether_addr_copy(new_entry->addr, mac_addr);
396 		list_add_tail(&new_entry->list, &adap->mac_hlist);
397 		ret = cxgb4_set_addr_hash(pi);
398 	}
399 out:
400 	return ret < 0 ? ret : 0;
401 }
402 
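/*
 * __dev_uc_sync()/__dev_mc_sync() unsync callback: if @mac_addr was tracked
 * in the hash address list, remove it and reprogram the hash vector;
 * otherwise free its exact-match MAC filter.
 */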
403 static int cxgb4_mac_unsync(struct net_device *netdev, const u8 *mac_addr)
404 {
405 	struct port_info *pi = netdev_priv(netdev);
406 	struct adapter *adap = pi->adapter;
407 	int ret;
408 	const u8 *maclist[1] = {mac_addr};
409 	struct hash_mac_addr *entry, *tmp;
410 
411 	/* If the MAC address to be removed is in the hash addr
412 	 * list, delete it from the list and update hash vector
413 	 */
414 	list_for_each_entry_safe(entry, tmp, &adap->mac_hlist, list) {
415 		if (ether_addr_equal(entry->addr, mac_addr)) {
416 			list_del(&entry->list);
417 			kfree(entry);
418 			return cxgb4_set_addr_hash(pi);
419 		}
420 	}
421 
422 	ret = cxgb4_free_mac_filt(adap, pi->viid, 1, maclist, false);
423 	return ret < 0 ? -EINVAL : 0;
424 }
425 
426 /*
 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
428  * If @mtu is -1 it is left unchanged.
429  */
430 static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
431 {
432 	struct port_info *pi = netdev_priv(dev);
433 	struct adapter *adapter = pi->adapter;
434 
435 	__dev_uc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);
436 	__dev_mc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);
437 
438 	return t4_set_rxmode(adapter, adapter->mbox, pi->viid, mtu,
439 			     (dev->flags & IFF_PROMISC) ? 1 : 0,
440 			     (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
441 			     sleep_ok);
442 }
443 
444 /**
445  *	cxgb4_change_mac - Update match filter for a MAC address.
446  *	@pi: the port_info
447  *	@viid: the VI id
448  *	@tcam_idx: TCAM index of existing filter for old value of MAC address,
449  *		   or -1
450  *	@addr: the new MAC address value
451  *	@persist: whether a new MAC allocation should be persistent
452  *	@smt_idx: the destination to store the new SMT index.
453  *
454  *	Modifies an MPS filter and sets it to the new MAC address if
455  *	@tcam_idx >= 0, or adds the MAC address to a new filter if
456  *	@tcam_idx < 0. In the latter case the address is added persistently
457  *	if @persist is %true.
 *	Addresses are programmed to the hash region if the TCAM runs out of
 *	entries.
460  */
461 int cxgb4_change_mac(struct port_info *pi, unsigned int viid,
462 		     int *tcam_idx, const u8 *addr, bool persist,
463 		     u8 *smt_idx)
464 {
465 	struct adapter *adapter = pi->adapter;
466 	struct hash_mac_addr *entry, *new_entry;
467 	int ret;
468 
469 	ret = t4_change_mac(adapter, adapter->mbox, viid,
470 			    *tcam_idx, addr, persist, smt_idx);
	/* We ran out of TCAM entries. Try programming the hash region. */
472 	if (ret == -ENOMEM) {
		/* If the MAC address to be updated is in the hash addr
		 * list, update it in the list.
		 */
476 		list_for_each_entry(entry, &adapter->mac_hlist, list) {
477 			if (entry->iface_mac) {
478 				ether_addr_copy(entry->addr, addr);
479 				goto set_hash;
480 			}
481 		}
482 		new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
483 		if (!new_entry)
484 			return -ENOMEM;
485 		ether_addr_copy(new_entry->addr, addr);
486 		new_entry->iface_mac = true;
487 		list_add_tail(&new_entry->list, &adapter->mac_hlist);
488 set_hash:
489 		ret = cxgb4_set_addr_hash(pi);
490 	} else if (ret >= 0) {
491 		*tcam_idx = ret;
492 		ret = 0;
493 	}
494 
495 	return ret;
496 }
497 
498 /*
499  *	link_start - enable a port
500  *	@dev: the port to enable
501  *
502  *	Performs the MAC and PHY actions needed to enable a port.
503  */
504 static int link_start(struct net_device *dev)
505 {
506 	int ret;
507 	struct port_info *pi = netdev_priv(dev);
508 	unsigned int mb = pi->adapter->pf;
509 
	/*
	 * We do not set address filters and promiscuity here; the stack does
	 * that step explicitly.
	 */
514 	ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
515 			    !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
516 	if (ret == 0)
517 		ret = cxgb4_update_mac_filt(pi, pi->viid, &pi->xact_addr_filt,
518 					    dev->dev_addr, true, &pi->smt_idx);
519 	if (ret == 0)
520 		ret = t4_link_l1cfg(pi->adapter, mb, pi->tx_chan,
521 				    &pi->link_cfg);
522 	if (ret == 0) {
523 		local_bh_disable();
524 		ret = t4_enable_pi_params(pi->adapter, mb, pi, true,
525 					  true, CXGB4_DCB_ENABLED);
526 		local_bh_enable();
527 	}
528 
529 	return ret;
530 }
531 
532 #ifdef CONFIG_CHELSIO_T4_DCB
533 /* Handle a Data Center Bridging update message from the firmware. */
534 static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
535 {
536 	int port = FW_PORT_CMD_PORTID_G(ntohl(pcmd->op_to_portid));
537 	struct net_device *dev = adap->port[adap->chan_map[port]];
538 	int old_dcb_enabled = cxgb4_dcb_enabled(dev);
539 	int new_dcb_enabled;
540 
541 	cxgb4_dcb_handle_fw_update(adap, pcmd);
542 	new_dcb_enabled = cxgb4_dcb_enabled(dev);
543 
544 	/* If the DCB has become enabled or disabled on the port then we're
545 	 * going to need to set up/tear down DCB Priority parameters for the
546 	 * TX Queues associated with the port.
547 	 */
548 	if (new_dcb_enabled != old_dcb_enabled)
549 		dcb_tx_queue_prio_enable(dev, new_dcb_enabled);
550 }
551 #endif /* CONFIG_CHELSIO_T4_DCB */
552 
553 /* Response queue handler for the FW event queue.
554  */
555 static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
556 			  const struct pkt_gl *gl)
557 {
558 	u8 opcode = ((const struct rss_header *)rsp)->opcode;
559 
560 	rsp++;                                          /* skip RSS header */
561 
562 	/* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
563 	 */
564 	if (unlikely(opcode == CPL_FW4_MSG &&
565 	   ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
566 		rsp++;
567 		opcode = ((const struct rss_header *)rsp)->opcode;
568 		rsp++;
569 		if (opcode != CPL_SGE_EGR_UPDATE) {
570 			dev_err(q->adap->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n"
571 				, opcode);
572 			goto out;
573 		}
574 	}
575 
576 	if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
577 		const struct cpl_sge_egr_update *p = (void *)rsp;
578 		unsigned int qid = EGR_QID_G(ntohl(p->opcode_qid));
579 		struct sge_txq *txq;
580 
581 		txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
582 		txq->restarts++;
583 		if (txq->q_type == CXGB4_TXQ_ETH) {
584 			struct sge_eth_txq *eq;
585 
586 			eq = container_of(txq, struct sge_eth_txq, q);
587 			t4_sge_eth_txq_egress_update(q->adap, eq, -1);
588 		} else {
589 			struct sge_uld_txq *oq;
590 
591 			oq = container_of(txq, struct sge_uld_txq, q);
592 			tasklet_schedule(&oq->qresume_tsk);
593 		}
594 	} else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
595 		const struct cpl_fw6_msg *p = (void *)rsp;
596 
597 #ifdef CONFIG_CHELSIO_T4_DCB
598 		const struct fw_port_cmd *pcmd = (const void *)p->data;
599 		unsigned int cmd = FW_CMD_OP_G(ntohl(pcmd->op_to_portid));
600 		unsigned int action =
601 			FW_PORT_CMD_ACTION_G(ntohl(pcmd->action_to_len16));
602 
603 		if (cmd == FW_PORT_CMD &&
604 		    (action == FW_PORT_ACTION_GET_PORT_INFO ||
605 		     action == FW_PORT_ACTION_GET_PORT_INFO32)) {
606 			int port = FW_PORT_CMD_PORTID_G(
607 					be32_to_cpu(pcmd->op_to_portid));
608 			struct net_device *dev;
609 			int dcbxdis, state_input;
610 
611 			dev = q->adap->port[q->adap->chan_map[port]];
612 			dcbxdis = (action == FW_PORT_ACTION_GET_PORT_INFO
613 			  ? !!(pcmd->u.info.dcbxdis_pkd & FW_PORT_CMD_DCBXDIS_F)
614 			  : !!(be32_to_cpu(pcmd->u.info32.lstatus32_to_cbllen32)
615 			       & FW_PORT_CMD_DCBXDIS32_F));
616 			state_input = (dcbxdis
617 				       ? CXGB4_DCB_INPUT_FW_DISABLED
618 				       : CXGB4_DCB_INPUT_FW_ENABLED);
619 
620 			cxgb4_dcb_state_fsm(dev, state_input);
621 		}
622 
623 		if (cmd == FW_PORT_CMD &&
624 		    action == FW_PORT_ACTION_L2_DCB_CFG)
625 			dcb_rpl(q->adap, pcmd);
626 		else
627 #endif
628 			if (p->type == 0)
629 				t4_handle_fw_rpl(q->adap, p->data);
630 	} else if (opcode == CPL_L2T_WRITE_RPL) {
631 		const struct cpl_l2t_write_rpl *p = (void *)rsp;
632 
633 		do_l2t_write_rpl(q->adap, p);
634 	} else if (opcode == CPL_SMT_WRITE_RPL) {
635 		const struct cpl_smt_write_rpl *p = (void *)rsp;
636 
637 		do_smt_write_rpl(q->adap, p);
638 	} else if (opcode == CPL_SET_TCB_RPL) {
639 		const struct cpl_set_tcb_rpl *p = (void *)rsp;
640 
641 		filter_rpl(q->adap, p);
642 	} else if (opcode == CPL_ACT_OPEN_RPL) {
643 		const struct cpl_act_open_rpl *p = (void *)rsp;
644 
645 		hash_filter_rpl(q->adap, p);
646 	} else if (opcode == CPL_ABORT_RPL_RSS) {
647 		const struct cpl_abort_rpl_rss *p = (void *)rsp;
648 
649 		hash_del_filter_rpl(q->adap, p);
650 	} else if (opcode == CPL_SRQ_TABLE_RPL) {
651 		const struct cpl_srq_table_rpl *p = (void *)rsp;
652 
653 		do_srq_table_rpl(q->adap, p);
654 	} else
655 		dev_err(q->adap->pdev_dev,
656 			"unexpected CPL %#x on FW event queue\n", opcode);
657 out:
658 	return 0;
659 }
660 
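/* Disable whichever of MSI-X or MSI the adapter is currently using. */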
661 static void disable_msi(struct adapter *adapter)
662 {
663 	if (adapter->flags & CXGB4_USING_MSIX) {
664 		pci_disable_msix(adapter->pdev);
665 		adapter->flags &= ~CXGB4_USING_MSIX;
666 	} else if (adapter->flags & CXGB4_USING_MSI) {
667 		pci_disable_msi(adapter->pdev);
668 		adapter->flags &= ~CXGB4_USING_MSI;
669 	}
670 }
671 
672 /*
673  * Interrupt handler for non-data events used with MSI-X.
674  */
675 static irqreturn_t t4_nondata_intr(int irq, void *cookie)
676 {
677 	struct adapter *adap = cookie;
678 	u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A));
679 
680 	if (v & PFSW_F) {
681 		adap->swintr = 1;
682 		t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A), v);
683 	}
684 	if (adap->flags & CXGB4_MASTER_PF)
685 		t4_slow_intr_handler(adap);
686 	return IRQ_HANDLED;
687 }
688 
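/*
 * Pick a CPU for MSI-X vector @vec, spreading successive indices across
 * online CPUs while preferring those local to the device's NUMA node, and
 * install it as the vector's IRQ affinity hint.
 */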
689 int cxgb4_set_msix_aff(struct adapter *adap, unsigned short vec,
690 		       cpumask_var_t *aff_mask, int idx)
691 {
692 	int rv;
693 
694 	if (!zalloc_cpumask_var(aff_mask, GFP_KERNEL)) {
695 		dev_err(adap->pdev_dev, "alloc_cpumask_var failed\n");
696 		return -ENOMEM;
697 	}
698 
699 	cpumask_set_cpu(cpumask_local_spread(idx, dev_to_node(adap->pdev_dev)),
700 			*aff_mask);
701 
702 	rv = irq_set_affinity_hint(vec, *aff_mask);
703 	if (rv)
704 		dev_warn(adap->pdev_dev,
705 			 "irq_set_affinity_hint %u failed %d\n",
706 			 vec, rv);
707 
708 	return 0;
709 }
710 
711 void cxgb4_clear_msix_aff(unsigned short vec, cpumask_var_t aff_mask)
712 {
713 	irq_set_affinity_hint(vec, NULL);
714 	free_cpumask_var(aff_mask);
715 }
716 
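/*
 * Request the MSI-X IRQs for the firmware event queue and all Ethernet Rx
 * queues, setting a CPU affinity hint for each Rx queue vector.  On failure,
 * release whatever was acquired so far.
 */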
717 static int request_msix_queue_irqs(struct adapter *adap)
718 {
719 	struct sge *s = &adap->sge;
720 	struct msix_info *minfo;
721 	int err, ethqidx;
722 
723 	if (s->fwevtq_msix_idx < 0)
724 		return -ENOMEM;
725 
726 	err = request_irq(adap->msix_info[s->fwevtq_msix_idx].vec,
727 			  t4_sge_intr_msix, 0,
728 			  adap->msix_info[s->fwevtq_msix_idx].desc,
729 			  &s->fw_evtq);
730 	if (err)
731 		return err;
732 
733 	for_each_ethrxq(s, ethqidx) {
734 		minfo = s->ethrxq[ethqidx].msix;
735 		err = request_irq(minfo->vec,
736 				  t4_sge_intr_msix, 0,
737 				  minfo->desc,
738 				  &s->ethrxq[ethqidx].rspq);
739 		if (err)
740 			goto unwind;
741 
742 		cxgb4_set_msix_aff(adap, minfo->vec,
743 				   &minfo->aff_mask, ethqidx);
744 	}
745 	return 0;
746 
747 unwind:
748 	while (--ethqidx >= 0) {
749 		minfo = s->ethrxq[ethqidx].msix;
750 		cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
751 		free_irq(minfo->vec, &s->ethrxq[ethqidx].rspq);
752 	}
753 	free_irq(adap->msix_info[s->fwevtq_msix_idx].vec, &s->fw_evtq);
754 	return err;
755 }
756 
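/*
 * Release the IRQs and affinity hints acquired by request_msix_queue_irqs().
 */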
757 static void free_msix_queue_irqs(struct adapter *adap)
758 {
759 	struct sge *s = &adap->sge;
760 	struct msix_info *minfo;
761 	int i;
762 
763 	free_irq(adap->msix_info[s->fwevtq_msix_idx].vec, &s->fw_evtq);
764 	for_each_ethrxq(s, i) {
765 		minfo = s->ethrxq[i].msix;
766 		cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
767 		free_irq(minfo->vec, &s->ethrxq[i].rspq);
768 	}
769 }
770 
771 static int setup_ppod_edram(struct adapter *adap)
772 {
773 	unsigned int param, val;
774 	int ret;
775 
	/* The driver sends a FW_PARAMS_PARAM_DEV_PPOD_EDRAM read command to
	 * check whether the firmware supports the PPOD eDRAM feature. If the
	 * firmware returns 1, the driver can enable the feature by writing
	 * FW_PARAMS_PARAM_DEV_PPOD_EDRAM back with the value 1.
	 */
782 	param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
783 		FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PPOD_EDRAM));
784 
785 	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
786 	if (ret < 0) {
787 		dev_warn(adap->pdev_dev,
788 			 "querying PPOD_EDRAM support failed: %d\n",
789 			 ret);
790 		return -1;
791 	}
792 
793 	if (val != 1)
794 		return -1;
795 
796 	ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
797 	if (ret < 0) {
798 		dev_err(adap->pdev_dev,
799 			"setting PPOD_EDRAM failed: %d\n", ret);
800 		return -1;
801 	}
802 	return 0;
803 }
804 
805 static void adap_config_hpfilter(struct adapter *adapter)
806 {
807 	u32 param, val = 0;
808 	int ret;
809 
810 	/* Enable HP filter region. Older fw will fail this request and
811 	 * it is fine.
812 	 */
813 	param = FW_PARAM_DEV(HPFILTER_REGION_SUPPORT);
814 	ret = t4_set_params(adapter, adapter->mbox, adapter->pf, 0,
815 			    1, &param, &val);
816 
	/* An error means the FW doesn't know about HP filter support;
	 * it's not a problem, so don't return an error.
	 */
820 	if (ret < 0)
821 		dev_err(adapter->pdev_dev,
822 			"HP filter region isn't supported by FW\n");
823 }
824 
825 /**
826  *	cxgb4_write_rss - write the RSS table for a given port
827  *	@pi: the port
828  *	@queues: array of queue indices for RSS
829  *
830  *	Sets up the portion of the HW RSS table for the port's VI to distribute
831  *	packets to the Rx queues in @queues.
 *	Should never be called before setting up the SGE Ethernet Rx queues.
833  */
834 int cxgb4_write_rss(const struct port_info *pi, const u16 *queues)
835 {
836 	u16 *rss;
837 	int i, err;
838 	struct adapter *adapter = pi->adapter;
839 	const struct sge_eth_rxq *rxq;
840 
841 	rxq = &adapter->sge.ethrxq[pi->first_qset];
842 	rss = kmalloc_array(pi->rss_size, sizeof(u16), GFP_KERNEL);
843 	if (!rss)
844 		return -ENOMEM;
845 
846 	/* map the queue indices to queue ids */
847 	for (i = 0; i < pi->rss_size; i++, queues++)
848 		rss[i] = rxq[*queues].rspq.abs_id;
849 
850 	err = t4_config_rss_range(adapter, adapter->pf, pi->viid, 0,
851 				  pi->rss_size, rss, pi->rss_size);
852 	/* If Tunnel All Lookup isn't specified in the global RSS
853 	 * Configuration, then we need to specify a default Ingress
854 	 * Queue for any ingress packets which aren't hashed.  We'll
855 	 * use our first ingress queue ...
856 	 */
857 	if (!err)
858 		err = t4_config_vi_rss(adapter, adapter->mbox, pi->viid,
859 				       FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F |
860 				       FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F |
861 				       FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F |
862 				       FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F |
863 				       FW_RSS_VI_CONFIG_CMD_UDPEN_F,
864 				       rss[0]);
865 	kfree(rss);
866 	return err;
867 }
868 
869 /**
870  *	setup_rss - configure RSS
871  *	@adap: the adapter
872  *
873  *	Sets up RSS for each port.
874  */
875 static int setup_rss(struct adapter *adap)
876 {
877 	int i, j, err;
878 
879 	for_each_port(adap, i) {
880 		const struct port_info *pi = adap2pinfo(adap, i);
881 
882 		/* Fill default values with equal distribution */
883 		for (j = 0; j < pi->rss_size; j++)
884 			pi->rss[j] = j % pi->nqsets;
885 
886 		err = cxgb4_write_rss(pi, pi->rss);
887 		if (err)
888 			return err;
889 	}
890 	return 0;
891 }
892 
893 /*
894  * Return the channel of the ingress queue with the given qid.
895  */
896 static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
897 {
898 	qid -= p->ingr_start;
899 	return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
900 }
901 
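/* Disable NAPI processing for a response queue. */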
902 void cxgb4_quiesce_rx(struct sge_rspq *q)
903 {
904 	if (q->handler)
905 		napi_disable(&q->napi);
906 }
907 
908 /*
909  * Wait until all NAPI handlers are descheduled.
910  */
911 static void quiesce_rx(struct adapter *adap)
912 {
913 	int i;
914 
915 	for (i = 0; i < adap->sge.ingr_sz; i++) {
916 		struct sge_rspq *q = adap->sge.ingr_map[i];
917 
918 		if (!q)
919 			continue;
920 
921 		cxgb4_quiesce_rx(q);
922 	}
923 }
924 
/* Disable interrupts and NAPI handlers */
926 static void disable_interrupts(struct adapter *adap)
927 {
928 	struct sge *s = &adap->sge;
929 
930 	if (adap->flags & CXGB4_FULL_INIT_DONE) {
931 		t4_intr_disable(adap);
932 		if (adap->flags & CXGB4_USING_MSIX) {
933 			free_msix_queue_irqs(adap);
934 			free_irq(adap->msix_info[s->nd_msix_idx].vec,
935 				 adap);
936 		} else {
937 			free_irq(adap->pdev->irq, adap);
938 		}
939 		quiesce_rx(adap);
940 	}
941 }
942 
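/*
 * Re-enable NAPI processing (if any) and interrupt generation for a response
 * queue.
 */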
943 void cxgb4_enable_rx(struct adapter *adap, struct sge_rspq *q)
944 {
945 	if (q->handler)
946 		napi_enable(&q->napi);
947 
948 	/* 0-increment GTS to start the timer and enable interrupts */
949 	t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
950 		     SEINTARM_V(q->intr_params) |
951 		     INGRESSQID_V(q->cntxt_id));
952 }
953 
954 /*
955  * Enable NAPI scheduling and interrupt generation for all Rx queues.
956  */
957 static void enable_rx(struct adapter *adap)
958 {
959 	int i;
960 
961 	for (i = 0; i < adap->sge.ingr_sz; i++) {
962 		struct sge_rspq *q = adap->sge.ingr_map[i];
963 
964 		if (!q)
965 			continue;
966 
967 		cxgb4_enable_rx(adap, q);
968 	}
969 }
970 
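/*
 * When using MSI-X, reserve a vector from the bitmap for the non-data
 * (slow path) interrupt and label it with the first port's name.
 */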
971 static int setup_non_data_intr(struct adapter *adap)
972 {
973 	int msix;
974 
975 	adap->sge.nd_msix_idx = -1;
976 	if (!(adap->flags & CXGB4_USING_MSIX))
977 		return 0;
978 
979 	/* Request MSI-X vector for non-data interrupt */
980 	msix = cxgb4_get_msix_idx_from_bmap(adap);
981 	if (msix < 0)
982 		return -ENOMEM;
983 
984 	snprintf(adap->msix_info[msix].desc,
985 		 sizeof(adap->msix_info[msix].desc),
986 		 "%s", adap->port[0]->name);
987 
988 	adap->sge.nd_msix_idx = msix;
989 	return 0;
990 }
991 
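/*
 * Allocate the firmware event queue.  With MSI-X, reserve a dedicated vector
 * for it; otherwise allocate the forwarded-interrupt queue first and have the
 * firmware event queue forward its interrupts to it.
 */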
992 static int setup_fw_sge_queues(struct adapter *adap)
993 {
994 	struct sge *s = &adap->sge;
995 	int msix, err = 0;
996 
997 	bitmap_zero(s->starving_fl, s->egr_sz);
998 	bitmap_zero(s->txq_maperr, s->egr_sz);
999 
1000 	if (adap->flags & CXGB4_USING_MSIX) {
1001 		s->fwevtq_msix_idx = -1;
1002 		msix = cxgb4_get_msix_idx_from_bmap(adap);
1003 		if (msix < 0)
1004 			return -ENOMEM;
1005 
1006 		snprintf(adap->msix_info[msix].desc,
1007 			 sizeof(adap->msix_info[msix].desc),
1008 			 "%s-FWeventq", adap->port[0]->name);
1009 	} else {
1010 		err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
1011 				       NULL, NULL, NULL, -1);
1012 		if (err)
1013 			return err;
1014 		msix = -((int)s->intrq.abs_id + 1);
1015 	}
1016 
1017 	err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
1018 			       msix, NULL, fwevtq_handler, NULL, -1);
1019 	if (err && msix >= 0)
1020 		cxgb4_free_msix_idx_in_bmap(adap, msix);
1021 
1022 	s->fwevtq_msix_idx = msix;
1023 	return err;
1024 }
1025 
1026 /**
1027  *	setup_sge_queues - configure SGE Tx/Rx/response queues
1028  *	@adap: the adapter
1029  *
1030  *	Determines how many sets of SGE queues to use and initializes them.
1031  *	We support multiple queue sets per port if we have MSI-X, otherwise
1032  *	just one queue set per port.
1033  */
1034 static int setup_sge_queues(struct adapter *adap)
1035 {
1036 	struct sge_uld_rxq_info *rxq_info = NULL;
1037 	struct sge *s = &adap->sge;
1038 	unsigned int cmplqid = 0;
1039 	int err, i, j, msix = 0;
1040 
1041 	if (is_uld(adap))
1042 		rxq_info = s->uld_rxq_info[CXGB4_ULD_RDMA];
1043 
1044 	if (!(adap->flags & CXGB4_USING_MSIX))
1045 		msix = -((int)s->intrq.abs_id + 1);
1046 
1047 	for_each_port(adap, i) {
1048 		struct net_device *dev = adap->port[i];
1049 		struct port_info *pi = netdev_priv(dev);
1050 		struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
1051 		struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];
1052 
1053 		for (j = 0; j < pi->nqsets; j++, q++) {
1054 			if (msix >= 0) {
1055 				msix = cxgb4_get_msix_idx_from_bmap(adap);
1056 				if (msix < 0) {
1057 					err = msix;
1058 					goto freeout;
1059 				}
1060 
1061 				snprintf(adap->msix_info[msix].desc,
1062 					 sizeof(adap->msix_info[msix].desc),
1063 					 "%s-Rx%d", dev->name, j);
1064 				q->msix = &adap->msix_info[msix];
1065 			}
1066 
1067 			err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
1068 					       msix, &q->fl,
1069 					       t4_ethrx_handler,
1070 					       NULL,
1071 					       t4_get_tp_ch_map(adap,
1072 								pi->tx_chan));
1073 			if (err)
1074 				goto freeout;
1075 			q->rspq.idx = j;
1076 			memset(&q->stats, 0, sizeof(q->stats));
1077 		}
1078 
1079 		q = &s->ethrxq[pi->first_qset];
1080 		for (j = 0; j < pi->nqsets; j++, t++, q++) {
1081 			err = t4_sge_alloc_eth_txq(adap, t, dev,
1082 					netdev_get_tx_queue(dev, j),
1083 					q->rspq.cntxt_id,
1084 					!!(adap->flags & CXGB4_SGE_DBQ_TIMER));
1085 			if (err)
1086 				goto freeout;
1087 		}
1088 	}
1089 
1090 	for_each_port(adap, i) {
1091 		/* Note that cmplqid below is 0 if we don't
1092 		 * have RDMA queues, and that's the right value.
1093 		 */
1094 		if (rxq_info)
1095 			cmplqid	= rxq_info->uldrxq[i].rspq.cntxt_id;
1096 
1097 		err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
1098 					    s->fw_evtq.cntxt_id, cmplqid);
1099 		if (err)
1100 			goto freeout;
1101 	}
1102 
1103 	if (!is_t4(adap->params.chip)) {
1104 		err = t4_sge_alloc_eth_txq(adap, &s->ptptxq, adap->port[0],
1105 					   netdev_get_tx_queue(adap->port[0], 0)
1106 					   , s->fw_evtq.cntxt_id, false);
1107 		if (err)
1108 			goto freeout;
1109 	}
1110 
1111 	t4_write_reg(adap, is_t4(adap->params.chip) ?
1112 				MPS_TRC_RSS_CONTROL_A :
1113 				MPS_T5_TRC_RSS_CONTROL_A,
1114 		     RSSCONTROL_V(netdev2pinfo(adap->port[0])->tx_chan) |
1115 		     QUEUENUMBER_V(s->ethrxq[0].rspq.abs_id));
1116 	return 0;
1117 freeout:
1118 	dev_err(adap->pdev_dev, "Can't allocate queues, err=%d\n", -err);
1119 	t4_free_sge_resources(adap);
1120 	return err;
1121 }
1122 
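/*
 * ndo_select_queue() handler: choose a Tx queue for an outgoing skb.  With
 * DCB negotiated, the VLAN Priority Code Point selects the queue; with TC
 * mqprio offload, unsupported traffic is redirected to the port's regular NIC
 * queues; otherwise use the optional driver policy (select_queue=1) or the
 * kernel's default selection.
 */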
1123 static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
1124 			     struct net_device *sb_dev)
1125 {
1126 	int txq;
1127 
1128 #ifdef CONFIG_CHELSIO_T4_DCB
	/* If Data Center Bridging has been successfully negotiated on this
1130 	 * link then we'll use the skb's priority to map it to a TX Queue.
1131 	 * The skb's priority is determined via the VLAN Tag Priority Code
1132 	 * Point field.
1133 	 */
1134 	if (cxgb4_dcb_enabled(dev) && !is_kdump_kernel()) {
1135 		u16 vlan_tci;
1136 		int err;
1137 
1138 		err = vlan_get_tag(skb, &vlan_tci);
1139 		if (unlikely(err)) {
1140 			if (net_ratelimit())
1141 				netdev_warn(dev,
1142 					    "TX Packet without VLAN Tag on DCB Link\n");
1143 			txq = 0;
1144 		} else {
1145 			txq = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
1146 #ifdef CONFIG_CHELSIO_T4_FCOE
1147 			if (skb->protocol == htons(ETH_P_FCOE))
1148 				txq = skb->priority & 0x7;
1149 #endif /* CONFIG_CHELSIO_T4_FCOE */
1150 		}
1151 		return txq;
1152 	}
1153 #endif /* CONFIG_CHELSIO_T4_DCB */
1154 
1155 	if (dev->num_tc) {
1156 		struct port_info *pi = netdev2pinfo(dev);
1157 		u8 ver, proto;
1158 
1159 		ver = ip_hdr(skb)->version;
1160 		proto = (ver == 6) ? ipv6_hdr(skb)->nexthdr :
1161 				     ip_hdr(skb)->protocol;
1162 
1163 		/* Send unsupported traffic pattern to normal NIC queues. */
1164 		txq = netdev_pick_tx(dev, skb, sb_dev);
1165 		if (xfrm_offload(skb) || is_ptp_enabled(skb, dev) ||
1166 		    skb->encapsulation ||
1167 		    (proto != IPPROTO_TCP && proto != IPPROTO_UDP))
1168 			txq = txq % pi->nqsets;
1169 
1170 		return txq;
1171 	}
1172 
1173 	if (select_queue) {
1174 		txq = (skb_rx_queue_recorded(skb)
1175 			? skb_get_rx_queue(skb)
1176 			: smp_processor_id());
1177 
1178 		while (unlikely(txq >= dev->real_num_tx_queues))
1179 			txq -= dev->real_num_tx_queues;
1180 
1181 		return txq;
1182 	}
1183 
1184 	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
1185 }
1186 
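/*
 * Return the index of the SGE interrupt holdoff timer value closest to the
 * requested time (in us).
 */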
1187 static int closest_timer(const struct sge *s, int time)
1188 {
1189 	int i, delta, match = 0, min_delta = INT_MAX;
1190 
1191 	for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
1192 		delta = time - s->timer_val[i];
1193 		if (delta < 0)
1194 			delta = -delta;
1195 		if (delta < min_delta) {
1196 			min_delta = delta;
1197 			match = i;
1198 		}
1199 	}
1200 	return match;
1201 }
1202 
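/*
 * Return the index of the interrupt packet-count threshold closest to the
 * requested value.
 */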
1203 static int closest_thres(const struct sge *s, int thres)
1204 {
1205 	int i, delta, match = 0, min_delta = INT_MAX;
1206 
1207 	for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
1208 		delta = thres - s->counter_val[i];
1209 		if (delta < 0)
1210 			delta = -delta;
1211 		if (delta < min_delta) {
1212 			min_delta = delta;
1213 			match = i;
1214 		}
1215 	}
1216 	return match;
1217 }
1218 
1219 /**
1220  *	cxgb4_set_rspq_intr_params - set a queue's interrupt holdoff parameters
1221  *	@q: the Rx queue
1222  *	@us: the hold-off time in us, or 0 to disable timer
1223  *	@cnt: the hold-off packet count, or 0 to disable counter
1224  *
1225  *	Sets an Rx queue's interrupt hold-off time and packet count.  At least
1226  *	one of the two needs to be enabled for the queue to generate interrupts.
1227  */
1228 int cxgb4_set_rspq_intr_params(struct sge_rspq *q,
1229 			       unsigned int us, unsigned int cnt)
1230 {
1231 	struct adapter *adap = q->adap;
1232 
1233 	if ((us | cnt) == 0)
1234 		cnt = 1;
1235 
1236 	if (cnt) {
1237 		int err;
1238 		u32 v, new_idx;
1239 
1240 		new_idx = closest_thres(&adap->sge, cnt);
1241 		if (q->desc && q->pktcnt_idx != new_idx) {
1242 			/* the queue has already been created, update it */
1243 			v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
1244 			    FW_PARAMS_PARAM_X_V(
1245 					FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
1246 			    FW_PARAMS_PARAM_YZ_V(q->cntxt_id);
1247 			err = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
1248 					    &v, &new_idx);
1249 			if (err)
1250 				return err;
1251 		}
1252 		q->pktcnt_idx = new_idx;
1253 	}
1254 
1255 	us = us == 0 ? 6 : closest_timer(&adap->sge, us);
1256 	q->intr_params = QINTR_TIMER_IDX_V(us) | QINTR_CNT_EN_V(cnt > 0);
1257 	return 0;
1258 }
1259 
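/*
 * ndo_set_features() handler.  Only a change to hardware VLAN Rx offload
 * requires reprogramming the VI's Rx mode; other feature bits need no action
 * here.
 */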
1260 static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
1261 {
1262 	const struct port_info *pi = netdev_priv(dev);
1263 	netdev_features_t changed = dev->features ^ features;
1264 	int err;
1265 
1266 	if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
1267 		return 0;
1268 
1269 	err = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, -1,
1270 			    -1, -1, -1,
1271 			    !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
1272 	if (unlikely(err))
1273 		dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
1274 	return err;
1275 }
1276 
1277 static int setup_debugfs(struct adapter *adap)
1278 {
1279 	if (IS_ERR_OR_NULL(adap->debugfs_root))
1280 		return -1;
1281 
1282 #ifdef CONFIG_DEBUG_FS
1283 	t4_setup_debugfs(adap);
1284 #endif
1285 	return 0;
1286 }
1287 
1288 /*
1289  * upper-layer driver support
1290  */
1291 
1292 /*
1293  * Allocate an active-open TID and set it to the supplied value.
1294  */
1295 int cxgb4_alloc_atid(struct tid_info *t, void *data)
1296 {
1297 	int atid = -1;
1298 
1299 	spin_lock_bh(&t->atid_lock);
1300 	if (t->afree) {
1301 		union aopen_entry *p = t->afree;
1302 
1303 		atid = (p - t->atid_tab) + t->atid_base;
1304 		t->afree = p->next;
1305 		p->data = data;
1306 		t->atids_in_use++;
1307 	}
1308 	spin_unlock_bh(&t->atid_lock);
1309 	return atid;
1310 }
1311 EXPORT_SYMBOL(cxgb4_alloc_atid);
1312 
1313 /*
1314  * Release an active-open TID.
1315  */
1316 void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
1317 {
1318 	union aopen_entry *p = &t->atid_tab[atid - t->atid_base];
1319 
1320 	spin_lock_bh(&t->atid_lock);
1321 	p->next = t->afree;
1322 	t->afree = p;
1323 	t->atids_in_use--;
1324 	spin_unlock_bh(&t->atid_lock);
1325 }
1326 EXPORT_SYMBOL(cxgb4_free_atid);
1327 
1328 /*
1329  * Allocate a server TID and set it to the supplied value.
1330  */
1331 int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
1332 {
1333 	int stid;
1334 
1335 	spin_lock_bh(&t->stid_lock);
1336 	if (family == PF_INET) {
1337 		stid = find_first_zero_bit(t->stid_bmap, t->nstids);
1338 		if (stid < t->nstids)
1339 			__set_bit(stid, t->stid_bmap);
1340 		else
1341 			stid = -1;
1342 	} else {
1343 		stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 1);
1344 		if (stid < 0)
1345 			stid = -1;
1346 	}
1347 	if (stid >= 0) {
1348 		t->stid_tab[stid].data = data;
1349 		stid += t->stid_base;
1350 		/* IPv6 requires max of 520 bits or 16 cells in TCAM
1351 		 * This is equivalent to 4 TIDs. With CLIP enabled it
1352 		 * needs 2 TIDs.
1353 		 */
1354 		if (family == PF_INET6) {
1355 			t->stids_in_use += 2;
1356 			t->v6_stids_in_use += 2;
1357 		} else {
1358 			t->stids_in_use++;
1359 		}
1360 	}
1361 	spin_unlock_bh(&t->stid_lock);
1362 	return stid;
1363 }
1364 EXPORT_SYMBOL(cxgb4_alloc_stid);
1365 
1366 /* Allocate a server filter TID and set it to the supplied value.
1367  */
1368 int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
1369 {
1370 	int stid;
1371 
1372 	spin_lock_bh(&t->stid_lock);
1373 	if (family == PF_INET) {
1374 		stid = find_next_zero_bit(t->stid_bmap,
1375 				t->nstids + t->nsftids, t->nstids);
1376 		if (stid < (t->nstids + t->nsftids))
1377 			__set_bit(stid, t->stid_bmap);
1378 		else
1379 			stid = -1;
1380 	} else {
1381 		stid = -1;
1382 	}
1383 	if (stid >= 0) {
1384 		t->stid_tab[stid].data = data;
1385 		stid -= t->nstids;
1386 		stid += t->sftid_base;
1387 		t->sftids_in_use++;
1388 	}
1389 	spin_unlock_bh(&t->stid_lock);
1390 	return stid;
1391 }
1392 EXPORT_SYMBOL(cxgb4_alloc_sftid);
1393 
1394 /* Release a server TID.
1395  */
1396 void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
1397 {
1398 	/* Is it a server filter TID? */
1399 	if (t->nsftids && (stid >= t->sftid_base)) {
1400 		stid -= t->sftid_base;
1401 		stid += t->nstids;
1402 	} else {
1403 		stid -= t->stid_base;
1404 	}
1405 
1406 	spin_lock_bh(&t->stid_lock);
1407 	if (family == PF_INET)
1408 		__clear_bit(stid, t->stid_bmap);
1409 	else
1410 		bitmap_release_region(t->stid_bmap, stid, 1);
1411 	t->stid_tab[stid].data = NULL;
1412 	if (stid < t->nstids) {
1413 		if (family == PF_INET6) {
1414 			t->stids_in_use -= 2;
1415 			t->v6_stids_in_use -= 2;
1416 		} else {
1417 			t->stids_in_use--;
1418 		}
1419 	} else {
1420 		t->sftids_in_use--;
1421 	}
1422 
1423 	spin_unlock_bh(&t->stid_lock);
1424 }
1425 EXPORT_SYMBOL(cxgb4_free_stid);
1426 
1427 /*
1428  * Populate a TID_RELEASE WR.  Caller must properly size the skb.
1429  */
1430 static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
1431 			   unsigned int tid)
1432 {
1433 	struct cpl_tid_release *req;
1434 
1435 	set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
1436 	req = __skb_put(skb, sizeof(*req));
1437 	INIT_TP_WR(req, tid);
1438 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
1439 }
1440 
1441 /*
1442  * Queue a TID release request and if necessary schedule a work queue to
1443  * process it.
1444  */
1445 static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
1446 				    unsigned int tid)
1447 {
1448 	struct adapter *adap = container_of(t, struct adapter, tids);
1449 	void **p = &t->tid_tab[tid - t->tid_base];
1450 
1451 	spin_lock_bh(&adap->tid_release_lock);
1452 	*p = adap->tid_release_head;
1453 	/* Low 2 bits encode the Tx channel number */
1454 	adap->tid_release_head = (void **)((uintptr_t)p | chan);
1455 	if (!adap->tid_release_task_busy) {
1456 		adap->tid_release_task_busy = true;
1457 		queue_work(adap->workq, &adap->tid_release_task);
1458 	}
1459 	spin_unlock_bh(&adap->tid_release_lock);
1460 }
1461 
1462 /*
1463  * Process the list of pending TID release requests.
1464  */
1465 static void process_tid_release_list(struct work_struct *work)
1466 {
1467 	struct sk_buff *skb;
1468 	struct adapter *adap;
1469 
1470 	adap = container_of(work, struct adapter, tid_release_task);
1471 
1472 	spin_lock_bh(&adap->tid_release_lock);
1473 	while (adap->tid_release_head) {
1474 		void **p = adap->tid_release_head;
1475 		unsigned int chan = (uintptr_t)p & 3;
1476 		p = (void *)p - chan;
1477 
1478 		adap->tid_release_head = *p;
1479 		*p = NULL;
1480 		spin_unlock_bh(&adap->tid_release_lock);
1481 
1482 		while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
1483 					 GFP_KERNEL)))
1484 			schedule_timeout_uninterruptible(1);
1485 
1486 		mk_tid_release(skb, chan, p - adap->tids.tid_tab);
1487 		t4_ofld_send(adap, skb);
1488 		spin_lock_bh(&adap->tid_release_lock);
1489 	}
1490 	adap->tid_release_task_busy = false;
1491 	spin_unlock_bh(&adap->tid_release_lock);
1492 }
1493 
1494 /*
1495  * Release a TID and inform HW.  If we are unable to allocate the release
1496  * message we defer to a work queue.
1497  */
1498 void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid,
1499 		      unsigned short family)
1500 {
1501 	struct adapter *adap = container_of(t, struct adapter, tids);
1502 	struct sk_buff *skb;
1503 
1504 	WARN_ON(tid_out_of_range(&adap->tids, tid));
1505 
1506 	if (t->tid_tab[tid - adap->tids.tid_base]) {
1507 		t->tid_tab[tid - adap->tids.tid_base] = NULL;
1508 		atomic_dec(&t->conns_in_use);
1509 		if (t->hash_base && (tid >= t->hash_base)) {
1510 			if (family == AF_INET6)
1511 				atomic_sub(2, &t->hash_tids_in_use);
1512 			else
1513 				atomic_dec(&t->hash_tids_in_use);
1514 		} else {
1515 			if (family == AF_INET6)
1516 				atomic_sub(2, &t->tids_in_use);
1517 			else
1518 				atomic_dec(&t->tids_in_use);
1519 		}
1520 	}
1521 
1522 	skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
1523 	if (likely(skb)) {
1524 		mk_tid_release(skb, chan, tid);
1525 		t4_ofld_send(adap, skb);
1526 	} else
1527 		cxgb4_queue_tid_release(t, chan, tid);
1528 }
1529 EXPORT_SYMBOL(cxgb4_remove_tid);
1530 
1531 /*
1532  * Allocate and initialize the TID tables.  Returns 0 on success.
1533  */
1534 static int tid_init(struct tid_info *t)
1535 {
1536 	struct adapter *adap = container_of(t, struct adapter, tids);
1537 	unsigned int max_ftids = t->nftids + t->nsftids;
1538 	unsigned int natids = t->natids;
1539 	unsigned int hpftid_bmap_size;
1540 	unsigned int eotid_bmap_size;
1541 	unsigned int stid_bmap_size;
1542 	unsigned int ftid_bmap_size;
1543 	size_t size;
1544 
1545 	stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
1546 	ftid_bmap_size = BITS_TO_LONGS(t->nftids);
1547 	hpftid_bmap_size = BITS_TO_LONGS(t->nhpftids);
1548 	eotid_bmap_size = BITS_TO_LONGS(t->neotids);
1549 	size = t->ntids * sizeof(*t->tid_tab) +
1550 	       natids * sizeof(*t->atid_tab) +
1551 	       t->nstids * sizeof(*t->stid_tab) +
1552 	       t->nsftids * sizeof(*t->stid_tab) +
1553 	       stid_bmap_size * sizeof(long) +
1554 	       t->nhpftids * sizeof(*t->hpftid_tab) +
1555 	       hpftid_bmap_size * sizeof(long) +
1556 	       max_ftids * sizeof(*t->ftid_tab) +
1557 	       ftid_bmap_size * sizeof(long) +
1558 	       t->neotids * sizeof(*t->eotid_tab) +
1559 	       eotid_bmap_size * sizeof(long);
1560 
1561 	t->tid_tab = kvzalloc(size, GFP_KERNEL);
1562 	if (!t->tid_tab)
1563 		return -ENOMEM;
1564 
1565 	t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
1566 	t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
1567 	t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
1568 	t->hpftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
1569 	t->hpftid_bmap = (unsigned long *)&t->hpftid_tab[t->nhpftids];
1570 	t->ftid_tab = (struct filter_entry *)&t->hpftid_bmap[hpftid_bmap_size];
1571 	t->ftid_bmap = (unsigned long *)&t->ftid_tab[max_ftids];
1572 	t->eotid_tab = (struct eotid_entry *)&t->ftid_bmap[ftid_bmap_size];
1573 	t->eotid_bmap = (unsigned long *)&t->eotid_tab[t->neotids];
1574 	spin_lock_init(&t->stid_lock);
1575 	spin_lock_init(&t->atid_lock);
1576 	spin_lock_init(&t->ftid_lock);
1577 
1578 	t->stids_in_use = 0;
1579 	t->v6_stids_in_use = 0;
1580 	t->sftids_in_use = 0;
1581 	t->afree = NULL;
1582 	t->atids_in_use = 0;
1583 	atomic_set(&t->tids_in_use, 0);
1584 	atomic_set(&t->conns_in_use, 0);
1585 	atomic_set(&t->hash_tids_in_use, 0);
1586 	atomic_set(&t->eotids_in_use, 0);
1587 
	/* Set up the free list for atid_tab and clear the stid bitmap. */
1589 	if (natids) {
1590 		while (--natids)
1591 			t->atid_tab[natids - 1].next = &t->atid_tab[natids];
1592 		t->afree = t->atid_tab;
1593 	}
1594 
1595 	if (is_offload(adap)) {
1596 		bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
1597 		/* Reserve stid 0 for T4/T5 adapters */
1598 		if (!t->stid_base &&
1599 		    CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
1600 			__set_bit(0, t->stid_bmap);
1601 
1602 		if (t->neotids)
1603 			bitmap_zero(t->eotid_bmap, t->neotids);
1604 	}
1605 
1606 	if (t->nhpftids)
1607 		bitmap_zero(t->hpftid_bmap, t->nhpftids);
1608 	bitmap_zero(t->ftid_bmap, t->nftids);
1609 	return 0;
1610 }
1611 
1612 /**
1613  *	cxgb4_create_server - create an IP server
1614  *	@dev: the device
1615  *	@stid: the server TID
1616  *	@sip: local IP address to bind server to
1617  *	@sport: the server's TCP port
1618  *	@vlan: the VLAN header information
1619  *	@queue: queue to direct messages from this server to
1620  *
1621  *	Create an IP server for the given port and address.
1622  *	Returns <0 on error and one of the %NET_XMIT_* values on success.
1623  */
1624 int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
1625 			__be32 sip, __be16 sport, __be16 vlan,
1626 			unsigned int queue)
1627 {
1628 	unsigned int chan;
1629 	struct sk_buff *skb;
1630 	struct adapter *adap;
1631 	struct cpl_pass_open_req *req;
1632 	int ret;
1633 
1634 	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
1635 	if (!skb)
1636 		return -ENOMEM;
1637 
1638 	adap = netdev2adap(dev);
1639 	req = __skb_put(skb, sizeof(*req));
1640 	INIT_TP_WR(req, 0);
1641 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
1642 	req->local_port = sport;
1643 	req->peer_port = htons(0);
1644 	req->local_ip = sip;
1645 	req->peer_ip = htonl(0);
1646 	chan = rxq_to_chan(&adap->sge, queue);
1647 	req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
1648 	req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
1649 				SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
1650 	ret = t4_mgmt_tx(adap, skb);
1651 	return net_xmit_eval(ret);
1652 }
1653 EXPORT_SYMBOL(cxgb4_create_server);
1654 
/**
 *	cxgb4_create_server6 - create an IPv6 server
1656  *	@dev: the device
1657  *	@stid: the server TID
1658  *	@sip: local IPv6 address to bind server to
1659  *	@sport: the server's TCP port
1660  *	@queue: queue to direct messages from this server to
1661  *
1662  *	Create an IPv6 server for the given port and address.
1663  *	Returns <0 on error and one of the %NET_XMIT_* values on success.
1664  */
1665 int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
1666 			 const struct in6_addr *sip, __be16 sport,
1667 			 unsigned int queue)
1668 {
1669 	unsigned int chan;
1670 	struct sk_buff *skb;
1671 	struct adapter *adap;
1672 	struct cpl_pass_open_req6 *req;
1673 	int ret;
1674 
1675 	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
1676 	if (!skb)
1677 		return -ENOMEM;
1678 
1679 	adap = netdev2adap(dev);
1680 	req = __skb_put(skb, sizeof(*req));
1681 	INIT_TP_WR(req, 0);
1682 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
1683 	req->local_port = sport;
1684 	req->peer_port = htons(0);
1685 	req->local_ip_hi = *(__be64 *)(sip->s6_addr);
1686 	req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
1687 	req->peer_ip_hi = cpu_to_be64(0);
1688 	req->peer_ip_lo = cpu_to_be64(0);
1689 	chan = rxq_to_chan(&adap->sge, queue);
1690 	req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
1691 	req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
1692 				SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
1693 	ret = t4_mgmt_tx(adap, skb);
1694 	return net_xmit_eval(ret);
1695 }
1696 EXPORT_SYMBOL(cxgb4_create_server6);
1697 
1698 int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
1699 			unsigned int queue, bool ipv6)
1700 {
1701 	struct sk_buff *skb;
1702 	struct adapter *adap;
1703 	struct cpl_close_listsvr_req *req;
1704 	int ret;
1705 
1706 	adap = netdev2adap(dev);
1707 
1708 	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
1709 	if (!skb)
1710 		return -ENOMEM;
1711 
1712 	req = __skb_put(skb, sizeof(*req));
1713 	INIT_TP_WR(req, 0);
1714 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
1715 	req->reply_ctrl = htons(NO_REPLY_V(0) | (ipv6 ? LISTSVR_IPV6_V(1) :
1716 				LISTSVR_IPV6_V(0)) | QUEUENO_V(queue));
1717 	ret = t4_mgmt_tx(adap, skb);
1718 	return net_xmit_eval(ret);
1719 }
1720 EXPORT_SYMBOL(cxgb4_remove_server);
1721 
1722 /**
1723  *	cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
1724  *	@mtus: the HW MTU table
1725  *	@mtu: the target MTU
1726  *	@idx: index of selected entry in the MTU table
1727  *
1728  *	Returns the index and the value in the HW MTU table that is closest to
1729  *	but does not exceed @mtu, unless @mtu is smaller than any value in the
1730  *	table, in which case that smallest available value is selected.
1731  */
1732 unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
1733 			    unsigned int *idx)
1734 {
1735 	unsigned int i = 0;
1736 
1737 	while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
1738 		++i;
1739 	if (idx)
1740 		*idx = i;
1741 	return mtus[i];
1742 }
1743 EXPORT_SYMBOL(cxgb4_best_mtu);
1744 
1745 /**
1746  *     cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned
1747  *     @mtus: the HW MTU table
1748  *     @header_size: Header Size
1749  *     @data_size_max: maximum Data Segment Size
1750  *     @data_size_align: desired Data Segment Size Alignment (2^N)
1751  *     @mtu_idxp: HW MTU Table Index return value pointer (possibly NULL)
1752  *
1753  *     Similar to cxgb4_best_mtu() but instead of searching the Hardware
1754  *     MTU Table based solely on a Maximum MTU parameter, we break that
1755  *     parameter up into a Header Size and Maximum Data Segment Size, and
1756  *     provide a desired Data Segment Size Alignment.  If we find an MTU in
1757  *     the Hardware MTU Table which will result in a Data Segment Size with
1758  *     the requested alignment _and_ that MTU isn't "too far" from the
1759  *     closest MTU, then we'll return that rather than the closest MTU.
1760  */
1761 unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
1762 				    unsigned short header_size,
1763 				    unsigned short data_size_max,
1764 				    unsigned short data_size_align,
1765 				    unsigned int *mtu_idxp)
1766 {
1767 	unsigned short max_mtu = header_size + data_size_max;
1768 	unsigned short data_size_align_mask = data_size_align - 1;
1769 	int mtu_idx, aligned_mtu_idx;
1770 
1771 	/* Scan the MTU Table till we find an MTU which is larger than our
1772 	 * Maximum MTU or we reach the end of the table.  Along the way,
1773 	 * record the last MTU found, if any, which will result in a Data
1774 	 * Segment Length matching the requested alignment.
1775 	 */
1776 	for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) {
1777 		unsigned short data_size = mtus[mtu_idx] - header_size;
1778 
1779 		/* If this MTU minus the Header Size would result in a
1780 		 * Data Segment Size of the desired alignment, remember it.
1781 		 */
1782 		if ((data_size & data_size_align_mask) == 0)
1783 			aligned_mtu_idx = mtu_idx;
1784 
1785 		/* If we're not at the end of the Hardware MTU Table and the
1786 		 * next element is larger than our Maximum MTU, drop out of
1787 		 * the loop.
1788 		 */
1789 		if (mtu_idx+1 < NMTUS && mtus[mtu_idx+1] > max_mtu)
1790 			break;
1791 	}
1792 
1793 	/* If we fell out of the loop because we ran to the end of the table,
1794 	 * then we just have to use the last [largest] entry.
1795 	 */
1796 	if (mtu_idx == NMTUS)
1797 		mtu_idx--;
1798 
1799 	/* If we found an MTU which resulted in the requested Data Segment
1800 	 * Length alignment and that's "not far" from the largest MTU which is
1801 	 * less than or equal to the maximum MTU, then use that.
1802 	 */
1803 	if (aligned_mtu_idx >= 0 &&
1804 	    mtu_idx - aligned_mtu_idx <= 1)
1805 		mtu_idx = aligned_mtu_idx;
1806 
1807 	/* If the caller has passed in an MTU Index pointer, pass the
1808 	 * MTU Index back.  Return the MTU value.
1809 	 */
1810 	if (mtu_idxp)
1811 		*mtu_idxp = mtu_idx;
1812 	return mtus[mtu_idx];
1813 }
1814 EXPORT_SYMBOL(cxgb4_best_aligned_mtu);
1815 
1816 /**
1817  *	cxgb4_port_chan - get the HW channel of a port
1818  *	@dev: the net device for the port
1819  *
1820  *	Return the HW Tx channel of the given port.
1821  */
1822 unsigned int cxgb4_port_chan(const struct net_device *dev)
1823 {
1824 	return netdev2pinfo(dev)->tx_chan;
1825 }
1826 EXPORT_SYMBOL(cxgb4_port_chan);
1827 
1828 /**
1829  *      cxgb4_port_e2cchan - get the HW c-channel of a port
1830  *      @dev: the net device for the port
1831  *
1832  *      Return the HW RX c-channel of the given port.
1833  */
1834 unsigned int cxgb4_port_e2cchan(const struct net_device *dev)
1835 {
1836 	return netdev2pinfo(dev)->rx_cchan;
1837 }
1838 EXPORT_SYMBOL(cxgb4_port_e2cchan);
1839 
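/**
 *	cxgb4_dbfifo_count - read the SGE doorbell FIFO occupancy
 *	@dev: the net device for any port of the adapter
 *	@lpfifo: non-zero to read the low-priority FIFO count, zero for the
 *		 high-priority FIFO count
 *
 *	Return the current number of entries in the selected doorbell FIFO,
 *	using the T4 or T5+ register layout as appropriate.
 */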
1840 unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
1841 {
1842 	struct adapter *adap = netdev2adap(dev);
1843 	u32 v1, v2, lp_count, hp_count;
1844 
1845 	v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
1846 	v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
1847 	if (is_t4(adap->params.chip)) {
1848 		lp_count = LP_COUNT_G(v1);
1849 		hp_count = HP_COUNT_G(v1);
1850 	} else {
1851 		lp_count = LP_COUNT_T5_G(v1);
1852 		hp_count = HP_COUNT_T5_G(v2);
1853 	}
1854 	return lpfifo ? lp_count : hp_count;
1855 }
1856 EXPORT_SYMBOL(cxgb4_dbfifo_count);
1857 
1858 /**
1859  *	cxgb4_port_viid - get the VI id of a port
1860  *	@dev: the net device for the port
1861  *
1862  *	Return the VI id of the given port.
1863  */
1864 unsigned int cxgb4_port_viid(const struct net_device *dev)
1865 {
1866 	return netdev2pinfo(dev)->viid;
1867 }
1868 EXPORT_SYMBOL(cxgb4_port_viid);
1869 
1870 /**
1871  *	cxgb4_port_idx - get the index of a port
1872  *	@dev: the net device for the port
1873  *
1874  *	Return the index of the given port.
1875  */
1876 unsigned int cxgb4_port_idx(const struct net_device *dev)
1877 {
1878 	return netdev2pinfo(dev)->port_id;
1879 }
1880 EXPORT_SYMBOL(cxgb4_port_idx);
1881 
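/**
 *	cxgb4_get_tcp_stats - read the adapter's TP TCP statistics
 *	@pdev: the PCI device of the adapter
 *	@v4: holds the IPv4 TCP statistics on return
 *	@v6: holds the IPv6 TCP statistics on return
 *
 *	Snapshot the TP TCP statistics counters under the adapter's stats lock.
 */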
1882 void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
1883 			 struct tp_tcp_stats *v6)
1884 {
1885 	struct adapter *adap = pci_get_drvdata(pdev);
1886 
1887 	spin_lock(&adap->stats_lock);
1888 	t4_tp_get_tcp_stats(adap, v4, v6, false);
1889 	spin_unlock(&adap->stats_lock);
1890 }
1891 EXPORT_SYMBOL(cxgb4_get_tcp_stats);
1892 
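/**
 *	cxgb4_iscsi_init - program the adapter's iSCSI tag mask and page sizes
 *	@dev: the net device for any port of the adapter
 *	@tag_mask: value written to the ULP_RX iSCSI tag mask register
 *	@pgsz_order: the four page size orders written to the ULP_RX PSZ register
 */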
1893 void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
1894 		      const unsigned int *pgsz_order)
1895 {
1896 	struct adapter *adap = netdev2adap(dev);
1897 
1898 	t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK_A, tag_mask);
1899 	t4_write_reg(adap, ULP_RX_ISCSI_PSZ_A, HPZ0_V(pgsz_order[0]) |
1900 		     HPZ1_V(pgsz_order[1]) | HPZ2_V(pgsz_order[2]) |
1901 		     HPZ3_V(pgsz_order[3]));
1902 }
1903 EXPORT_SYMBOL(cxgb4_iscsi_init);
1904 
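/**
 *	cxgb4_flush_eq_cache - flush the SGE egress queue context cache
 *	@dev: the net device for any port of the adapter
 *
 *	Ask the firmware to flush the on-chip egress queue context cache so
 *	that contexts read back from adapter memory are up to date.
 */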
1905 int cxgb4_flush_eq_cache(struct net_device *dev)
1906 {
1907 	struct adapter *adap = netdev2adap(dev);
1908 
1909 	return t4_sge_ctxt_flush(adap, adap->mbox, CTXT_EGRESS);
1910 }
1911 EXPORT_SYMBOL(cxgb4_flush_eq_cache);
1912 
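/* Read the hardware producer and consumer indices of an egress queue by
 * fetching the relevant 8 bytes of its doorbell queue context from EDC0.
 */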
1913 static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
1914 {
1915 	u32 addr = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A) + 24 * qid + 8;
1916 	__be64 indices;
1917 	int ret;
1918 
1919 	spin_lock(&adap->win0_lock);
1920 	ret = t4_memory_rw(adap, 0, MEM_EDC0, addr,
1921 			   sizeof(indices), (__be32 *)&indices,
1922 			   T4_MEMORY_READ);
1923 	spin_unlock(&adap->win0_lock);
1924 	if (!ret) {
1925 		*cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
1926 		*pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
1927 	}
1928 	return ret;
1929 }
1930 
1931 int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
1932 			u16 size)
1933 {
1934 	struct adapter *adap = netdev2adap(dev);
1935 	u16 hw_pidx, hw_cidx;
1936 	int ret;
1937 
1938 	ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
1939 	if (ret)
1940 		goto out;
1941 
1942 	if (pidx != hw_pidx) {
1943 		u16 delta;
1944 		u32 val;
1945 
1946 		if (pidx >= hw_pidx)
1947 			delta = pidx - hw_pidx;
1948 		else
1949 			delta = size - hw_pidx + pidx;
1950 
1951 		if (is_t4(adap->params.chip))
1952 			val = PIDX_V(delta);
1953 		else
1954 			val = PIDX_T5_V(delta);
1955 		wmb();
1956 		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
1957 			     QID_V(qid) | val);
1958 	}
1959 out:
1960 	return ret;
1961 }
1962 EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
1963 
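/**
 *	cxgb4_read_tpte - read a TPT entry from adapter memory
 *	@dev: the net device for any port of the adapter
 *	@stag: the STag whose TPT entry should be read
 *	@tpte: buffer that receives the 32-byte entry
 *
 *	Locate the TPT entry for @stag in EDC/HMA/MC memory and copy it into
 *	@tpte.  Returns 0 on success or a negative errno on failure.
 */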
1964 int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
1965 {
1966 	u32 edc0_size, edc1_size, mc0_size, mc1_size, size;
1967 	u32 edc0_end, edc1_end, mc0_end, mc1_end;
1968 	u32 offset, memtype, memaddr;
1969 	struct adapter *adap;
1970 	u32 hma_size = 0;
1971 	int ret;
1972 
1973 	adap = netdev2adap(dev);
1974 
1975 	offset = ((stag >> 8) * 32) + adap->vres.stag.start;
1976 
1977 	/* Figure out where the offset lands in the Memory Type/Address scheme.
1978 	 * This code assumes that the memory is laid out starting at offset 0
1979 	 * with no breaks as: EDC0, EDC1, MC0, MC1. All cards have both EDC0
1980 	 * and EDC1.  Some cards will have neither MC0 nor MC1, most cards have
1981 	 * MC0, and some have both MC0 and MC1.
1982 	 */
1983 	size = t4_read_reg(adap, MA_EDRAM0_BAR_A);
1984 	edc0_size = EDRAM0_SIZE_G(size) << 20;
1985 	size = t4_read_reg(adap, MA_EDRAM1_BAR_A);
1986 	edc1_size = EDRAM1_SIZE_G(size) << 20;
1987 	size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
1988 	mc0_size = EXT_MEM0_SIZE_G(size) << 20;
1989 
1990 	if (t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A) & HMA_MUX_F) {
1991 		size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
1992 		hma_size = EXT_MEM1_SIZE_G(size) << 20;
1993 	}
1994 	edc0_end = edc0_size;
1995 	edc1_end = edc0_end + edc1_size;
1996 	mc0_end = edc1_end + mc0_size;
1997 
1998 	if (offset < edc0_end) {
1999 		memtype = MEM_EDC0;
2000 		memaddr = offset;
2001 	} else if (offset < edc1_end) {
2002 		memtype = MEM_EDC1;
2003 		memaddr = offset - edc0_end;
2004 	} else {
2005 		if (hma_size && (offset < (edc1_end + hma_size))) {
2006 			memtype = MEM_HMA;
2007 			memaddr = offset - edc1_end;
2008 		} else if (offset < mc0_end) {
2009 			memtype = MEM_MC0;
2010 			memaddr = offset - edc1_end;
2011 		} else if (is_t5(adap->params.chip)) {
2012 			size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
2013 			mc1_size = EXT_MEM1_SIZE_G(size) << 20;
2014 			mc1_end = mc0_end + mc1_size;
2015 			if (offset < mc1_end) {
2016 				memtype = MEM_MC1;
2017 				memaddr = offset - mc0_end;
2018 			} else {
2019 				/* offset beyond the end of any memory */
2020 				goto err;
2021 			}
2022 		} else {
2023 			/* T4/T6 only has a single memory channel */
2024 			goto err;
2025 		}
2026 	}
2027 
2028 	spin_lock(&adap->win0_lock);
2029 	ret = t4_memory_rw(adap, 0, memtype, memaddr, 32, tpte, T4_MEMORY_READ);
2030 	spin_unlock(&adap->win0_lock);
2031 	return ret;
2032 
2033 err:
2034 	dev_err(adap->pdev_dev, "stag %#x, offset %#x out of range\n",
2035 		stag, offset);
2036 	return -EINVAL;
2037 }
2038 EXPORT_SYMBOL(cxgb4_read_tpte);
2039 
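/**
 *	cxgb4_read_sge_timestamp - read the current SGE timestamp
 *	@dev: the net device for any port of the adapter
 *
 *	Return the 64-bit SGE timestamp assembled from the low and high
 *	timestamp registers.
 */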
2040 u64 cxgb4_read_sge_timestamp(struct net_device *dev)
2041 {
2042 	u32 hi, lo;
2043 	struct adapter *adap;
2044 
2045 	adap = netdev2adap(dev);
2046 	lo = t4_read_reg(adap, SGE_TIMESTAMP_LO_A);
2047 	hi = TSVAL_G(t4_read_reg(adap, SGE_TIMESTAMP_HI_A));
2048 
2049 	return ((u64)hi << 32) | (u64)lo;
2050 }
2051 EXPORT_SYMBOL(cxgb4_read_sge_timestamp);
2052 
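/**
 *	cxgb4_bar2_sge_qregs - return BAR2 SGE Queue register information
 *	@dev: the net device for any port of the adapter
 *	@qid: the Queue ID
 *	@qtype: the Ingress or Egress type of @qid
 *	@user: true if this request is for a user mode queue
 *	@pbar2_qoffset: BAR2 Queue Offset (return value)
 *	@pbar2_qid: BAR2 Queue ID (return value)
 *
 *	Thin exported wrapper around t4_bar2_sge_qregs() which maps the
 *	driver-visible queue type onto the hardware queue type.
 */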
2053 int cxgb4_bar2_sge_qregs(struct net_device *dev,
2054 			 unsigned int qid,
2055 			 enum cxgb4_bar2_qtype qtype,
2056 			 int user,
2057 			 u64 *pbar2_qoffset,
2058 			 unsigned int *pbar2_qid)
2059 {
2060 	return t4_bar2_sge_qregs(netdev2adap(dev),
2061 				 qid,
2062 				 (qtype == CXGB4_BAR2_QTYPE_EGRESS
2063 				  ? T4_BAR2_QTYPE_EGRESS
2064 				  : T4_BAR2_QTYPE_INGRESS),
2065 				 user,
2066 				 pbar2_qoffset,
2067 				 pbar2_qid);
2068 }
2069 EXPORT_SYMBOL(cxgb4_bar2_sge_qregs);
2070 
2071 static struct pci_driver cxgb4_driver;
2072 
2073 static void check_neigh_update(struct neighbour *neigh)
2074 {
2075 	const struct device *parent;
2076 	const struct net_device *netdev = neigh->dev;
2077 
2078 	if (is_vlan_dev(netdev))
2079 		netdev = vlan_dev_real_dev(netdev);
2080 	parent = netdev->dev.parent;
2081 	if (parent && parent->driver == &cxgb4_driver.driver)
2082 		t4_l2t_update(dev_get_drvdata(parent), neigh);
2083 }
2084 
2085 static int netevent_cb(struct notifier_block *nb, unsigned long event,
2086 		       void *data)
2087 {
2088 	switch (event) {
2089 	case NETEVENT_NEIGH_UPDATE:
2090 		check_neigh_update(data);
2091 		break;
2092 	case NETEVENT_REDIRECT:
2093 	default:
2094 		break;
2095 	}
2096 	return 0;
2097 }
2098 
2099 static bool netevent_registered;
2100 static struct notifier_block cxgb4_netevent_nb = {
2101 	.notifier_call = netevent_cb
2102 };
2103 
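/* Poll the SGE doorbell FIFO occupancy counters, sleeping roughly @usecs
 * between reads, until both the low- and high-priority FIFOs are empty.
 */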
2104 static void drain_db_fifo(struct adapter *adap, int usecs)
2105 {
2106 	u32 v1, v2, lp_count, hp_count;
2107 
2108 	do {
2109 		v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
2110 		v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
2111 		if (is_t4(adap->params.chip)) {
2112 			lp_count = LP_COUNT_G(v1);
2113 			hp_count = HP_COUNT_G(v1);
2114 		} else {
2115 			lp_count = LP_COUNT_T5_G(v1);
2116 			hp_count = HP_COUNT_T5_G(v2);
2117 		}
2118 
2119 		if (lp_count == 0 && hp_count == 0)
2120 			break;
2121 		set_current_state(TASK_UNINTERRUPTIBLE);
2122 		schedule_timeout(usecs_to_jiffies(usecs));
2123 	} while (1);
2124 }
2125 
2126 static void disable_txq_db(struct sge_txq *q)
2127 {
2128 	unsigned long flags;
2129 
2130 	spin_lock_irqsave(&q->db_lock, flags);
2131 	q->db_disabled = 1;
2132 	spin_unlock_irqrestore(&q->db_lock, flags);
2133 }
2134 
2135 static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
2136 {
2137 	spin_lock_irq(&q->db_lock);
2138 	if (q->db_pidx_inc) {
2139 		/* Make sure that all writes to the TX descriptors
2140 		 * are committed before we tell HW about them.
2141 		 */
2142 		wmb();
2143 		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
2144 			     QID_V(q->cntxt_id) | PIDX_V(q->db_pidx_inc));
2145 		q->db_pidx_inc = 0;
2146 	}
2147 	q->db_disabled = 0;
2148 	spin_unlock_irq(&q->db_lock);
2149 }
2150 
2151 static void disable_dbs(struct adapter *adap)
2152 {
2153 	int i;
2154 
2155 	for_each_ethrxq(&adap->sge, i)
2156 		disable_txq_db(&adap->sge.ethtxq[i].q);
2157 	if (is_offload(adap)) {
2158 		struct sge_uld_txq_info *txq_info =
2159 			adap->sge.uld_txq_info[CXGB4_TX_OFLD];
2160 
2161 		if (txq_info) {
2162 			for_each_ofldtxq(&adap->sge, i) {
2163 				struct sge_uld_txq *txq = &txq_info->uldtxq[i];
2164 
2165 				disable_txq_db(&txq->q);
2166 			}
2167 		}
2168 	}
2169 	for_each_port(adap, i)
2170 		disable_txq_db(&adap->sge.ctrlq[i].q);
2171 }
2172 
2173 static void enable_dbs(struct adapter *adap)
2174 {
2175 	int i;
2176 
2177 	for_each_ethrxq(&adap->sge, i)
2178 		enable_txq_db(adap, &adap->sge.ethtxq[i].q);
2179 	if (is_offload(adap)) {
2180 		struct sge_uld_txq_info *txq_info =
2181 			adap->sge.uld_txq_info[CXGB4_TX_OFLD];
2182 
2183 		if (txq_info) {
2184 			for_each_ofldtxq(&adap->sge, i) {
2185 				struct sge_uld_txq *txq = &txq_info->uldtxq[i];
2186 
2187 				enable_txq_db(adap, &txq->q);
2188 			}
2189 		}
2190 	}
2191 	for_each_port(adap, i)
2192 		enable_txq_db(adap, &adap->sge.ctrlq[i].q);
2193 }
2194 
2195 static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
2196 {
2197 	enum cxgb4_uld type = CXGB4_ULD_RDMA;
2198 
2199 	if (adap->uld && adap->uld[type].handle)
2200 		adap->uld[type].control(adap->uld[type].handle, cmd);
2201 }
2202 
2203 static void process_db_full(struct work_struct *work)
2204 {
2205 	struct adapter *adap;
2206 
2207 	adap = container_of(work, struct adapter, db_full_task);
2208 
2209 	drain_db_fifo(adap, dbfifo_drain_delay);
2210 	enable_dbs(adap);
2211 	notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
2212 	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
2213 		t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
2214 				 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F,
2215 				 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F);
2216 	else
2217 		t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
2218 				 DBFIFO_LP_INT_F, DBFIFO_LP_INT_F);
2219 }
2220 
2221 static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
2222 {
2223 	u16 hw_pidx, hw_cidx;
2224 	int ret;
2225 
2226 	spin_lock_irq(&q->db_lock);
2227 	ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
2228 	if (ret)
2229 		goto out;
2230 	if (q->db_pidx != hw_pidx) {
2231 		u16 delta;
2232 		u32 val;
2233 
2234 		if (q->db_pidx >= hw_pidx)
2235 			delta = q->db_pidx - hw_pidx;
2236 		else
2237 			delta = q->size - hw_pidx + q->db_pidx;
2238 
2239 		if (is_t4(adap->params.chip))
2240 			val = PIDX_V(delta);
2241 		else
2242 			val = PIDX_T5_V(delta);
2243 		wmb();
2244 		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
2245 			     QID_V(q->cntxt_id) | val);
2246 	}
2247 out:
2248 	q->db_disabled = 0;
2249 	q->db_pidx_inc = 0;
2250 	spin_unlock_irq(&q->db_lock);
2251 	if (ret)
2252 		CH_WARN(adap, "DB drop recovery failed.\n");
2253 }
2254 
2255 static void recover_all_queues(struct adapter *adap)
2256 {
2257 	int i;
2258 
2259 	for_each_ethrxq(&adap->sge, i)
2260 		sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
2261 	if (is_offload(adap)) {
2262 		struct sge_uld_txq_info *txq_info =
2263 			adap->sge.uld_txq_info[CXGB4_TX_OFLD];
2264 		if (txq_info) {
2265 			for_each_ofldtxq(&adap->sge, i) {
2266 				struct sge_uld_txq *txq = &txq_info->uldtxq[i];
2267 
2268 				sync_txq_pidx(adap, &txq->q);
2269 			}
2270 		}
2271 	}
2272 	for_each_port(adap, i)
2273 		sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
2274 }
2275 
2276 static void process_db_drop(struct work_struct *work)
2277 {
2278 	struct adapter *adap;
2279 
2280 	adap = container_of(work, struct adapter, db_drop_task);
2281 
2282 	if (is_t4(adap->params.chip)) {
2283 		drain_db_fifo(adap, dbfifo_drain_delay);
2284 		notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
2285 		drain_db_fifo(adap, dbfifo_drain_delay);
2286 		recover_all_queues(adap);
2287 		drain_db_fifo(adap, dbfifo_drain_delay);
2288 		enable_dbs(adap);
2289 		notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
2290 	} else if (is_t5(adap->params.chip)) {
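		/* On T5, the dropped doorbell's queue ID and PIDX increment are
		 * read back from the hardware and replayed through the queue's
		 * BAR2 kernel doorbell.
		 */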
2291 		u32 dropped_db = t4_read_reg(adap, 0x010ac);
2292 		u16 qid = (dropped_db >> 15) & 0x1ffff;
2293 		u16 pidx_inc = dropped_db & 0x1fff;
2294 		u64 bar2_qoffset;
2295 		unsigned int bar2_qid;
2296 		int ret;
2297 
2298 		ret = t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS,
2299 					0, &bar2_qoffset, &bar2_qid);
2300 		if (ret)
2301 			dev_err(adap->pdev_dev, "doorbell drop recovery: qid=%d, pidx_inc=%d\n",
2302 				qid, pidx_inc);
2303 		else
2304 			writel(PIDX_T5_V(pidx_inc) | QID_V(bar2_qid),
2305 			       adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL);
2306 
2307 		/* Re-enable BAR2 WC */
2308 		t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
2309 	}
2310 
2311 	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
2312 		t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0);
2313 }
2314 
2315 void t4_db_full(struct adapter *adap)
2316 {
2317 	if (is_t4(adap->params.chip)) {
2318 		disable_dbs(adap);
2319 		notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
2320 		t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
2321 				 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F, 0);
2322 		queue_work(adap->workq, &adap->db_full_task);
2323 	}
2324 }
2325 
2326 void t4_db_dropped(struct adapter *adap)
2327 {
2328 	if (is_t4(adap->params.chip)) {
2329 		disable_dbs(adap);
2330 		notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
2331 	}
2332 	queue_work(adap->workq, &adap->db_drop_task);
2333 }
2334 
2335 void t4_register_netevent_notifier(void)
2336 {
2337 	if (!netevent_registered) {
2338 		register_netevent_notifier(&cxgb4_netevent_nb);
2339 		netevent_registered = true;
2340 	}
2341 }
2342 
2343 static void detach_ulds(struct adapter *adap)
2344 {
2345 	unsigned int i;
2346 
2347 	mutex_lock(&uld_mutex);
2348 	list_del(&adap->list_node);
2349 
2350 	for (i = 0; i < CXGB4_ULD_MAX; i++)
2351 		if (adap->uld && adap->uld[i].handle)
2352 			adap->uld[i].state_change(adap->uld[i].handle,
2353 					     CXGB4_STATE_DETACH);
2354 
2355 	if (netevent_registered && list_empty(&adapter_list)) {
2356 		unregister_netevent_notifier(&cxgb4_netevent_nb);
2357 		netevent_registered = false;
2358 	}
2359 	mutex_unlock(&uld_mutex);
2360 }
2361 
2362 static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
2363 {
2364 	unsigned int i;
2365 
2366 	mutex_lock(&uld_mutex);
2367 	for (i = 0; i < CXGB4_ULD_MAX; i++)
2368 		if (adap->uld && adap->uld[i].handle)
2369 			adap->uld[i].state_change(adap->uld[i].handle,
2370 						  new_state);
2371 	mutex_unlock(&uld_mutex);
2372 }
2373 
2374 #if IS_ENABLED(CONFIG_IPV6)
2375 static int cxgb4_inet6addr_handler(struct notifier_block *this,
2376 				   unsigned long event, void *data)
2377 {
2378 	struct inet6_ifaddr *ifa = data;
2379 	struct net_device *event_dev = ifa->idev->dev;
2380 	const struct device *parent = NULL;
2381 #if IS_ENABLED(CONFIG_BONDING)
2382 	struct adapter *adap;
2383 #endif
2384 	if (is_vlan_dev(event_dev))
2385 		event_dev = vlan_dev_real_dev(event_dev);
2386 #if IS_ENABLED(CONFIG_BONDING)
2387 	if (event_dev->flags & IFF_MASTER) {
2388 		list_for_each_entry(adap, &adapter_list, list_node) {
2389 			switch (event) {
2390 			case NETDEV_UP:
2391 				cxgb4_clip_get(adap->port[0],
2392 					       (const u32 *)ifa, 1);
2393 				break;
2394 			case NETDEV_DOWN:
2395 				cxgb4_clip_release(adap->port[0],
2396 						   (const u32 *)ifa, 1);
2397 				break;
2398 			default:
2399 				break;
2400 			}
2401 		}
2402 		return NOTIFY_OK;
2403 	}
2404 #endif
2405 
2406 	if (event_dev)
2407 		parent = event_dev->dev.parent;
2408 
2409 	if (parent && parent->driver == &cxgb4_driver.driver) {
2410 		switch (event) {
2411 		case NETDEV_UP:
2412 			cxgb4_clip_get(event_dev, (const u32 *)ifa, 1);
2413 			break;
2414 		case NETDEV_DOWN:
2415 			cxgb4_clip_release(event_dev, (const u32 *)ifa, 1);
2416 			break;
2417 		default:
2418 			break;
2419 		}
2420 	}
2421 	return NOTIFY_OK;
2422 }
2423 
2424 static bool inet6addr_registered;
2425 static struct notifier_block cxgb4_inet6addr_notifier = {
2426 	.notifier_call = cxgb4_inet6addr_handler
2427 };
2428 
2429 static void update_clip(const struct adapter *adap)
2430 {
2431 	int i;
2432 	struct net_device *dev;
2433 	int ret;
2434 
2435 	rcu_read_lock();
2436 
2437 	for (i = 0; i < MAX_NPORTS; i++) {
2438 		dev = adap->port[i];
2439 		ret = 0;
2440 
2441 		if (dev)
2442 			ret = cxgb4_update_root_dev_clip(dev);
2443 
2444 		if (ret < 0)
2445 			break;
2446 	}
2447 	rcu_read_unlock();
2448 }
2449 #endif /* IS_ENABLED(CONFIG_IPV6) */
2450 
2451 /**
2452  *	cxgb_up - enable the adapter
2453  *	@adap: adapter being enabled
2454  *
2455  *	Called when the first port is enabled, this function performs the
2456  *	actions necessary to make an adapter operational, such as completing
2457  *	the initialization of HW modules, and enabling interrupts.
2458  *
2459  *	Must be called with the rtnl lock held.
2460  */
2461 static int cxgb_up(struct adapter *adap)
2462 {
2463 	struct sge *s = &adap->sge;
2464 	int err;
2465 
2466 	mutex_lock(&uld_mutex);
2467 	err = setup_sge_queues(adap);
2468 	if (err)
2469 		goto rel_lock;
2470 	err = setup_rss(adap);
2471 	if (err)
2472 		goto freeq;
2473 
2474 	if (adap->flags & CXGB4_USING_MSIX) {
2475 		if (s->nd_msix_idx < 0) {
2476 			err = -ENOMEM;
2477 			goto irq_err;
2478 		}
2479 
2480 		err = request_irq(adap->msix_info[s->nd_msix_idx].vec,
2481 				  t4_nondata_intr, 0,
2482 				  adap->msix_info[s->nd_msix_idx].desc, adap);
2483 		if (err)
2484 			goto irq_err;
2485 
2486 		err = request_msix_queue_irqs(adap);
2487 		if (err)
2488 			goto irq_err_free_nd_msix;
2489 	} else {
2490 		err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
2491 				  (adap->flags & CXGB4_USING_MSI) ? 0
2492 								  : IRQF_SHARED,
2493 				  adap->port[0]->name, adap);
2494 		if (err)
2495 			goto irq_err;
2496 	}
2497 
2498 	enable_rx(adap);
2499 	t4_sge_start(adap);
2500 	t4_intr_enable(adap);
2501 	adap->flags |= CXGB4_FULL_INIT_DONE;
2502 	mutex_unlock(&uld_mutex);
2503 
2504 	notify_ulds(adap, CXGB4_STATE_UP);
2505 #if IS_ENABLED(CONFIG_IPV6)
2506 	update_clip(adap);
2507 #endif
2508 	return err;
2509 
2510 irq_err_free_nd_msix:
2511 	free_irq(adap->msix_info[s->nd_msix_idx].vec, adap);
2512 irq_err:
2513 	dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
2514 freeq:
2515 	t4_free_sge_resources(adap);
2516 rel_lock:
2517 	mutex_unlock(&uld_mutex);
2518 	return err;
2519 }
2520 
2521 static void cxgb_down(struct adapter *adapter)
2522 {
2523 	cancel_work_sync(&adapter->tid_release_task);
2524 	cancel_work_sync(&adapter->db_full_task);
2525 	cancel_work_sync(&adapter->db_drop_task);
2526 	adapter->tid_release_task_busy = false;
2527 	adapter->tid_release_head = NULL;
2528 
2529 	t4_sge_stop(adapter);
2530 	t4_free_sge_resources(adapter);
2531 
2532 	adapter->flags &= ~CXGB4_FULL_INIT_DONE;
2533 }
2534 
2535 /*
2536  * net_device operations
2537  */
2538 int cxgb_open(struct net_device *dev)
2539 {
2540 	struct port_info *pi = netdev_priv(dev);
2541 	struct adapter *adapter = pi->adapter;
2542 	int err;
2543 
2544 	netif_carrier_off(dev);
2545 
2546 	if (!(adapter->flags & CXGB4_FULL_INIT_DONE)) {
2547 		err = cxgb_up(adapter);
2548 		if (err < 0)
2549 			return err;
2550 	}
2551 
2552 	/* It's possible that the basic port information could have
2553 	 * changed since we first read it.
2554 	 */
2555 	err = t4_update_port_info(pi);
2556 	if (err < 0)
2557 		return err;
2558 
2559 	err = link_start(dev);
2560 	if (!err)
2561 		netif_tx_start_all_queues(dev);
2562 	return err;
2563 }
2564 
2565 int cxgb_close(struct net_device *dev)
2566 {
2567 	struct port_info *pi = netdev_priv(dev);
2568 	struct adapter *adapter = pi->adapter;
2569 	int ret;
2570 
2571 	netif_tx_stop_all_queues(dev);
2572 	netif_carrier_off(dev);
2573 	ret = t4_enable_pi_params(adapter, adapter->pf, pi,
2574 				  false, false, false);
2575 #ifdef CONFIG_CHELSIO_T4_DCB
2576 	cxgb4_dcb_reset(dev);
2577 	dcb_tx_queue_prio_enable(dev, false);
2578 #endif
2579 	return ret;
2580 }
2581 
2582 int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
2583 		__be32 sip, __be16 sport, __be16 vlan,
2584 		unsigned int queue, unsigned char port, unsigned char mask)
2585 {
2586 	int ret;
2587 	struct filter_entry *f;
2588 	struct adapter *adap;
2589 	int i;
2590 	u8 *val;
2591 
2592 	adap = netdev2adap(dev);
2593 
2594 	/* Adjust stid to correct filter index */
2595 	stid -= adap->tids.sftid_base;
2596 	stid += adap->tids.nftids;
2597 
2598 	/* Check to make sure the filter requested is writable ...
2599 	 */
2600 	f = &adap->tids.ftid_tab[stid];
2601 	ret = writable_filter(f);
2602 	if (ret)
2603 		return ret;
2604 
2605 	/* Clear out any old resources being used by the filter before
2606 	 * we start constructing the new filter.
2607 	 */
2608 	if (f->valid)
2609 		clear_filter(adap, f);
2610 
2611 	/* Clear out filter specifications */
2612 	memset(&f->fs, 0, sizeof(struct ch_filter_specification));
2613 	f->fs.val.lport = be16_to_cpu(sport);
2614 	f->fs.mask.lport  = ~0;
2615 	val = (u8 *)&sip;
2616 	if ((val[0] | val[1] | val[2] | val[3]) != 0) {
2617 		for (i = 0; i < 4; i++) {
2618 			f->fs.val.lip[i] = val[i];
2619 			f->fs.mask.lip[i] = ~0;
2620 		}
2621 		if (adap->params.tp.vlan_pri_map & PORT_F) {
2622 			f->fs.val.iport = port;
2623 			f->fs.mask.iport = mask;
2624 		}
2625 	}
2626 
2627 	if (adap->params.tp.vlan_pri_map & PROTOCOL_F) {
2628 		f->fs.val.proto = IPPROTO_TCP;
2629 		f->fs.mask.proto = ~0;
2630 	}
2631 
2632 	f->fs.dirsteer = 1;
2633 	f->fs.iq = queue;
2634 	/* Mark filter as locked */
2635 	f->locked = 1;
2636 	f->fs.rpttid = 1;
2637 
2638 	/* Save the actual tid. We need this to get the corresponding
2639 	 * filter entry structure in filter_rpl.
2640 	 */
2641 	f->tid = stid + adap->tids.ftid_base;
2642 	ret = set_filter_wr(adap, stid);
2643 	if (ret) {
2644 		clear_filter(adap, f);
2645 		return ret;
2646 	}
2647 
2648 	return 0;
2649 }
2650 EXPORT_SYMBOL(cxgb4_create_server_filter);
2651 
2652 int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
2653 		unsigned int queue, bool ipv6)
2654 {
2655 	struct filter_entry *f;
2656 	struct adapter *adap;
2657 
2658 	adap = netdev2adap(dev);
2659 
2660 	/* Adjust stid to correct filter index */
2661 	stid -= adap->tids.sftid_base;
2662 	stid += adap->tids.nftids;
2663 
2664 	f = &adap->tids.ftid_tab[stid];
2665 	/* Unlock the filter */
2666 	f->locked = 0;
2667 
2668 	return delete_filter(adap, stid);
2669 }
2670 EXPORT_SYMBOL(cxgb4_remove_server_filter);
2671 
2672 static void cxgb_get_stats(struct net_device *dev,
2673 			   struct rtnl_link_stats64 *ns)
2674 {
2675 	struct port_stats stats;
2676 	struct port_info *p = netdev_priv(dev);
2677 	struct adapter *adapter = p->adapter;
2678 
2679 	/* Block retrieving statistics during EEH error
2680 	 * recovery. Otherwise, the recovery might fail
2681 	 * and the PCI device will be removed permanently.
2682 	 */
2683 	spin_lock(&adapter->stats_lock);
2684 	if (!netif_device_present(dev)) {
2685 		spin_unlock(&adapter->stats_lock);
2686 		return;
2687 	}
2688 	t4_get_port_stats_offset(adapter, p->tx_chan, &stats,
2689 				 &p->stats_base);
2690 	spin_unlock(&adapter->stats_lock);
2691 
2692 	ns->tx_bytes   = stats.tx_octets;
2693 	ns->tx_packets = stats.tx_frames;
2694 	ns->rx_bytes   = stats.rx_octets;
2695 	ns->rx_packets = stats.rx_frames;
2696 	ns->multicast  = stats.rx_mcast_frames;
2697 
2698 	/* detailed rx_errors */
2699 	ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
2700 			       stats.rx_runt;
2701 	ns->rx_over_errors   = 0;
2702 	ns->rx_crc_errors    = stats.rx_fcs_err;
2703 	ns->rx_frame_errors  = stats.rx_symbol_err;
2704 	ns->rx_dropped	     = stats.rx_ovflow0 + stats.rx_ovflow1 +
2705 			       stats.rx_ovflow2 + stats.rx_ovflow3 +
2706 			       stats.rx_trunc0 + stats.rx_trunc1 +
2707 			       stats.rx_trunc2 + stats.rx_trunc3;
2708 	ns->rx_missed_errors = 0;
2709 
2710 	/* detailed tx_errors */
2711 	ns->tx_aborted_errors   = 0;
2712 	ns->tx_carrier_errors   = 0;
2713 	ns->tx_fifo_errors      = 0;
2714 	ns->tx_heartbeat_errors = 0;
2715 	ns->tx_window_errors    = 0;
2716 
2717 	ns->tx_errors = stats.tx_error_frames;
2718 	ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
2719 		ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
2720 }
2721 
2722 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2723 {
2724 	unsigned int mbox;
2725 	int ret = 0, prtad, devad;
2726 	struct port_info *pi = netdev_priv(dev);
2727 	struct adapter *adapter = pi->adapter;
2728 	struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
2729 
2730 	switch (cmd) {
2731 	case SIOCGMIIPHY:
2732 		if (pi->mdio_addr < 0)
2733 			return -EOPNOTSUPP;
2734 		data->phy_id = pi->mdio_addr;
2735 		break;
2736 	case SIOCGMIIREG:
2737 	case SIOCSMIIREG:
2738 		if (mdio_phy_id_is_c45(data->phy_id)) {
2739 			prtad = mdio_phy_id_prtad(data->phy_id);
2740 			devad = mdio_phy_id_devad(data->phy_id);
2741 		} else if (data->phy_id < 32) {
2742 			prtad = data->phy_id;
2743 			devad = 0;
2744 			data->reg_num &= 0x1f;
2745 		} else
2746 			return -EINVAL;
2747 
2748 		mbox = pi->adapter->pf;
2749 		if (cmd == SIOCGMIIREG)
2750 			ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
2751 					 data->reg_num, &data->val_out);
2752 		else
2753 			ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
2754 					 data->reg_num, data->val_in);
2755 		break;
2756 	case SIOCGHWTSTAMP:
2757 		return copy_to_user(req->ifr_data, &pi->tstamp_config,
2758 				    sizeof(pi->tstamp_config)) ?
2759 			-EFAULT : 0;
2760 	case SIOCSHWTSTAMP:
2761 		if (copy_from_user(&pi->tstamp_config, req->ifr_data,
2762 				   sizeof(pi->tstamp_config)))
2763 			return -EFAULT;
2764 
2765 		if (!is_t4(adapter->params.chip)) {
2766 			switch (pi->tstamp_config.tx_type) {
2767 			case HWTSTAMP_TX_OFF:
2768 			case HWTSTAMP_TX_ON:
2769 				break;
2770 			default:
2771 				return -ERANGE;
2772 			}
2773 
2774 			switch (pi->tstamp_config.rx_filter) {
2775 			case HWTSTAMP_FILTER_NONE:
2776 				pi->rxtstamp = false;
2777 				break;
2778 			case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2779 			case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2780 				cxgb4_ptprx_timestamping(pi, pi->port_id,
2781 							 PTP_TS_L4);
2782 				break;
2783 			case HWTSTAMP_FILTER_PTP_V2_EVENT:
2784 				cxgb4_ptprx_timestamping(pi, pi->port_id,
2785 							 PTP_TS_L2_L4);
2786 				break;
2787 			case HWTSTAMP_FILTER_ALL:
2788 			case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2789 			case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2790 			case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2791 			case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2792 				pi->rxtstamp = true;
2793 				break;
2794 			default:
2795 				pi->tstamp_config.rx_filter =
2796 					HWTSTAMP_FILTER_NONE;
2797 				return -ERANGE;
2798 			}
2799 
2800 			if ((pi->tstamp_config.tx_type == HWTSTAMP_TX_OFF) &&
2801 			    (pi->tstamp_config.rx_filter ==
2802 				HWTSTAMP_FILTER_NONE)) {
2803 				if (cxgb4_ptp_txtype(adapter, pi->port_id) >= 0)
2804 					pi->ptp_enable = false;
2805 			}
2806 
2807 			if (pi->tstamp_config.rx_filter !=
2808 				HWTSTAMP_FILTER_NONE) {
2809 				if (cxgb4_ptp_redirect_rx_packet(adapter,
2810 								 pi) >= 0)
2811 					pi->ptp_enable = true;
2812 			}
2813 		} else {
2814 			/* For T4 Adapters */
2815 			switch (pi->tstamp_config.rx_filter) {
2816 			case HWTSTAMP_FILTER_NONE:
2817 				pi->rxtstamp = false;
2818 				break;
2819 			case HWTSTAMP_FILTER_ALL:
2820 				pi->rxtstamp = true;
2821 				break;
2822 			default:
2823 				pi->tstamp_config.rx_filter =
2824 					HWTSTAMP_FILTER_NONE;
2825 				return -ERANGE;
2826 			}
2827 		}
2828 		return copy_to_user(req->ifr_data, &pi->tstamp_config,
2829 				    sizeof(pi->tstamp_config)) ?
2830 			-EFAULT : 0;
2831 	default:
2832 		return -EOPNOTSUPP;
2833 	}
2834 	return ret;
2835 }
2836 
2837 static void cxgb_set_rxmode(struct net_device *dev)
2838 {
2839 	/* unfortunately we can't return errors to the stack */
2840 	set_rxmode(dev, -1, false);
2841 }
2842 
2843 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2844 {
2845 	int ret;
2846 	struct port_info *pi = netdev_priv(dev);
2847 
2848 	ret = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, new_mtu, -1,
2849 			    -1, -1, -1, true);
2850 	if (!ret)
2851 		dev->mtu = new_mtu;
2852 	return ret;
2853 }
2854 
2855 #ifdef CONFIG_PCI_IOV
2856 static int cxgb4_mgmt_open(struct net_device *dev)
2857 {
2858 	/* Turn carrier off since we don't have to transmit anything on this
2859 	 * interface.
2860 	 */
2861 	netif_carrier_off(dev);
2862 	return 0;
2863 }
2864 
2865 /* Fill MAC address that will be assigned by the FW */
2866 static void cxgb4_mgmt_fill_vf_station_mac_addr(struct adapter *adap)
2867 {
2868 	u8 hw_addr[ETH_ALEN], macaddr[ETH_ALEN];
2869 	unsigned int i, vf, nvfs;
2870 	u16 a, b;
2871 	int err;
2872 	u8 *na;
2873 
2874 	adap->params.pci.vpd_cap_addr = pci_find_capability(adap->pdev,
2875 							    PCI_CAP_ID_VPD);
2876 	err = t4_get_raw_vpd_params(adap, &adap->params.vpd);
2877 	if (err)
2878 		return;
2879 
2880 	na = adap->params.vpd.na;
2881 	for (i = 0; i < ETH_ALEN; i++)
2882 		hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
2883 			      hex2val(na[2 * i + 1]));
2884 
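	/* Fold the VPD node address into a locally administered, non-multicast
	 * base MAC and give each VF a unique final octet of PF * nvfs + VF.
	 */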
2885 	a = (hw_addr[0] << 8) | hw_addr[1];
2886 	b = (hw_addr[1] << 8) | hw_addr[2];
2887 	a ^= b;
2888 	a |= 0x0200;    /* locally assigned Ethernet MAC address */
2889 	a &= ~0x0100;   /* not a multicast Ethernet MAC address */
2890 	macaddr[0] = a >> 8;
2891 	macaddr[1] = a & 0xff;
2892 
2893 	for (i = 2; i < 5; i++)
2894 		macaddr[i] = hw_addr[i + 1];
2895 
2896 	for (vf = 0, nvfs = pci_sriov_get_totalvfs(adap->pdev);
2897 		vf < nvfs; vf++) {
2898 		macaddr[5] = adap->pf * nvfs + vf;
2899 		ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, macaddr);
2900 	}
2901 }
2902 
2903 static int cxgb4_mgmt_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
2904 {
2905 	struct port_info *pi = netdev_priv(dev);
2906 	struct adapter *adap = pi->adapter;
2907 	int ret;
2908 
2909 	/* verify MAC addr is valid */
2910 	if (!is_valid_ether_addr(mac)) {
2911 		dev_err(pi->adapter->pdev_dev,
2912 			"Invalid Ethernet address %pM for VF %d\n",
2913 			mac, vf);
2914 		return -EINVAL;
2915 	}
2916 
2917 	dev_info(pi->adapter->pdev_dev,
2918 		 "Setting MAC %pM on VF %d\n", mac, vf);
2919 	ret = t4_set_vf_mac_acl(adap, vf + 1, 1, mac);
2920 	if (!ret)
2921 		ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, mac);
2922 	return ret;
2923 }
2924 
2925 static int cxgb4_mgmt_get_vf_config(struct net_device *dev,
2926 				    int vf, struct ifla_vf_info *ivi)
2927 {
2928 	struct port_info *pi = netdev_priv(dev);
2929 	struct adapter *adap = pi->adapter;
2930 	struct vf_info *vfinfo;
2931 
2932 	if (vf >= adap->num_vfs)
2933 		return -EINVAL;
2934 	vfinfo = &adap->vfinfo[vf];
2935 
2936 	ivi->vf = vf;
2937 	ivi->max_tx_rate = vfinfo->tx_rate;
2938 	ivi->min_tx_rate = 0;
2939 	ether_addr_copy(ivi->mac, vfinfo->vf_mac_addr);
2940 	ivi->vlan = vfinfo->vlan;
2941 	ivi->linkstate = vfinfo->link_state;
2942 	return 0;
2943 }
2944 
2945 static int cxgb4_mgmt_get_phys_port_id(struct net_device *dev,
2946 				       struct netdev_phys_item_id *ppid)
2947 {
2948 	struct port_info *pi = netdev_priv(dev);
2949 	unsigned int phy_port_id;
2950 
2951 	phy_port_id = pi->adapter->adap_idx * 10 + pi->port_id;
2952 	ppid->id_len = sizeof(phy_port_id);
2953 	memcpy(ppid->id, &phy_port_id, ppid->id_len);
2954 	return 0;
2955 }
2956 
2957 static int cxgb4_mgmt_set_vf_rate(struct net_device *dev, int vf,
2958 				  int min_tx_rate, int max_tx_rate)
2959 {
2960 	struct port_info *pi = netdev_priv(dev);
2961 	struct adapter *adap = pi->adapter;
2962 	unsigned int link_ok, speed, mtu;
2963 	u32 fw_pfvf, fw_class;
2964 	int class_id = vf;
2965 	int ret;
2966 	u16 pktsize;
2967 
2968 	if (vf >= adap->num_vfs)
2969 		return -EINVAL;
2970 
2971 	if (min_tx_rate) {
2972 		dev_err(adap->pdev_dev,
2973 			"Min tx rate (%d) for VF %d is invalid; only 0 is supported.\n",
2974 			min_tx_rate, vf);
2975 		return -EINVAL;
2976 	}
2977 
2978 	if (max_tx_rate == 0) {
2979 		/* unbind VF from any Traffic Class */
2980 		fw_pfvf =
2981 		    (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
2982 		     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH));
2983 		fw_class = 0xffffffff;
2984 		ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1,
2985 				    &fw_pfvf, &fw_class);
2986 		if (ret) {
2987 			dev_err(adap->pdev_dev,
2988 				"Err %d in unbinding PF %d VF %d from TX Rate Limiting\n",
2989 				ret, adap->pf, vf);
2990 			return -EINVAL;
2991 		}
2992 		dev_info(adap->pdev_dev,
2993 			 "PF %d VF %d is unbound from TX Rate Limiting\n",
2994 			 adap->pf, vf);
2995 		adap->vfinfo[vf].tx_rate = 0;
2996 		return 0;
2997 	}
2998 
2999 	ret = t4_get_link_params(pi, &link_ok, &speed, &mtu);
3000 	if (ret != FW_SUCCESS) {
3001 		dev_err(adap->pdev_dev,
3002 			"Failed to get link information for VF %d\n", vf);
3003 		return -EINVAL;
3004 	}
3005 
3006 	if (!link_ok) {
3007 		dev_err(adap->pdev_dev, "Link down for VF %d\n", vf);
3008 		return -EINVAL;
3009 	}
3010 
3011 	if (max_tx_rate > speed) {
3012 		dev_err(adap->pdev_dev,
3013 			"Max tx rate %d for VF %d can't be > link-speed %u\n",
3014 			max_tx_rate, vf, speed);
3015 		return -EINVAL;
3016 	}
3017 
3018 	pktsize = mtu;
3019 	/* subtract ethhdr size and 4 bytes crc since the f/w appends them */
3020 	pktsize = pktsize - sizeof(struct ethhdr) - 4;
3021 	/* subtract ipv4 hdr size, tcp hdr size to get typical IPv4 MSS size */
3022 	pktsize = pktsize - sizeof(struct iphdr) - sizeof(struct tcphdr);
3023 	/* configure Traffic Class for rate-limiting */
3024 	ret = t4_sched_params(adap, SCHED_CLASS_TYPE_PACKET,
3025 			      SCHED_CLASS_LEVEL_CL_RL,
3026 			      SCHED_CLASS_MODE_CLASS,
3027 			      SCHED_CLASS_RATEUNIT_BITS,
3028 			      SCHED_CLASS_RATEMODE_ABS,
3029 			      pi->tx_chan, class_id, 0,
3030 			      max_tx_rate * 1000, 0, pktsize, 0);
3031 	if (ret) {
3032 		dev_err(adap->pdev_dev, "Err %d for Traffic Class config\n",
3033 			ret);
3034 		return -EINVAL;
3035 	}
3036 	dev_info(adap->pdev_dev,
3037 		 "Class %d with MSS %u configured with rate %u\n",
3038 		 class_id, pktsize, max_tx_rate);
3039 
3040 	/* bind VF to configured Traffic Class */
3041 	fw_pfvf = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
3042 		   FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH));
3043 	fw_class = class_id;
3044 	ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1, &fw_pfvf,
3045 			    &fw_class);
3046 	if (ret) {
3047 		dev_err(adap->pdev_dev,
3048 			"Err %d in binding PF %d VF %d to Traffic Class %d\n",
3049 			ret, adap->pf, vf, class_id);
3050 		return -EINVAL;
3051 	}
3052 	dev_info(adap->pdev_dev, "PF %d VF %d is bound to Class %d\n",
3053 		 adap->pf, vf, class_id);
3054 	adap->vfinfo[vf].tx_rate = max_tx_rate;
3055 	return 0;
3056 }
3057 
3058 static int cxgb4_mgmt_set_vf_vlan(struct net_device *dev, int vf,
3059 				  u16 vlan, u8 qos, __be16 vlan_proto)
3060 {
3061 	struct port_info *pi = netdev_priv(dev);
3062 	struct adapter *adap = pi->adapter;
3063 	int ret;
3064 
3065 	if (vf >= adap->num_vfs || vlan > 4095 || qos > 7)
3066 		return -EINVAL;
3067 
3068 	if (vlan_proto != htons(ETH_P_8021Q) || qos != 0)
3069 		return -EPROTONOSUPPORT;
3070 
3071 	ret = t4_set_vlan_acl(adap, adap->mbox, vf + 1, vlan);
3072 	if (!ret) {
3073 		adap->vfinfo[vf].vlan = vlan;
3074 		return 0;
3075 	}
3076 
3077 	dev_err(adap->pdev_dev, "Err %d %s VLAN ACL for PF/VF %d/%d\n",
3078 		ret, (vlan ? "setting" : "clearing"), adap->pf, vf);
3079 	return ret;
3080 }
3081 
3082 static int cxgb4_mgmt_set_vf_link_state(struct net_device *dev, int vf,
3083 					int link)
3084 {
3085 	struct port_info *pi = netdev_priv(dev);
3086 	struct adapter *adap = pi->adapter;
3087 	u32 param, val;
3088 	int ret = 0;
3089 
3090 	if (vf >= adap->num_vfs)
3091 		return -EINVAL;
3092 
3093 	switch (link) {
3094 	case IFLA_VF_LINK_STATE_AUTO:
3095 		val = FW_VF_LINK_STATE_AUTO;
3096 		break;
3097 
3098 	case IFLA_VF_LINK_STATE_ENABLE:
3099 		val = FW_VF_LINK_STATE_ENABLE;
3100 		break;
3101 
3102 	case IFLA_VF_LINK_STATE_DISABLE:
3103 		val = FW_VF_LINK_STATE_DISABLE;
3104 		break;
3105 
3106 	default:
3107 		return -EINVAL;
3108 	}
3109 
3110 	param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
3111 		 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_LINK_STATE));
3112 	ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1,
3113 			    &param, &val);
3114 	if (ret) {
3115 		dev_err(adap->pdev_dev,
3116 			"Error %d in setting PF %d VF %d link state\n",
3117 			ret, adap->pf, vf);
3118 		return -EINVAL;
3119 	}
3120 
3121 	adap->vfinfo[vf].link_state = link;
3122 	return ret;
3123 }
3124 #endif /* CONFIG_PCI_IOV */
3125 
3126 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
3127 {
3128 	int ret;
3129 	struct sockaddr *addr = p;
3130 	struct port_info *pi = netdev_priv(dev);
3131 
3132 	if (!is_valid_ether_addr(addr->sa_data))
3133 		return -EADDRNOTAVAIL;
3134 
3135 	ret = cxgb4_update_mac_filt(pi, pi->viid, &pi->xact_addr_filt,
3136 				    addr->sa_data, true, &pi->smt_idx);
3137 	if (ret < 0)
3138 		return ret;
3139 
3140 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3141 	return 0;
3142 }
3143 
3144 #ifdef CONFIG_NET_POLL_CONTROLLER
3145 static void cxgb_netpoll(struct net_device *dev)
3146 {
3147 	struct port_info *pi = netdev_priv(dev);
3148 	struct adapter *adap = pi->adapter;
3149 
3150 	if (adap->flags & CXGB4_USING_MSIX) {
3151 		int i;
3152 		struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
3153 
3154 		for (i = pi->nqsets; i; i--, rx++)
3155 			t4_sge_intr_msix(0, &rx->rspq);
3156 	} else
3157 		t4_intr_handler(adap)(0, adap);
3158 }
3159 #endif
3160 
3161 static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
3162 {
3163 	struct port_info *pi = netdev_priv(dev);
3164 	struct adapter *adap = pi->adapter;
3165 	struct ch_sched_queue qe = { 0 };
3166 	struct ch_sched_params p = { 0 };
3167 	struct sched_class *e;
3168 	u32 req_rate;
3169 	int err = 0;
3170 
3171 	if (!can_sched(dev))
3172 		return -ENOTSUPP;
3173 
3174 	if (index < 0 || index > pi->nqsets - 1)
3175 		return -EINVAL;
3176 
3177 	if (!(adap->flags & CXGB4_FULL_INIT_DONE)) {
3178 		dev_err(adap->pdev_dev,
3179 			"Failed to rate limit on queue %d. Link Down?\n",
3180 			index);
3181 		return -EINVAL;
3182 	}
3183 
3184 	qe.queue = index;
3185 	e = cxgb4_sched_queue_lookup(dev, &qe);
3186 	if (e && e->info.u.params.level != SCHED_CLASS_LEVEL_CL_RL) {
3187 		dev_err(adap->pdev_dev,
3188 			"Queue %u already bound to class %u of type: %u\n",
3189 			index, e->idx, e->info.u.params.level);
3190 		return -EBUSY;
3191 	}
3192 
3193 	/* Convert from Mbps to Kbps */
3194 	req_rate = rate * 1000;
3195 
3196 	/* Max rate is 100 Gbps */
3197 	if (req_rate > SCHED_MAX_RATE_KBPS) {
3198 		dev_err(adap->pdev_dev,
3199 			"Invalid rate %u Mbps, Max rate is %u Mbps\n",
3200 			rate, SCHED_MAX_RATE_KBPS / 1000);
3201 		return -ERANGE;
3202 	}
3203 
3204 	/* First unbind the queue from any existing class */
3205 	memset(&qe, 0, sizeof(qe));
3206 	qe.queue = index;
3207 	qe.class = SCHED_CLS_NONE;
3208 
3209 	err = cxgb4_sched_class_unbind(dev, (void *)(&qe), SCHED_QUEUE);
3210 	if (err) {
3211 		dev_err(adap->pdev_dev,
3212 			"Unbinding Queue %d on port %d failed. Err: %d\n",
3213 			index, pi->port_id, err);
3214 		return err;
3215 	}
3216 
3217 	/* Queue already unbound */
3218 	if (!req_rate)
3219 		return 0;
3220 
3221 	/* Fetch any available unused or matching scheduling class */
3222 	p.type = SCHED_CLASS_TYPE_PACKET;
3223 	p.u.params.level    = SCHED_CLASS_LEVEL_CL_RL;
3224 	p.u.params.mode     = SCHED_CLASS_MODE_CLASS;
3225 	p.u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS;
3226 	p.u.params.ratemode = SCHED_CLASS_RATEMODE_ABS;
3227 	p.u.params.channel  = pi->tx_chan;
3228 	p.u.params.class    = SCHED_CLS_NONE;
3229 	p.u.params.minrate  = 0;
3230 	p.u.params.maxrate  = req_rate;
3231 	p.u.params.weight   = 0;
3232 	p.u.params.pktsize  = dev->mtu;
3233 
3234 	e = cxgb4_sched_class_alloc(dev, &p);
3235 	if (!e)
3236 		return -ENOMEM;
3237 
3238 	/* Bind the queue to a scheduling class */
3239 	memset(&qe, 0, sizeof(qe));
3240 	qe.queue = index;
3241 	qe.class = e->idx;
3242 
3243 	err = cxgb4_sched_class_bind(dev, (void *)(&qe), SCHED_QUEUE);
3244 	if (err)
3245 		dev_err(adap->pdev_dev,
3246 			"Queue rate limiting failed. Err: %d\n", err);
3247 	return err;
3248 }
3249 
3250 static int cxgb_setup_tc_flower(struct net_device *dev,
3251 				struct flow_cls_offload *cls_flower)
3252 {
3253 	switch (cls_flower->command) {
3254 	case FLOW_CLS_REPLACE:
3255 		return cxgb4_tc_flower_replace(dev, cls_flower);
3256 	case FLOW_CLS_DESTROY:
3257 		return cxgb4_tc_flower_destroy(dev, cls_flower);
3258 	case FLOW_CLS_STATS:
3259 		return cxgb4_tc_flower_stats(dev, cls_flower);
3260 	default:
3261 		return -EOPNOTSUPP;
3262 	}
3263 }
3264 
3265 static int cxgb_setup_tc_cls_u32(struct net_device *dev,
3266 				 struct tc_cls_u32_offload *cls_u32)
3267 {
3268 	switch (cls_u32->command) {
3269 	case TC_CLSU32_NEW_KNODE:
3270 	case TC_CLSU32_REPLACE_KNODE:
3271 		return cxgb4_config_knode(dev, cls_u32);
3272 	case TC_CLSU32_DELETE_KNODE:
3273 		return cxgb4_delete_knode(dev, cls_u32);
3274 	default:
3275 		return -EOPNOTSUPP;
3276 	}
3277 }
3278 
3279 static int cxgb_setup_tc_matchall(struct net_device *dev,
3280 				  struct tc_cls_matchall_offload *cls_matchall,
3281 				  bool ingress)
3282 {
3283 	struct adapter *adap = netdev2adap(dev);
3284 
3285 	if (!adap->tc_matchall)
3286 		return -ENOMEM;
3287 
3288 	switch (cls_matchall->command) {
3289 	case TC_CLSMATCHALL_REPLACE:
3290 		return cxgb4_tc_matchall_replace(dev, cls_matchall, ingress);
3291 	case TC_CLSMATCHALL_DESTROY:
3292 		return cxgb4_tc_matchall_destroy(dev, cls_matchall, ingress);
3293 	case TC_CLSMATCHALL_STATS:
3294 		if (ingress)
3295 			return cxgb4_tc_matchall_stats(dev, cls_matchall);
3296 		break;
3297 	default:
3298 		break;
3299 	}
3300 
3301 	return -EOPNOTSUPP;
3302 }
3303 
3304 static int cxgb_setup_tc_block_ingress_cb(enum tc_setup_type type,
3305 					  void *type_data, void *cb_priv)
3306 {
3307 	struct net_device *dev = cb_priv;
3308 	struct port_info *pi = netdev2pinfo(dev);
3309 	struct adapter *adap = netdev2adap(dev);
3310 
3311 	if (!(adap->flags & CXGB4_FULL_INIT_DONE)) {
3312 		dev_err(adap->pdev_dev,
3313 			"Failed to setup tc on port %d. Link Down?\n",
3314 			pi->port_id);
3315 		return -EINVAL;
3316 	}
3317 
3318 	if (!tc_cls_can_offload_and_chain0(dev, type_data))
3319 		return -EOPNOTSUPP;
3320 
3321 	switch (type) {
3322 	case TC_SETUP_CLSU32:
3323 		return cxgb_setup_tc_cls_u32(dev, type_data);
3324 	case TC_SETUP_CLSFLOWER:
3325 		return cxgb_setup_tc_flower(dev, type_data);
3326 	case TC_SETUP_CLSMATCHALL:
3327 		return cxgb_setup_tc_matchall(dev, type_data, true);
3328 	default:
3329 		return -EOPNOTSUPP;
3330 	}
3331 }
3332 
3333 static int cxgb_setup_tc_block_egress_cb(enum tc_setup_type type,
3334 					 void *type_data, void *cb_priv)
3335 {
3336 	struct net_device *dev = cb_priv;
3337 	struct port_info *pi = netdev2pinfo(dev);
3338 	struct adapter *adap = netdev2adap(dev);
3339 
3340 	if (!(adap->flags & CXGB4_FULL_INIT_DONE)) {
3341 		dev_err(adap->pdev_dev,
3342 			"Failed to setup tc on port %d. Link Down?\n",
3343 			pi->port_id);
3344 		return -EINVAL;
3345 	}
3346 
3347 	if (!tc_cls_can_offload_and_chain0(dev, type_data))
3348 		return -EOPNOTSUPP;
3349 
3350 	switch (type) {
3351 	case TC_SETUP_CLSMATCHALL:
3352 		return cxgb_setup_tc_matchall(dev, type_data, false);
3353 	default:
3354 		break;
3355 	}
3356 
3357 	return -EOPNOTSUPP;
3358 }
3359 
3360 static int cxgb_setup_tc_mqprio(struct net_device *dev,
3361 				struct tc_mqprio_qopt_offload *mqprio)
3362 {
3363 	struct adapter *adap = netdev2adap(dev);
3364 
3365 	if (!is_ethofld(adap) || !adap->tc_mqprio)
3366 		return -ENOMEM;
3367 
3368 	return cxgb4_setup_tc_mqprio(dev, mqprio);
3369 }
3370 
3371 static LIST_HEAD(cxgb_block_cb_list);
3372 
3373 static int cxgb_setup_tc_block(struct net_device *dev,
3374 			       struct flow_block_offload *f)
3375 {
3376 	struct port_info *pi = netdev_priv(dev);
3377 	flow_setup_cb_t *cb;
3378 	bool ingress_only;
3379 
3380 	pi->tc_block_shared = f->block_shared;
3381 	if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
3382 		cb = cxgb_setup_tc_block_egress_cb;
3383 		ingress_only = false;
3384 	} else {
3385 		cb = cxgb_setup_tc_block_ingress_cb;
3386 		ingress_only = true;
3387 	}
3388 
3389 	return flow_block_cb_setup_simple(f, &cxgb_block_cb_list,
3390 					  cb, pi, dev, ingress_only);
3391 }
3392 
3393 static int cxgb_setup_tc(struct net_device *dev, enum tc_setup_type type,
3394 			 void *type_data)
3395 {
3396 	switch (type) {
3397 	case TC_SETUP_QDISC_MQPRIO:
3398 		return cxgb_setup_tc_mqprio(dev, type_data);
3399 	case TC_SETUP_BLOCK:
3400 		return cxgb_setup_tc_block(dev, type_data);
3401 	default:
3402 		return -EOPNOTSUPP;
3403 	}
3404 }
3405 
3406 static void cxgb_del_udp_tunnel(struct net_device *netdev,
3407 				struct udp_tunnel_info *ti)
3408 {
3409 	struct port_info *pi = netdev_priv(netdev);
3410 	struct adapter *adapter = pi->adapter;
3411 	unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
3412 	u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
3413 	int ret = 0, i;
3414 
3415 	if (chip_ver < CHELSIO_T6)
3416 		return;
3417 
3418 	switch (ti->type) {
3419 	case UDP_TUNNEL_TYPE_VXLAN:
3420 		if (!adapter->vxlan_port_cnt ||
3421 		    adapter->vxlan_port != ti->port)
3422 			return; /* Invalid VxLAN destination port */
3423 
3424 		adapter->vxlan_port_cnt--;
3425 		if (adapter->vxlan_port_cnt)
3426 			return;
3427 
3428 		adapter->vxlan_port = 0;
3429 		t4_write_reg(adapter, MPS_RX_VXLAN_TYPE_A, 0);
3430 		break;
3431 	case UDP_TUNNEL_TYPE_GENEVE:
3432 		if (!adapter->geneve_port_cnt ||
3433 		    adapter->geneve_port != ti->port)
3434 			return; /* Invalid GENEVE destination port */
3435 
3436 		adapter->geneve_port_cnt--;
3437 		if (adapter->geneve_port_cnt)
3438 			return;
3439 
3440 		adapter->geneve_port = 0;
3441 		t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A, 0);
3442 		break;
3443 	default:
3444 		return;
3445 	}
3446 
3447 	/* Matchall mac entries can be deleted only after all tunnel ports
3448 	 * are brought down or removed.
3449 	 */
3450 	if (!adapter->rawf_cnt)
3451 		return;
3452 	for_each_port(adapter, i) {
3453 		pi = adap2pinfo(adapter, i);
3454 		ret = t4_free_raw_mac_filt(adapter, pi->viid,
3455 					   match_all_mac, match_all_mac,
3456 					   adapter->rawf_start +
3457 					    pi->port_id,
3458 					   1, pi->port_id, false);
3459 		if (ret < 0) {
3460 			netdev_info(netdev, "Failed to free mac filter entry for port %d\n",
3461 				    i);
3462 			return;
3463 		}
3464 	}
3465 }
3466 
3467 static void cxgb_add_udp_tunnel(struct net_device *netdev,
3468 				struct udp_tunnel_info *ti)
3469 {
3470 	struct port_info *pi = netdev_priv(netdev);
3471 	struct adapter *adapter = pi->adapter;
3472 	unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
3473 	u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
3474 	int i, ret;
3475 
3476 	if (chip_ver < CHELSIO_T6 || !adapter->rawf_cnt)
3477 		return;
3478 
3479 	switch (ti->type) {
3480 	case UDP_TUNNEL_TYPE_VXLAN:
3481 		/* Callback for adding vxlan port can be called with the same
3482 		 * port for both IPv4 and IPv6. We should not disable the
3483 		 * offloading when the same port for both protocols is added
3484 		 * and later one of them is removed.
3485 		 */
3486 		if (adapter->vxlan_port_cnt &&
3487 		    adapter->vxlan_port == ti->port) {
3488 			adapter->vxlan_port_cnt++;
3489 			return;
3490 		}
3491 
3492 		/* We will support only one VxLAN port */
3493 		if (adapter->vxlan_port_cnt) {
3494 			netdev_info(netdev, "UDP port %d already offloaded, not adding port %d\n",
3495 				    be16_to_cpu(adapter->vxlan_port),
3496 				    be16_to_cpu(ti->port));
3497 			return;
3498 		}
3499 
3500 		adapter->vxlan_port = ti->port;
3501 		adapter->vxlan_port_cnt = 1;
3502 
3503 		t4_write_reg(adapter, MPS_RX_VXLAN_TYPE_A,
3504 			     VXLAN_V(be16_to_cpu(ti->port)) | VXLAN_EN_F);
3505 		break;
3506 	case UDP_TUNNEL_TYPE_GENEVE:
3507 		if (adapter->geneve_port_cnt &&
3508 		    adapter->geneve_port == ti->port) {
3509 			adapter->geneve_port_cnt++;
3510 			return;
3511 		}
3512 
3513 		/* We will support only one GENEVE port */
3514 		if (adapter->geneve_port_cnt) {
3515 			netdev_info(netdev, "UDP port %d already offloaded, not adding port %d\n",
3516 				    be16_to_cpu(adapter->geneve_port),
3517 				    be16_to_cpu(ti->port));
3518 			return;
3519 		}
3520 
3521 		adapter->geneve_port = ti->port;
3522 		adapter->geneve_port_cnt = 1;
3523 
3524 		t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A,
3525 			     GENEVE_V(be16_to_cpu(ti->port)) | GENEVE_EN_F);
3526 		break;
3527 	default:
3528 		return;
3529 	}
3530 
3531 	/* Create a 'match all' mac filter entry for inner mac,
3532 	 * if raw mac interface is supported. Once the Linux kernel provides
3533 	 * driver entry points for adding/deleting the inner mac addresses,
3534 	 * we will remove this 'match all' entry and fall back to adding
3535 	 * exact match filters.
3536 	 */
3537 	for_each_port(adapter, i) {
3538 		pi = adap2pinfo(adapter, i);
3539 
3540 		ret = t4_alloc_raw_mac_filt(adapter, pi->viid,
3541 					    match_all_mac,
3542 					    match_all_mac,
3543 					    adapter->rawf_start +
3544 					    pi->port_id,
3545 					    1, pi->port_id, false);
3546 		if (ret < 0) {
3547 			netdev_info(netdev, "Failed to allocate a mac filter entry, not adding port %d\n",
3548 				    be16_to_cpu(ti->port));
3549 			cxgb_del_udp_tunnel(netdev, ti);
3550 			return;
3551 		}
3552 	}
3553 }
3554 
3555 static netdev_features_t cxgb_features_check(struct sk_buff *skb,
3556 					     struct net_device *dev,
3557 					     netdev_features_t features)
3558 {
3559 	struct port_info *pi = netdev_priv(dev);
3560 	struct adapter *adapter = pi->adapter;
3561 
3562 	if (CHELSIO_CHIP_VERSION(adapter->params.chip) < CHELSIO_T6)
3563 		return features;
3564 
3565 	/* Check if hw supports offload for this packet */
3566 	if (!skb->encapsulation || cxgb_encap_offload_supported(skb))
3567 		return features;
3568 
3569 	/* Offload is not supported for this encapsulated packet */
3570 	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
3571 }
3572 
3573 static netdev_features_t cxgb_fix_features(struct net_device *dev,
3574 					   netdev_features_t features)
3575 {
3576 	/* Disable GRO if RX_CSUM is disabled */
3577 	if (!(features & NETIF_F_RXCSUM))
3578 		features &= ~NETIF_F_GRO;
3579 
3580 	return features;
3581 }
3582 
3583 static const struct net_device_ops cxgb4_netdev_ops = {
3584 	.ndo_open             = cxgb_open,
3585 	.ndo_stop             = cxgb_close,
3586 	.ndo_start_xmit       = t4_start_xmit,
3587 	.ndo_select_queue     = cxgb_select_queue,
3588 	.ndo_get_stats64      = cxgb_get_stats,
3589 	.ndo_set_rx_mode      = cxgb_set_rxmode,
3590 	.ndo_set_mac_address  = cxgb_set_mac_addr,
3591 	.ndo_set_features     = cxgb_set_features,
3592 	.ndo_validate_addr    = eth_validate_addr,
3593 	.ndo_do_ioctl         = cxgb_ioctl,
3594 	.ndo_change_mtu       = cxgb_change_mtu,
3595 #ifdef CONFIG_NET_POLL_CONTROLLER
3596 	.ndo_poll_controller  = cxgb_netpoll,
3597 #endif
3598 #ifdef CONFIG_CHELSIO_T4_FCOE
3599 	.ndo_fcoe_enable      = cxgb_fcoe_enable,
3600 	.ndo_fcoe_disable     = cxgb_fcoe_disable,
3601 #endif /* CONFIG_CHELSIO_T4_FCOE */
3602 	.ndo_set_tx_maxrate   = cxgb_set_tx_maxrate,
3603 	.ndo_setup_tc         = cxgb_setup_tc,
3604 	.ndo_udp_tunnel_add   = cxgb_add_udp_tunnel,
3605 	.ndo_udp_tunnel_del   = cxgb_del_udp_tunnel,
3606 	.ndo_features_check   = cxgb_features_check,
3607 	.ndo_fix_features     = cxgb_fix_features,
3608 };
3609 
3610 #ifdef CONFIG_PCI_IOV
3611 static const struct net_device_ops cxgb4_mgmt_netdev_ops = {
3612 	.ndo_open               = cxgb4_mgmt_open,
3613 	.ndo_set_vf_mac         = cxgb4_mgmt_set_vf_mac,
3614 	.ndo_get_vf_config      = cxgb4_mgmt_get_vf_config,
3615 	.ndo_set_vf_rate        = cxgb4_mgmt_set_vf_rate,
3616 	.ndo_get_phys_port_id   = cxgb4_mgmt_get_phys_port_id,
3617 	.ndo_set_vf_vlan        = cxgb4_mgmt_set_vf_vlan,
3618 	.ndo_set_vf_link_state	= cxgb4_mgmt_set_vf_link_state,
3619 };
3620 #endif
3621 
3622 static void cxgb4_mgmt_get_drvinfo(struct net_device *dev,
3623 				   struct ethtool_drvinfo *info)
3624 {
3625 	struct adapter *adapter = netdev2adap(dev);
3626 
3627 	strlcpy(info->driver, cxgb4_driver_name, sizeof(info->driver));
3628 	strlcpy(info->bus_info, pci_name(adapter->pdev),
3629 		sizeof(info->bus_info));
3630 }
3631 
3632 static const struct ethtool_ops cxgb4_mgmt_ethtool_ops = {
3633 	.get_drvinfo       = cxgb4_mgmt_get_drvinfo,
3634 };
3635 
3636 static void notify_fatal_err(struct work_struct *work)
3637 {
3638 	struct adapter *adap;
3639 
3640 	adap = container_of(work, struct adapter, fatal_err_notify_task);
3641 	notify_ulds(adap, CXGB4_STATE_FATAL_ERROR);
3642 }
3643 
3644 void t4_fatal_err(struct adapter *adap)
3645 {
3646 	int port;
3647 
3648 	if (pci_channel_offline(adap->pdev))
3649 		return;
3650 
3651 	/* Disable the SGE since ULDs are going to free resources that
3652 	 * could be exposed to the adapter.  RDMA MWs for example...
3653 	 */
3654 	t4_shutdown_adapter(adap);
3655 	for_each_port(adap, port) {
3656 		struct net_device *dev = adap->port[port];
3657 
3658 		/* If we get here in very early initialization the network
3659 		 * devices may not have been set up yet.
3660 		 */
3661 		if (!dev)
3662 			continue;
3663 
3664 		netif_tx_stop_all_queues(dev);
3665 		netif_carrier_off(dev);
3666 	}
3667 	dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
3668 	queue_work(adap->workq, &adap->fatal_err_notify_task);
3669 }
3670 
3671 static void setup_memwin(struct adapter *adap)
3672 {
3673 	u32 nic_win_base = t4_get_util_window(adap);
3674 
3675 	t4_setup_memwin(adap, nic_win_base, MEMWIN_NIC);
3676 }
3677 
3678 static void setup_memwin_rdma(struct adapter *adap)
3679 {
3680 	if (adap->vres.ocq.size) {
3681 		u32 start;
3682 		unsigned int sz_kb;
3683 
3684 		start = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_2);
3685 		start &= PCI_BASE_ADDRESS_MEM_MASK;
3686 		start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
3687 		sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
3688 		t4_write_reg(adap,
3689 			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 3),
3690 			     start | BIR_V(1) | WINDOW_V(ilog2(sz_kb)));
3691 		t4_write_reg(adap,
3692 			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3),
3693 			     adap->vres.ocq.start);
3694 		t4_read_reg(adap,
3695 			    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3));
3696 	}
3697 }
3698 
3699 /* HMA Definitions */
3700 
3701 /* The maximum number of addresses that can be sent in a single FW cmd */
3702 #define HMA_MAX_ADDR_IN_CMD	5
3703 
3704 #define HMA_PAGE_SIZE		PAGE_SIZE
3705 
3706 #define HMA_MAX_NO_FW_ADDRESS	(16 << 10)  /* FW supports 16K addresses */
3707 
3708 #define HMA_PAGE_ORDER					\
3709 	((HMA_PAGE_SIZE < HMA_MAX_NO_FW_ADDRESS) ?	\
3710 	ilog2(HMA_MAX_NO_FW_ADDRESS / HMA_PAGE_SIZE) : 0)
3711 
3712 /* The minimum and maximum possible HMA sizes that can be specified in the FW
3713  * configuration (in units of MB).
3714  */
3715 #define HMA_MIN_TOTAL_SIZE	1
3716 #define HMA_MAX_TOTAL_SIZE				\
3717 	(((HMA_PAGE_SIZE << HMA_PAGE_ORDER) *		\
3718 	  HMA_MAX_NO_FW_ADDRESS) >> 20)
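/* For example, on a host with 4KB pages: HMA_PAGE_ORDER evaluates to
 * ilog2(16384 / 4096) == 2, each scatterlist entry then covers a 16KB
 * allocation, and HMA_MAX_TOTAL_SIZE works out to
 * ((4096 << 2) * 16384) >> 20 == 256MB.
 */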
3719 
3720 static void adap_free_hma_mem(struct adapter *adapter)
3721 {
3722 	struct scatterlist *iter;
3723 	struct page *page;
3724 	int i;
3725 
3726 	if (!adapter->hma.sgt)
3727 		return;
3728 
3729 	if (adapter->hma.flags & HMA_DMA_MAPPED_FLAG) {
3730 		dma_unmap_sg(adapter->pdev_dev, adapter->hma.sgt->sgl,
3731 			     adapter->hma.sgt->nents, DMA_BIDIRECTIONAL);
3732 		adapter->hma.flags &= ~HMA_DMA_MAPPED_FLAG;
3733 	}
3734 
3735 	for_each_sg(adapter->hma.sgt->sgl, iter,
3736 		    adapter->hma.sgt->orig_nents, i) {
3737 		page = sg_page(iter);
3738 		if (page)
3739 			__free_pages(page, HMA_PAGE_ORDER);
3740 	}
3741 
3742 	kfree(adapter->hma.phy_addr);
3743 	sg_free_table(adapter->hma.sgt);
3744 	kfree(adapter->hma.sgt);
3745 	adapter->hma.sgt = NULL;
3746 }
3747 
3748 static int adap_config_hma(struct adapter *adapter)
3749 {
3750 	struct scatterlist *sgl, *iter;
3751 	struct sg_table *sgt;
3752 	struct page *newpage;
3753 	unsigned int i, j, k;
3754 	u32 param, hma_size;
3755 	unsigned int ncmds;
3756 	size_t page_size;
3757 	u32 page_order;
3758 	int node, ret;
3759 
3760 	/* HMA is supported only for T6+ cards.
3761 	 * Avoid initializing HMA in kdump kernels.
3762 	 */
3763 	if (is_kdump_kernel() ||
3764 	    CHELSIO_CHIP_VERSION(adapter->params.chip) < CHELSIO_T6)
3765 		return 0;
3766 
3767 	/* Get the HMA region size required by fw */
3768 	param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3769 		 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_HMA_SIZE));
3770 	ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
3771 			      1, &param, &hma_size);
3772 	/* An error means the card has its own memory or HMA is not supported
3773 	 * by the firmware. Return without any error.
3774 	 */
3775 	if (ret || !hma_size)
3776 		return 0;
3777 
3778 	if (hma_size < HMA_MIN_TOTAL_SIZE ||
3779 	    hma_size > HMA_MAX_TOTAL_SIZE) {
3780 		dev_err(adapter->pdev_dev,
3781 			"HMA size %uMB beyond bounds(%u-%lu)MB\n",
3782 			hma_size, HMA_MIN_TOTAL_SIZE, HMA_MAX_TOTAL_SIZE);
3783 		return -EINVAL;
3784 	}
3785 
3786 	page_size = HMA_PAGE_SIZE;
3787 	page_order = HMA_PAGE_ORDER;
3788 	adapter->hma.sgt = kzalloc(sizeof(*adapter->hma.sgt), GFP_KERNEL);
3789 	if (unlikely(!adapter->hma.sgt)) {
3790 		dev_err(adapter->pdev_dev, "HMA SG table allocation failed\n");
3791 		return -ENOMEM;
3792 	}
3793 	sgt = adapter->hma.sgt;
3794 	/* The size returned by the FW is in units of MB */
3796 	sgt->orig_nents = (hma_size << 20) / (page_size << page_order);
3797 	if (sg_alloc_table(sgt, sgt->orig_nents, GFP_KERNEL)) {
3798 		dev_err(adapter->pdev_dev, "HMA SGL allocation failed\n");
3799 		kfree(adapter->hma.sgt);
3800 		adapter->hma.sgt = NULL;
3801 		return -ENOMEM;
3802 	}
3803 
3804 	sgl = adapter->hma.sgt->sgl;
3805 	node = dev_to_node(adapter->pdev_dev);
3806 	for_each_sg(sgl, iter, sgt->orig_nents, i) {
3807 		newpage = alloc_pages_node(node, __GFP_NOWARN | GFP_KERNEL |
3808 					   __GFP_ZERO, page_order);
3809 		if (!newpage) {
3810 			dev_err(adapter->pdev_dev,
3811 				"Not enough memory for HMA page allocation\n");
3812 			ret = -ENOMEM;
3813 			goto free_hma;
3814 		}
3815 		sg_set_page(iter, newpage, page_size << page_order, 0);
3816 	}
3817 
3818 	sgt->nents = dma_map_sg(adapter->pdev_dev, sgl, sgt->orig_nents,
3819 				DMA_BIDIRECTIONAL);
3820 	if (!sgt->nents) {
3821 		dev_err(adapter->pdev_dev,
3822 			"Not enough memory for HMA DMA mapping");
3823 		ret = -ENOMEM;
3824 		goto free_hma;
3825 	}
3826 	adapter->hma.flags |= HMA_DMA_MAPPED_FLAG;
3827 
3828 	adapter->hma.phy_addr = kcalloc(sgt->nents, sizeof(dma_addr_t),
3829 					GFP_KERNEL);
3830 	if (unlikely(!adapter->hma.phy_addr)) {
3831 		ret = -ENOMEM;
		goto free_hma;
	}
3832 
3833 	for_each_sg(sgl, iter, sgt->nents, i) {
3834 		newpage = sg_page(iter);
3835 		adapter->hma.phy_addr[i] = sg_dma_address(iter);
3836 	}
3837 
3838 	ncmds = DIV_ROUND_UP(sgt->nents, HMA_MAX_ADDR_IN_CMD);
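	/* e.g. sgt->nents == 12 with HMA_MAX_ADDR_IN_CMD == 5 yields 3 commands
	 * carrying 5, 5 and 2 addresses respectively.
	 */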
3839 	/* Pass on the addresses to firmware */
3840 	for (i = 0, k = 0; i < ncmds; i++, k += HMA_MAX_ADDR_IN_CMD) {
3841 		struct fw_hma_cmd hma_cmd;
3842 		u8 naddr = HMA_MAX_ADDR_IN_CMD;
3843 		u8 soc = 0, eoc = 0;
3844 		u8 hma_mode = 1; /* Presently we support only Page table mode */
3845 
3846 		soc = (i == 0) ? 1 : 0;
3847 		eoc = (i == ncmds - 1) ? 1 : 0;
3848 
3849 		/* For last cmd, set naddr corresponding to remaining
3850 		 * addresses
3851 		 */
3852 		if (i == ncmds - 1) {
3853 			naddr = sgt->nents % HMA_MAX_ADDR_IN_CMD;
3854 			naddr = naddr ? naddr : HMA_MAX_ADDR_IN_CMD;
3855 		}
3856 		memset(&hma_cmd, 0, sizeof(hma_cmd));
3857 		hma_cmd.op_pkd = htonl(FW_CMD_OP_V(FW_HMA_CMD) |
3858 				       FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
3859 		hma_cmd.retval_len16 = htonl(FW_LEN16(hma_cmd));
3860 
3861 		hma_cmd.mode_to_pcie_params =
3862 			htonl(FW_HMA_CMD_MODE_V(hma_mode) |
3863 			      FW_HMA_CMD_SOC_V(soc) | FW_HMA_CMD_EOC_V(eoc));
3864 
3865 		/* HMA command size is specified in MB */
3866 		hma_cmd.naddr_size =
3867 			htonl(FW_HMA_CMD_SIZE_V(hma_size) |
3868 			      FW_HMA_CMD_NADDR_V(naddr));
3869 
3870 		/* Total Page size specified in units of 4K */
3871 		hma_cmd.addr_size_pkd =
3872 			htonl(FW_HMA_CMD_ADDR_SIZE_V
3873 				((page_size << page_order) >> 12));
3874 
3875 		/* Fill the per-command addresses (up to HMA_MAX_ADDR_IN_CMD) */
3876 		for (j = 0; j < naddr; j++) {
3877 			hma_cmd.phy_address[j] =
3878 				cpu_to_be64(adapter->hma.phy_addr[j + k]);
3879 		}
3880 		ret = t4_wr_mbox(adapter, adapter->mbox, &hma_cmd,
3881 				 sizeof(hma_cmd), &hma_cmd);
3882 		if (ret) {
3883 			dev_err(adapter->pdev_dev,
3884 				"HMA FW command failed with err %d\n", ret);
3885 			goto free_hma;
3886 		}
3887 	}
3888 
3889 	if (!ret)
3890 		dev_info(adapter->pdev_dev,
3891 			 "Reserved %uMB host memory for HMA\n", hma_size);
3892 	return ret;
3893 
3894 free_hma:
3895 	adap_free_hma_mem(adapter);
3896 	return ret;
3897 }
3898 
3899 static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
3900 {
3901 	u32 v;
3902 	int ret;
3903 
3904 	/* Now that we've successfully configured and initialized the adapter,
3905 	 * we can ask the Firmware what resources it has provisioned for us.
3906 	 */
3907 	ret = t4_get_pfres(adap);
3908 	if (ret) {
3909 		dev_err(adap->pdev_dev,
3910 			"Unable to retrieve resource provisioning information\n");
3911 		return ret;
3912 	}
3913 
3914 	/* get device capabilities */
3915 	memset(c, 0, sizeof(*c));
3916 	c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
3917 			       FW_CMD_REQUEST_F | FW_CMD_READ_F);
3918 	c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
3919 	ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), c);
3920 	if (ret < 0)
3921 		return ret;
3922 
3923 	c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
3924 			       FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
3925 	ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), NULL);
3926 	if (ret < 0)
3927 		return ret;
3928 
3929 	ret = t4_config_glbl_rss(adap, adap->pf,
3930 				 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
3931 				 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F |
3932 				 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F);
3933 	if (ret < 0)
3934 		return ret;
3935 
3936 	ret = t4_cfg_pfvf(adap, adap->mbox, adap->pf, 0, adap->sge.egr_sz, 64,
3937 			  MAX_INGQ, 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF,
3938 			  FW_CMD_CAP_PF);
3939 	if (ret < 0)
3940 		return ret;
3941 
3942 	t4_sge_init(adap);
3943 
3944 	/* tweak some settings */
3945 	t4_write_reg(adap, TP_SHIFT_CNT_A, 0x64f8849);
3946 	t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(PAGE_SHIFT - 12));
3947 	t4_write_reg(adap, TP_PIO_ADDR_A, TP_INGRESS_CONFIG_A);
3948 	v = t4_read_reg(adap, TP_PIO_DATA_A);
3949 	t4_write_reg(adap, TP_PIO_DATA_A, v & ~CSUM_HAS_PSEUDO_HDR_F);
3950 
3951 	/* first 4 Tx modulation queues point to consecutive Tx channels */
3952 	adap->params.tp.tx_modq_map = 0xE4;
3953 	t4_write_reg(adap, TP_TX_MOD_QUEUE_REQ_MAP_A,
3954 		     TX_MOD_QUEUE_REQ_MAP_V(adap->params.tp.tx_modq_map));
3955 
3956 	/* associate each Tx modulation queue with consecutive Tx channels */
3957 	v = 0x84218421;
3958 	t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
3959 			  &v, 1, TP_TX_SCHED_HDR_A);
3960 	t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
3961 			  &v, 1, TP_TX_SCHED_FIFO_A);
3962 	t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
3963 			  &v, 1, TP_TX_SCHED_PCMD_A);
3964 
3965 #define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
3966 	if (is_offload(adap)) {
3967 		t4_write_reg(adap, TP_TX_MOD_QUEUE_WEIGHT0_A,
3968 			     TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3969 			     TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3970 			     TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3971 			     TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
3972 		t4_write_reg(adap, TP_TX_MOD_CHANNEL_WEIGHT_A,
3973 			     TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3974 			     TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3975 			     TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3976 			     TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
3977 	}
3978 
3979 	/* get basic stuff going */
3980 	return t4_early_init(adap, adap->pf);
3981 }
3982 
3983 /*
3984  * Max # of ATIDs.  The absolute HW max is 16K but we keep it lower.
3985  */
3986 #define MAX_ATIDS 8192U
3987 
3988 /*
3989  * Phase 0 of initialization: contact FW, obtain config, perform basic init.
3990  *
3991  * If the firmware we're dealing with has Configuration File support, then
3992  * we use that to perform all configuration
3993  */
3994 
3995 /*
3996  * Tweak configuration based on module parameters, etc.  Most of these have
3997  * defaults assigned to them by Firmware Configuration Files (if we're using
3998  * them) but need to be explicitly set if we're using hard-coded
3999  * initialization.  But even in the case of using Firmware Configuration
4000  * Files, we'd like to expose the ability to change these via module
4001  * parameters so these are essentially common tweaks/settings for
4002  * Configuration Files and hard-coded initialization ...
4003  */
4004 static int adap_init0_tweaks(struct adapter *adapter)
4005 {
4006 	/*
4007 	 * Fix up various Host-Dependent Parameters like Page Size, Cache
4008 	 * Line Size, etc.  The firmware default is for a 4KB Page Size and
4009 	 * 64B Cache Line Size ...
4010 	 */
4011 	t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);
4012 
4013 	/*
4014 	 * Process module parameters which affect early initialization.
4015 	 */
4016 	if (rx_dma_offset != 2 && rx_dma_offset != 0) {
4017 		dev_err(&adapter->pdev->dev,
4018 			"Ignoring illegal rx_dma_offset=%d, using 2\n",
4019 			rx_dma_offset);
4020 		rx_dma_offset = 2;
4021 	}
4022 	t4_set_reg_field(adapter, SGE_CONTROL_A,
4023 			 PKTSHIFT_V(PKTSHIFT_M),
4024 			 PKTSHIFT_V(rx_dma_offset));
4025 
4026 	/*
4027 	 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
4028 	 * adds the pseudo header itself.
4029 	 */
4030 	t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG_A,
4031 			       CSUM_HAS_PSEUDO_HDR_F, 0);
4032 
4033 	return 0;
4034 }
4035 
4036 /* 10Gb/s-BT PHY Support. chip-external 10Gb/s-BT PHYs are complex chips
4037  * unto themselves and they contain their own firmware to perform their
4038  * tasks ...
4039  */
4040 static int phy_aq1202_version(const u8 *phy_fw_data,
4041 			      size_t phy_fw_size)
4042 {
4043 	int offset;
4044 
4045 	/* At offset 0x8 you're looking for the primary image's
4046 	 * starting offset which is 3 Bytes wide
4047 	 *
4048 	 * At offset 0xa of the primary image, you look for the offset
4049 	 * of the DRAM segment which is 3 Bytes wide.
4050 	 *
4051 	 * The FW version is at offset 0x27e of the DRAM and is 2 Bytes
4052 	 * wide
4053 	 */
4054 	#define be16(__p) (((__p)[0] << 8) | (__p)[1])
4055 	#define le16(__p) ((__p)[0] | ((__p)[1] << 8))
4056 	#define le24(__p) (le16(__p) | ((__p)[2] << 16))
4057 
4058 	offset = le24(phy_fw_data + 0x8) << 12;
4059 	offset = le24(phy_fw_data + offset + 0xa);
4060 	return be16(phy_fw_data + offset + 0x27e);
4061 
4062 	#undef be16
4063 	#undef le16
4064 	#undef le24
4065 }
4066 
4067 static struct info_10gbt_phy_fw {
4068 	unsigned int phy_fw_id;		/* PCI Device ID */
4069 	char *phy_fw_file;		/* /lib/firmware/ PHY Firmware file */
4070 	int (*phy_fw_version)(const u8 *phy_fw_data, size_t phy_fw_size);
4071 	int phy_flash;			/* Has FLASH for PHY Firmware */
4072 } phy_info_array[] = {
4073 	{
4074 		PHY_AQ1202_DEVICEID,
4075 		PHY_AQ1202_FIRMWARE,
4076 		phy_aq1202_version,
4077 		1,
4078 	},
4079 	{
4080 		PHY_BCM84834_DEVICEID,
4081 		PHY_BCM84834_FIRMWARE,
4082 		NULL,
4083 		0,
4084 	},
4085 	{ 0, NULL, NULL },
4086 };
4087 
4088 static struct info_10gbt_phy_fw *find_phy_info(int devid)
4089 {
4090 	int i;
4091 
4092 	for (i = 0; i < ARRAY_SIZE(phy_info_array); i++) {
4093 		if (phy_info_array[i].phy_fw_id == devid)
4094 			return &phy_info_array[i];
4095 	}
4096 	return NULL;
4097 }
4098 
4099 /* Handle updating of chip-external 10Gb/s-BT PHY firmware.  This needs to
4100  * happen after the FW_RESET_CMD but before the FW_INITIALIZE_CMD.  On error
4101  * we return a negative error number.  If we transfer new firmware we return 1
4102  * (from t4_load_phy_fw()).  If we don't do anything we return 0.
4103  */
4104 static int adap_init0_phy(struct adapter *adap)
4105 {
4106 	const struct firmware *phyf;
4107 	int ret;
4108 	struct info_10gbt_phy_fw *phy_info;
4109 
4110 	/* Use the device ID to determine which PHY file to flash.
4111 	 */
4112 	phy_info = find_phy_info(adap->pdev->device);
4113 	if (!phy_info) {
4114 		dev_warn(adap->pdev_dev,
4115 			 "No PHY Firmware file found for this PHY\n");
4116 		return -EOPNOTSUPP;
4117 	}
4118 
4119 	/* If we have a T4 PHY firmware file under /lib/firmware/cxgb4/, then
4120 	 * use that. The adapter firmware provides us with a memory buffer
4121 	 * where we can load a PHY firmware file from the host if we want to
4122 	 * override the PHY firmware File in flash.
4123 	 */
4124 	ret = request_firmware_direct(&phyf, phy_info->phy_fw_file,
4125 				      adap->pdev_dev);
4126 	if (ret < 0) {
4127 		/* For adapters without FLASH attached to PHY for their
4128 		 * firmware, it's obviously a fatal error if we can't get the
4129 		 * firmware to the adapter.  For adapters with PHY firmware
4130 		 * FLASH storage, it's worth a warning if we can't find the
4131 		 * PHY Firmware but we'll neuter the error ...
4132 		 */
4133 		dev_err(adap->pdev_dev, "unable to find PHY Firmware image "
4134 			"/lib/firmware/%s, error %d\n",
4135 			phy_info->phy_fw_file, -ret);
4136 		if (phy_info->phy_flash) {
4137 			int cur_phy_fw_ver = 0;
4138 
4139 			t4_phy_fw_ver(adap, &cur_phy_fw_ver);
4140 			dev_warn(adap->pdev_dev, "continuing with on-adapter "
4141 				 "FLASH copy, version %#x\n", cur_phy_fw_ver);
4142 			ret = 0;
4143 		}
4144 
4145 		return ret;
4146 	}
4147 
4148 	/* Load PHY Firmware onto adapter.
4149 	 */
4150 	ret = t4_load_phy_fw(adap, MEMWIN_NIC, &adap->win0_lock,
4151 			     phy_info->phy_fw_version,
4152 			     (u8 *)phyf->data, phyf->size);
4153 	if (ret < 0)
4154 		dev_err(adap->pdev_dev, "PHY Firmware transfer error %d\n",
4155 			-ret);
4156 	else if (ret > 0) {
4157 		int new_phy_fw_ver = 0;
4158 
4159 		if (phy_info->phy_fw_version)
4160 			new_phy_fw_ver = phy_info->phy_fw_version(phyf->data,
4161 								  phyf->size);
4162 		dev_info(adap->pdev_dev, "Successfully transferred PHY "
4163 			 "Firmware /lib/firmware/%s, version %#x\n",
4164 			 phy_info->phy_fw_file, new_phy_fw_ver);
4165 	}
4166 
4167 	release_firmware(phyf);
4168 
4169 	return ret;
4170 }
4171 
4172 /*
4173  * Attempt to initialize the adapter via a Firmware Configuration File.
4174  */
4175 static int adap_init0_config(struct adapter *adapter, int reset)
4176 {
4177 	char *fw_config_file, fw_config_file_path[256];
4178 	u32 finiver, finicsum, cfcsum, param, val;
4179 	struct fw_caps_config_cmd caps_cmd;
4180 	unsigned long mtype = 0, maddr = 0;
4181 	const struct firmware *cf;
4182 	char *config_name = NULL;
4183 	int config_issued = 0;
4184 	int ret;
4185 
4186 	/*
4187 	 * Reset device if necessary.
4188 	 */
4189 	if (reset) {
4190 		ret = t4_fw_reset(adapter, adapter->mbox,
4191 				  PIORSTMODE_F | PIORST_F);
4192 		if (ret < 0)
4193 			goto bye;
4194 	}
4195 
4196 	/* If this is a 10Gb/s-BT adapter make sure the chip-external
4197 	 * 10Gb/s-BT PHYs have up-to-date firmware.  Note that this step needs
4198 	 * to be performed after any global adapter RESET above since some
4199 	 * PHYs only have local RAM copies of the PHY firmware.
4200 	 */
4201 	if (is_10gbt_device(adapter->pdev->device)) {
4202 		ret = adap_init0_phy(adapter);
4203 		if (ret < 0)
4204 			goto bye;
4205 	}
4206 	/*
4207 	 * If we have a T4 configuration file under /lib/firmware/cxgb4/,
4208 	 * then use that.  Otherwise, use the configuration file stored
4209 	 * in the adapter flash ...
4210 	 */
4211 	switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
4212 	case CHELSIO_T4:
4213 		fw_config_file = FW4_CFNAME;
4214 		break;
4215 	case CHELSIO_T5:
4216 		fw_config_file = FW5_CFNAME;
4217 		break;
4218 	case CHELSIO_T6:
4219 		fw_config_file = FW6_CFNAME;
4220 		break;
4221 	default:
4222 		dev_err(adapter->pdev_dev, "Device %d is not supported\n",
4223 		       adapter->pdev->device);
4224 		ret = -EINVAL;
4225 		goto bye;
4226 	}
4227 
4228 	ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
4229 	if (ret < 0) {
4230 		config_name = "On FLASH";
4231 		mtype = FW_MEMTYPE_CF_FLASH;
4232 		maddr = t4_flash_cfg_addr(adapter);
4233 	} else {
4234 		u32 params[7], val[7];
4235 
4236 		sprintf(fw_config_file_path,
4237 			"/lib/firmware/%s", fw_config_file);
4238 		config_name = fw_config_file_path;
4239 
4240 		if (cf->size >= FLASH_CFG_MAX_SIZE)
4241 			ret = -ENOMEM;
4242 		else {
4243 			params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
4244 			     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
4245 			ret = t4_query_params(adapter, adapter->mbox,
4246 					      adapter->pf, 0, 1, params, val);
4247 			if (ret == 0) {
4248 				/*
4249 				 * For t4_memory_rw() below addresses and
4250 				 * sizes have to be in terms of multiples of 4
4251 				 * bytes.  So, if the Configuration File isn't
4252 				 * a multiple of 4 bytes in length we'll have
4253 				 * to write that out separately since we can't
4254 				 * guarantee that the bytes following the
4255 				 * residual byte in the buffer returned by
4256 				 * request_firmware() are zeroed out ...
4257 				 */
4258 				size_t resid = cf->size & 0x3;
4259 				size_t size = cf->size & ~0x3;
4260 				__be32 *data = (__be32 *)cf->data;
4261 
4262 				mtype = FW_PARAMS_PARAM_Y_G(val[0]);
4263 				maddr = FW_PARAMS_PARAM_Z_G(val[0]) << 16;
4264 
4265 				spin_lock(&adapter->win0_lock);
4266 				ret = t4_memory_rw(adapter, 0, mtype, maddr,
4267 						   size, data, T4_MEMORY_WRITE);
4268 				if (ret == 0 && resid != 0) {
4269 					union {
4270 						__be32 word;
4271 						char buf[4];
4272 					} last;
4273 					int i;
4274 
4275 					last.word = data[size >> 2];
4276 					for (i = resid; i < 4; i++)
4277 						last.buf[i] = 0;
4278 					ret = t4_memory_rw(adapter, 0, mtype,
4279 							   maddr + size,
4280 							   4, &last.word,
4281 							   T4_MEMORY_WRITE);
4282 				}
4283 				spin_unlock(&adapter->win0_lock);
4284 			}
4285 		}
4286 
4287 		release_firmware(cf);
4288 		if (ret)
4289 			goto bye;
4290 	}
4291 
4292 	val = 0;
4293 
4294 	/* Ofld + Hash filter is supported. Older fw will fail this request and
4295 	 * it is fine.
4296 	 */
4297 	param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
4298 		 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_HASHFILTER_WITH_OFLD));
4299 	ret = t4_set_params(adapter, adapter->mbox, adapter->pf, 0,
4300 			    1, &param, &val);
4301 
4302 	/* If the FW doesn't know about Hash filter + ofld support,
4303 	 * that's not a problem; don't return an error.
4304 	 */
4305 	if (ret < 0) {
4306 		dev_warn(adapter->pdev_dev,
4307 			 "Hash filter with ofld is not supported by FW\n");
4308 	}
4309 
4310 	/*
4311 	 * Issue a Capability Configuration command to the firmware to get it
4312 	 * to parse the Configuration File.  We don't use t4_fw_config_file()
4313 	 * because we want the ability to modify various features after we've
4314 	 * processed the configuration file ...
4315 	 */
4316 	memset(&caps_cmd, 0, sizeof(caps_cmd));
4317 	caps_cmd.op_to_write =
4318 		htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
4319 		      FW_CMD_REQUEST_F |
4320 		      FW_CMD_READ_F);
4321 	caps_cmd.cfvalid_to_len16 =
4322 		htonl(FW_CAPS_CONFIG_CMD_CFVALID_F |
4323 		      FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) |
4324 		      FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) |
4325 		      FW_LEN16(caps_cmd));
4326 	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4327 			 &caps_cmd);
4328 
4329 	/* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
4330 	 * Configuration File in FLASH), our last gasp effort is to use the
4331 	 * Firmware Configuration File which is embedded in the firmware.  A
4332 	 * very few early versions of the firmware didn't have one embedded
4333 	 * but we can ignore those.
4334 	 */
4335 	if (ret == -ENOENT) {
4336 		memset(&caps_cmd, 0, sizeof(caps_cmd));
4337 		caps_cmd.op_to_write =
4338 			htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
4339 					FW_CMD_REQUEST_F |
4340 					FW_CMD_READ_F);
4341 		caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
4342 		ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
4343 				sizeof(caps_cmd), &caps_cmd);
4344 		config_name = "Firmware Default";
4345 	}
4346 
4347 	config_issued = 1;
4348 	if (ret < 0)
4349 		goto bye;
4350 
4351 	finiver = ntohl(caps_cmd.finiver);
4352 	finicsum = ntohl(caps_cmd.finicsum);
4353 	cfcsum = ntohl(caps_cmd.cfcsum);
4354 	if (finicsum != cfcsum)
4355 		dev_warn(adapter->pdev_dev, "Configuration File checksum "\
4356 			 "mismatch: [fini] csum=%#x, computed csum=%#x\n",
4357 			 finicsum, cfcsum);
4358 
4359 	/*
4360 	 * And now tell the firmware to use the configuration we just loaded.
4361 	 */
4362 	caps_cmd.op_to_write =
4363 		htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
4364 		      FW_CMD_REQUEST_F |
4365 		      FW_CMD_WRITE_F);
4366 	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
4367 	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4368 			 NULL);
4369 	if (ret < 0)
4370 		goto bye;
4371 
4372 	/*
4373 	 * Tweak configuration based on system architecture, module
4374 	 * parameters, etc.
4375 	 */
4376 	ret = adap_init0_tweaks(adapter);
4377 	if (ret < 0)
4378 		goto bye;
4379 
4380 	/* We will proceed even if HMA init fails. */
4381 	ret = adap_config_hma(adapter);
4382 	if (ret)
4383 		dev_err(adapter->pdev_dev,
4384 			"HMA configuration failed with error %d\n", ret);
4385 
4386 	if (is_t6(adapter->params.chip)) {
4387 		adap_config_hpfilter(adapter);
4388 		ret = setup_ppod_edram(adapter);
4389 		if (!ret)
4390 			dev_info(adapter->pdev_dev, "Successfully enabled "
4391 				 "ppod edram feature\n");
4392 	}
4393 
4394 	/*
4395 	 * And finally tell the firmware to initialize itself using the
4396 	 * parameters from the Configuration File.
4397 	 */
4398 	ret = t4_fw_initialize(adapter, adapter->mbox);
4399 	if (ret < 0)
4400 		goto bye;
4401 
4402 	/* Emit Firmware Configuration File information and return
4403 	 * successfully.
4404 	 */
4405 	dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
4406 		 "Configuration File \"%s\", version %#x, computed checksum %#x\n",
4407 		 config_name, finiver, cfcsum);
4408 	return 0;
4409 
4410 	/*
4411 	 * Something bad happened.  Return the error ...  (If the "error"
4412 	 * is that there's no Configuration File on the adapter we don't
4413 	 * want to issue a warning since this is fairly common.)
4414 	 */
4415 bye:
4416 	if (config_issued && ret != -ENOENT)
4417 		dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n",
4418 			 config_name, -ret);
4419 	return ret;
4420 }
4421 
4422 static struct fw_info fw_info_array[] = {
4423 	{
4424 		.chip = CHELSIO_T4,
4425 		.fs_name = FW4_CFNAME,
4426 		.fw_mod_name = FW4_FNAME,
4427 		.fw_hdr = {
4428 			.chip = FW_HDR_CHIP_T4,
4429 			.fw_ver = __cpu_to_be32(FW_VERSION(T4)),
4430 			.intfver_nic = FW_INTFVER(T4, NIC),
4431 			.intfver_vnic = FW_INTFVER(T4, VNIC),
4432 			.intfver_ri = FW_INTFVER(T4, RI),
4433 			.intfver_iscsi = FW_INTFVER(T4, ISCSI),
4434 			.intfver_fcoe = FW_INTFVER(T4, FCOE),
4435 		},
4436 	}, {
4437 		.chip = CHELSIO_T5,
4438 		.fs_name = FW5_CFNAME,
4439 		.fw_mod_name = FW5_FNAME,
4440 		.fw_hdr = {
4441 			.chip = FW_HDR_CHIP_T5,
4442 			.fw_ver = __cpu_to_be32(FW_VERSION(T5)),
4443 			.intfver_nic = FW_INTFVER(T5, NIC),
4444 			.intfver_vnic = FW_INTFVER(T5, VNIC),
4445 			.intfver_ri = FW_INTFVER(T5, RI),
4446 			.intfver_iscsi = FW_INTFVER(T5, ISCSI),
4447 			.intfver_fcoe = FW_INTFVER(T5, FCOE),
4448 		},
4449 	}, {
4450 		.chip = CHELSIO_T6,
4451 		.fs_name = FW6_CFNAME,
4452 		.fw_mod_name = FW6_FNAME,
4453 		.fw_hdr = {
4454 			.chip = FW_HDR_CHIP_T6,
4455 			.fw_ver = __cpu_to_be32(FW_VERSION(T6)),
4456 			.intfver_nic = FW_INTFVER(T6, NIC),
4457 			.intfver_vnic = FW_INTFVER(T6, VNIC),
4458 			.intfver_ofld = FW_INTFVER(T6, OFLD),
4459 			.intfver_ri = FW_INTFVER(T6, RI),
4460 			.intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU),
4461 			.intfver_iscsi = FW_INTFVER(T6, ISCSI),
4462 			.intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU),
4463 			.intfver_fcoe = FW_INTFVER(T6, FCOE),
4464 		},
4465 	}
4467 };
4468 
4469 static struct fw_info *find_fw_info(int chip)
4470 {
4471 	int i;
4472 
4473 	for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
4474 		if (fw_info_array[i].chip == chip)
4475 			return &fw_info_array[i];
4476 	}
4477 	return NULL;
4478 }
4479 
4480 /*
4481  * Phase 0 of initialization: contact FW, obtain config, perform basic init.
4482  */
4483 static int adap_init0(struct adapter *adap, int vpd_skip)
4484 {
4485 	struct fw_caps_config_cmd caps_cmd;
4486 	u32 params[7], val[7];
4487 	enum dev_state state;
4488 	u32 v, port_vec;
4489 	int reset = 1;
4490 	int ret;
4491 
4492 	/* Grab Firmware Device Log parameters as early as possible so we have
4493 	 * access to it for debugging, etc.
4494 	 */
4495 	ret = t4_init_devlog_params(adap);
4496 	if (ret < 0)
4497 		return ret;
4498 
4499 	/* Contact FW, advertising Master capability */
4500 	ret = t4_fw_hello(adap, adap->mbox, adap->mbox,
4501 			  is_kdump_kernel() ? MASTER_MUST : MASTER_MAY, &state);
4502 	if (ret < 0) {
4503 		dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
4504 			ret);
4505 		return ret;
4506 	}
4507 	if (ret == adap->mbox)
4508 		adap->flags |= CXGB4_MASTER_PF;
4509 
4510 	/*
4511 	 * If we're the Master PF Driver and the device is uninitialized,
4512 	 * then let's consider upgrading the firmware ...  (We always want
4513 	 * to check the firmware version number in order to A. get it for
4514 	 * later reporting and B. to warn if the currently loaded firmware
4515 	 * is excessively mismatched relative to the driver.)
4516 	 */
4517 
4518 	t4_get_version_info(adap);
4519 	ret = t4_check_fw_version(adap);
4520 	/* If firmware is too old (not supported by driver) force an update. */
4521 	if (ret)
4522 		state = DEV_STATE_UNINIT;
4523 	if ((adap->flags & CXGB4_MASTER_PF) && state != DEV_STATE_INIT) {
4524 		struct fw_info *fw_info;
4525 		struct fw_hdr *card_fw;
4526 		const struct firmware *fw;
4527 		const u8 *fw_data = NULL;
4528 		unsigned int fw_size = 0;
4529 
4530 		/* This is the firmware whose headers the driver was compiled
4531 		 * against
4532 		 */
4533 		fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip));
4534 		if (fw_info == NULL) {
4535 			dev_err(adap->pdev_dev,
4536 				"unable to get firmware info for chip %d.\n",
4537 				CHELSIO_CHIP_VERSION(adap->params.chip));
4538 			return -EINVAL;
4539 		}
4540 
4541 		/* allocate memory to read the header of the firmware on the
4542 		 * card
4543 		 */
4544 		card_fw = kvzalloc(sizeof(*card_fw), GFP_KERNEL);
4545 		if (!card_fw) {
4546 			ret = -ENOMEM;
4547 			goto bye;
4548 		}
4549 
4550 		/* Get FW from /lib/firmware/ */
4551 		ret = request_firmware(&fw, fw_info->fw_mod_name,
4552 				       adap->pdev_dev);
4553 		if (ret < 0) {
4554 			dev_err(adap->pdev_dev,
4555 				"unable to load firmware image %s, error %d\n",
4556 				fw_info->fw_mod_name, ret);
4557 		} else {
4558 			fw_data = fw->data;
4559 			fw_size = fw->size;
4560 		}
4561 
4562 		/* upgrade FW logic */
4563 		ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw,
4564 				 state, &reset);
4565 
4566 		/* Cleaning up */
4567 		release_firmware(fw);
4568 		kvfree(card_fw);
4569 
4570 		if (ret < 0)
4571 			goto bye;
4572 	}
4573 
4574 	/* If the firmware is initialized already, emit a simple note to that
4575 	 * effect. Otherwise, it's time to try initializing the adapter.
4576 	 */
4577 	if (state == DEV_STATE_INIT) {
4578 		ret = adap_config_hma(adap);
4579 		if (ret)
4580 			dev_err(adap->pdev_dev,
4581 				"HMA configuration failed with error %d\n",
4582 				ret);
4583 		dev_info(adap->pdev_dev, "Coming up as %s: "\
4584 			 "Adapter already initialized\n",
4585 			 adap->flags & CXGB4_MASTER_PF ? "MASTER" : "SLAVE");
4586 	} else {
4587 		dev_info(adap->pdev_dev, "Coming up as MASTER: "\
4588 			 "Initializing adapter\n");
4589 
4590 		/* Find out whether we're dealing with a version of the
4591 		 * firmware which has configuration file support.
4592 		 */
4593 		params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
4594 			     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
4595 		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
4596 				      params, val);
4597 
4598 		/* If the firmware doesn't support Configuration Files,
4599 		 * return an error.
4600 		 */
4601 		if (ret < 0) {
4602 			dev_err(adap->pdev_dev, "firmware doesn't support "
4603 				"Firmware Configuration Files\n");
4604 			goto bye;
4605 		}
4606 
4607 		/* The firmware provides us with a memory buffer where we can
4608 		 * load a Configuration File from the host if we want to
4609 		 * override the Configuration File in flash.
4610 		 */
4611 		ret = adap_init0_config(adap, reset);
4612 		if (ret == -ENOENT) {
4613 			dev_err(adap->pdev_dev, "no Configuration File "
4614 				"present on adapter.\n");
4615 			goto bye;
4616 		}
4617 		if (ret < 0) {
4618 			dev_err(adap->pdev_dev, "could not initialize "
4619 				"adapter, error %d\n", -ret);
4620 			goto bye;
4621 		}
4622 	}
4623 
4624 	/* Now that we've successfully configured and initialized the adapter
4625 	 * (or found it already initialized), we can ask the Firmware what
4626 	 * resources it has provisioned for us.
4627 	 */
4628 	ret = t4_get_pfres(adap);
4629 	if (ret) {
4630 		dev_err(adap->pdev_dev,
4631 			"Unable to retrieve resource provisioning information\n");
4632 		goto bye;
4633 	}
4634 
4635 	/* Grab VPD parameters.  This should be done after we establish a
4636 	 * connection to the firmware since some of the VPD parameters
4637 	 * (notably the Core Clock frequency) are retrieved via requests to
4638 	 * the firmware.  On the other hand, we need these fairly early on
4639 	 * so we do this right after getting ahold of the firmware.
4640 	 *
4641 	 * We need to do this after initializing the adapter because someone
4642 	 * could have FLASHed a new VPD which won't be read by the firmware
4643 	 * until we do the RESET ...
4644 	 */
4645 	if (!vpd_skip) {
4646 		ret = t4_get_vpd_params(adap, &adap->params.vpd);
4647 		if (ret < 0)
4648 			goto bye;
4649 	}
4650 
4651 	/* Find out what ports are available to us.  Note that we need to do
4652 	 * this early on since subsequent configuration steps need nports
4653 	 * and portvec ...
4654 	 */
4655 	v =
4656 	    FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
4657 	    FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
4658 	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec);
4659 	if (ret < 0)
4660 		goto bye;
4661 
4662 	adap->params.nports = hweight32(port_vec);
4663 	adap->params.portvec = port_vec;
4664 
4665 	/* Give the SGE code a chance to pull in anything that it needs ...
4666 	 * Note that this must be called after we retrieve our VPD parameters
4667 	 * in order to know how to convert core ticks to seconds, etc.
4668 	 */
4669 	ret = t4_sge_init(adap);
4670 	if (ret < 0)
4671 		goto bye;
4672 
4673 	/* Grab the SGE Doorbell Queue Timer values.  If successful, that
4674 	 * indicates that the Firmware and Hardware support this.
4675 	 */
4676 	params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
4677 		    FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DBQ_TIMERTICK));
4678 	ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
4679 			      1, params, val);
4680 
4681 	if (!ret) {
4682 		adap->sge.dbqtimer_tick = val[0];
4683 		ret = t4_read_sge_dbqtimers(adap,
4684 					    ARRAY_SIZE(adap->sge.dbqtimer_val),
4685 					    adap->sge.dbqtimer_val);
4686 	}
4687 
4688 	if (!ret)
4689 		adap->flags |= CXGB4_SGE_DBQ_TIMER;
4690 
4691 	if (is_bypass_device(adap->pdev->device))
4692 		adap->params.bypass = 1;
4693 
4694 	/*
4695 	 * Grab some of our basic fundamental operating parameters.
4696 	 */
4697 	params[0] = FW_PARAM_PFVF(EQ_START);
4698 	params[1] = FW_PARAM_PFVF(L2T_START);
4699 	params[2] = FW_PARAM_PFVF(L2T_END);
4700 	params[3] = FW_PARAM_PFVF(FILTER_START);
4701 	params[4] = FW_PARAM_PFVF(FILTER_END);
4702 	params[5] = FW_PARAM_PFVF(IQFLINT_START);
4703 	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params, val);
4704 	if (ret < 0)
4705 		goto bye;
4706 	adap->sge.egr_start = val[0];
4707 	adap->l2t_start = val[1];
4708 	adap->l2t_end = val[2];
4709 	adap->tids.ftid_base = val[3];
4710 	adap->tids.nftids = val[4] - val[3] + 1;
4711 	adap->sge.ingr_start = val[5];
4712 
4713 	if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
4714 		params[0] = FW_PARAM_PFVF(HPFILTER_START);
4715 		params[1] = FW_PARAM_PFVF(HPFILTER_END);
4716 		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
4717 				      params, val);
4718 		if (ret < 0)
4719 			goto bye;
4720 
4721 		adap->tids.hpftid_base = val[0];
4722 		adap->tids.nhpftids = val[1] - val[0] + 1;
4723 
4724 		/* Read the raw mps entries. In T6, the last 2 tcam entries
4725 		 * are reserved for raw mac addresses (rawf = 2, one per port).
4726 		 */
4727 		params[0] = FW_PARAM_PFVF(RAWF_START);
4728 		params[1] = FW_PARAM_PFVF(RAWF_END);
4729 		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
4730 				      params, val);
4731 		if (ret == 0) {
4732 			adap->rawf_start = val[0];
4733 			adap->rawf_cnt = val[1] - val[0] + 1;
4734 		}
4735 
4736 		adap->tids.tid_base =
4737 			t4_read_reg(adap, LE_DB_ACTIVE_TABLE_START_INDEX_A);
4738 	}
4739 
4740 	/* qids (ingress/egress) returned from firmware can be anywhere
4741 	 * in the range from EQ(IQFLINT)_START to EQ(IQFLINT)_END.
4742 	 * Hence driver needs to allocate memory for this range to
4743 	 * store the queue info. Get the highest IQFLINT/EQ index returned
4744 	 * in FW_EQ_*_CMD.alloc command.
4745 	 */
4746 	params[0] = FW_PARAM_PFVF(EQ_END);
4747 	params[1] = FW_PARAM_PFVF(IQFLINT_END);
4748 	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
4749 	if (ret < 0)
4750 		goto bye;
4751 	adap->sge.egr_sz = val[0] - adap->sge.egr_start + 1;
4752 	adap->sge.ingr_sz = val[1] - adap->sge.ingr_start + 1;
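	/* e.g. EQ_START == 64 and EQ_END == 319 would give egr_sz == 256 map
	 * entries, since an allocated queue id may land anywhere in that span.
	 */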
4753 
4754 	adap->sge.egr_map = kcalloc(adap->sge.egr_sz,
4755 				    sizeof(*adap->sge.egr_map), GFP_KERNEL);
4756 	if (!adap->sge.egr_map) {
4757 		ret = -ENOMEM;
4758 		goto bye;
4759 	}
4760 
4761 	adap->sge.ingr_map = kcalloc(adap->sge.ingr_sz,
4762 				     sizeof(*adap->sge.ingr_map), GFP_KERNEL);
4763 	if (!adap->sge.ingr_map) {
4764 		ret = -ENOMEM;
4765 		goto bye;
4766 	}
4767 
4768 	/* Allocate the memory for the various egress queue bitmaps,
4769 	 * i.e. starving_fl, txq_maperr and blocked_fl.
4770 	 */
4771 	adap->sge.starving_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
4772 					sizeof(long), GFP_KERNEL);
4773 	if (!adap->sge.starving_fl) {
4774 		ret = -ENOMEM;
4775 		goto bye;
4776 	}
4777 
4778 	adap->sge.txq_maperr = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
4779 				       sizeof(long), GFP_KERNEL);
4780 	if (!adap->sge.txq_maperr) {
4781 		ret = -ENOMEM;
4782 		goto bye;
4783 	}
4784 
4785 #ifdef CONFIG_DEBUG_FS
4786 	adap->sge.blocked_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
4787 				       sizeof(long), GFP_KERNEL);
4788 	if (!adap->sge.blocked_fl) {
4789 		ret = -ENOMEM;
4790 		goto bye;
4791 	}
4792 #endif
4793 
4794 	params[0] = FW_PARAM_PFVF(CLIP_START);
4795 	params[1] = FW_PARAM_PFVF(CLIP_END);
4796 	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
4797 	if (ret < 0)
4798 		goto bye;
4799 	adap->clipt_start = val[0];
4800 	adap->clipt_end = val[1];
4801 
4802 	/* Get the supported number of traffic classes */
4803 	params[0] = FW_PARAM_DEV(NUM_TM_CLASS);
4804 	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params, val);
4805 	if (ret < 0) {
4806 		/* We couldn't retrieve the number of Traffic Classes
4807 		 * supported by the hardware/firmware. So we hard
4808 		 * code it here.
4809 		 */
4810 		adap->params.nsched_cls = is_t4(adap->params.chip) ? 15 : 16;
4811 	} else {
4812 		adap->params.nsched_cls = val[0];
4813 	}
4814 
4815 	/* query params related to active filter region */
4816 	params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
4817 	params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
4818 	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
4819 	/* If the Active filter region is non-empty, we enable establishing
4820 	 * offload connections through firmware work requests.
4821 	 */
4822 	if (ret >= 0 && val[0] != val[1]) {
4823 		adap->flags |= CXGB4_FW_OFLD_CONN;
4824 		adap->tids.aftid_base = val[0];
4825 		adap->tids.aftid_end = val[1];
4826 	}
4827 
4828 	/* If we're running on newer firmware, let it know that we're
4829 	 * prepared to deal with encapsulated CPL messages.  Older
4830 	 * firmware won't understand this and we'll just get
4831 	 * unencapsulated messages ...
4832 	 */
4833 	params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
4834 	val[0] = 1;
4835 	(void)t4_set_params(adap, adap->mbox, adap->pf, 0, 1, params, val);
4836 
4837 	/*
4838 	 * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
4839 	 * capability.  Earlier versions of the firmware didn't have the
4840 	 * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no
4841 	 * permission to use ULPTX MEMWRITE DSGL.
4842 	 */
4843 	if (is_t4(adap->params.chip)) {
4844 		adap->params.ulptx_memwrite_dsgl = false;
4845 	} else {
4846 		params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
4847 		ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
4848 				      1, params, val);
4849 		adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
4850 	}
4851 
4852 	/* See if FW supports FW_RI_FR_NSMR_TPTE_WR work request */
4853 	params[0] = FW_PARAM_DEV(RI_FR_NSMR_TPTE_WR);
4854 	ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
4855 			      1, params, val);
4856 	adap->params.fr_nsmr_tpte_wr_support = (ret == 0 && val[0] != 0);
4857 
4858 	/* See if FW supports FW_FILTER2 work request */
4859 	if (is_t4(adap->params.chip)) {
4860 		adap->params.filter2_wr_support = 0;
4861 	} else {
4862 		params[0] = FW_PARAM_DEV(FILTER2_WR);
4863 		ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
4864 				      1, params, val);
4865 		adap->params.filter2_wr_support = (ret == 0 && val[0] != 0);
4866 	}
4867 
4868 	/* Check if FW supports returning vin and smt index.
4869 	 * If this is not supported, driver will interpret
4870 	 * these values from viid.
4871 	 */
4872 	params[0] = FW_PARAM_DEV(OPAQUE_VIID_SMT_EXTN);
4873 	ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
4874 			      1, params, val);
4875 	adap->params.viid_smt_extn_support = (ret == 0 && val[0] != 0);
4876 
4877 	/*
4878 	 * Get device capabilities so we can determine what resources we need
4879 	 * to manage.
4880 	 */
4881 	memset(&caps_cmd, 0, sizeof(caps_cmd));
4882 	caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
4883 				     FW_CMD_REQUEST_F | FW_CMD_READ_F);
4884 	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
4885 	ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
4886 			 &caps_cmd);
4887 	if (ret < 0)
4888 		goto bye;
4889 
4890 	/* Hash filter support requires some mandatory register settings to be
4891 	 * validated, and for that it needs to know whether offload is enabled,
4892 	 * hence we check and set it here.
4893 	 */
4894 	if (caps_cmd.ofldcaps)
4895 		adap->params.offload = 1;
4896 
4897 	if (caps_cmd.ofldcaps ||
4898 	    (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_HASHFILTER)) ||
4899 	    (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_ETHOFLD))) {
4900 		/* query offload-related parameters */
4901 		params[0] = FW_PARAM_DEV(NTID);
4902 		params[1] = FW_PARAM_PFVF(SERVER_START);
4903 		params[2] = FW_PARAM_PFVF(SERVER_END);
4904 		params[3] = FW_PARAM_PFVF(TDDP_START);
4905 		params[4] = FW_PARAM_PFVF(TDDP_END);
4906 		params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
4907 		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
4908 				      params, val);
4909 		if (ret < 0)
4910 			goto bye;
4911 		adap->tids.ntids = val[0];
4912 		adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
4913 		adap->tids.stid_base = val[1];
4914 		adap->tids.nstids = val[2] - val[1] + 1;
4915 		/*
4916 		 * Set up the server filter region. Divide the available filter
4917 		 * region into two parts: regular filters get 1/3rd and server
4918 		 * filters get 2/3rd. This is only enabled if the workaround
4919 		 * path is enabled.
4920 		 * 1. Regular filters.
4921 		 * 2. Server filters: these are special filters which are used
4922 		 *    to redirect SYN packets to the offload queue.
4923 		 */
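		/* e.g. an nftids of 96 is split into 32 regular filter ids and
		 * 64 server filter ids (sftid_base == ftid_base + 32).
		 */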
4924 		if (adap->flags & CXGB4_FW_OFLD_CONN && !is_bypass(adap)) {
4925 			adap->tids.sftid_base = adap->tids.ftid_base +
4926 					DIV_ROUND_UP(adap->tids.nftids, 3);
4927 			adap->tids.nsftids = adap->tids.nftids -
4928 					 DIV_ROUND_UP(adap->tids.nftids, 3);
4929 			adap->tids.nftids = adap->tids.sftid_base -
4930 						adap->tids.ftid_base;
4931 		}
4932 		adap->vres.ddp.start = val[3];
4933 		adap->vres.ddp.size = val[4] - val[3] + 1;
4934 		adap->params.ofldq_wr_cred = val[5];
4935 
4936 		if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_HASHFILTER)) {
4937 			init_hash_filter(adap);
4938 		} else {
4939 			adap->num_ofld_uld += 1;
4940 		}
4941 
4942 		if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_ETHOFLD)) {
4943 			params[0] = FW_PARAM_PFVF(ETHOFLD_START);
4944 			params[1] = FW_PARAM_PFVF(ETHOFLD_END);
4945 			ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
4946 					      params, val);
4947 			if (!ret) {
4948 				adap->tids.eotid_base = val[0];
4949 				adap->tids.neotids = min_t(u32, MAX_ATIDS,
4950 							   val[1] - val[0] + 1);
4951 				adap->params.ethofld = 1;
4952 			}
4953 		}
4954 	}
4955 	if (caps_cmd.rdmacaps) {
4956 		params[0] = FW_PARAM_PFVF(STAG_START);
4957 		params[1] = FW_PARAM_PFVF(STAG_END);
4958 		params[2] = FW_PARAM_PFVF(RQ_START);
4959 		params[3] = FW_PARAM_PFVF(RQ_END);
4960 		params[4] = FW_PARAM_PFVF(PBL_START);
4961 		params[5] = FW_PARAM_PFVF(PBL_END);
4962 		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
4963 				      params, val);
4964 		if (ret < 0)
4965 			goto bye;
4966 		adap->vres.stag.start = val[0];
4967 		adap->vres.stag.size = val[1] - val[0] + 1;
4968 		adap->vres.rq.start = val[2];
4969 		adap->vres.rq.size = val[3] - val[2] + 1;
4970 		adap->vres.pbl.start = val[4];
4971 		adap->vres.pbl.size = val[5] - val[4] + 1;
4972 
4973 		params[0] = FW_PARAM_PFVF(SRQ_START);
4974 		params[1] = FW_PARAM_PFVF(SRQ_END);
4975 		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
4976 				      params, val);
4977 		if (!ret) {
4978 			adap->vres.srq.start = val[0];
4979 			adap->vres.srq.size = val[1] - val[0] + 1;
4980 		}
4981 		if (adap->vres.srq.size) {
4982 			adap->srq = t4_init_srq(adap->vres.srq.size);
4983 			if (!adap->srq)
4984 				dev_warn(&adap->pdev->dev, "could not allocate SRQ, continuing\n");
4985 		}
4986 
4987 		params[0] = FW_PARAM_PFVF(SQRQ_START);
4988 		params[1] = FW_PARAM_PFVF(SQRQ_END);
4989 		params[2] = FW_PARAM_PFVF(CQ_START);
4990 		params[3] = FW_PARAM_PFVF(CQ_END);
4991 		params[4] = FW_PARAM_PFVF(OCQ_START);
4992 		params[5] = FW_PARAM_PFVF(OCQ_END);
4993 		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params,
4994 				      val);
4995 		if (ret < 0)
4996 			goto bye;
4997 		adap->vres.qp.start = val[0];
4998 		adap->vres.qp.size = val[1] - val[0] + 1;
4999 		adap->vres.cq.start = val[2];
5000 		adap->vres.cq.size = val[3] - val[2] + 1;
5001 		adap->vres.ocq.start = val[4];
5002 		adap->vres.ocq.size = val[5] - val[4] + 1;
5003 
5004 		params[0] = FW_PARAM_DEV(MAXORDIRD_QP);
5005 		params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER);
5006 		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params,
5007 				      val);
5008 		if (ret < 0) {
5009 			adap->params.max_ordird_qp = 8;
5010 			adap->params.max_ird_adapter = 32 * adap->tids.ntids;
5011 			ret = 0;
5012 		} else {
5013 			adap->params.max_ordird_qp = val[0];
5014 			adap->params.max_ird_adapter = val[1];
5015 		}
5016 		dev_info(adap->pdev_dev,
5017 			 "max_ordird_qp %d max_ird_adapter %d\n",
5018 			 adap->params.max_ordird_qp,
5019 			 adap->params.max_ird_adapter);
5020 
5021 		/* Enable write_with_immediate if FW supports it */
5022 		params[0] = FW_PARAM_DEV(RDMA_WRITE_WITH_IMM);
5023 		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params,
5024 				      val);
5025 		adap->params.write_w_imm_support = (ret == 0 && val[0] != 0);
5026 
5027 		/* Enable write_cmpl if FW supports it */
5028 		params[0] = FW_PARAM_DEV(RI_WRITE_CMPL_WR);
5029 		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params,
5030 				      val);
5031 		adap->params.write_cmpl_support = (ret == 0 && val[0] != 0);
5032 		adap->num_ofld_uld += 2;
5033 	}
5034 	if (caps_cmd.iscsicaps) {
5035 		params[0] = FW_PARAM_PFVF(ISCSI_START);
5036 		params[1] = FW_PARAM_PFVF(ISCSI_END);
5037 		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
5038 				      params, val);
5039 		if (ret < 0)
5040 			goto bye;
5041 		adap->vres.iscsi.start = val[0];
5042 		adap->vres.iscsi.size = val[1] - val[0] + 1;
5043 		if (is_t6(adap->params.chip)) {
5044 			params[0] = FW_PARAM_PFVF(PPOD_EDRAM_START);
5045 			params[1] = FW_PARAM_PFVF(PPOD_EDRAM_END);
5046 			ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
5047 					      params, val);
5048 			if (!ret) {
5049 				adap->vres.ppod_edram.start = val[0];
5050 				adap->vres.ppod_edram.size =
5051 					val[1] - val[0] + 1;
5052 
5053 				dev_info(adap->pdev_dev,
5054 					 "ppod edram start 0x%x end 0x%x size 0x%x\n",
5055 					 val[0], val[1],
5056 					 adap->vres.ppod_edram.size);
5057 			}
5058 		}
5059 		/* LIO target and cxgb4i initiator */
5060 		adap->num_ofld_uld += 2;
5061 	}
5062 	if (caps_cmd.cryptocaps) {
5063 		if (ntohs(caps_cmd.cryptocaps) &
5064 		    FW_CAPS_CONFIG_CRYPTO_LOOKASIDE) {
5065 			params[0] = FW_PARAM_PFVF(NCRYPTO_LOOKASIDE);
5066 			ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
5067 					      2, params, val);
5068 			if (ret < 0) {
5069 				if (ret != -EINVAL)
5070 					goto bye;
5071 			} else {
5072 				adap->vres.ncrypto_fc = val[0];
5073 			}
5074 			adap->num_ofld_uld += 1;
5075 		}
5076 		if (ntohs(caps_cmd.cryptocaps) &
5077 		    FW_CAPS_CONFIG_TLS_INLINE) {
5078 			params[0] = FW_PARAM_PFVF(TLS_START);
5079 			params[1] = FW_PARAM_PFVF(TLS_END);
5080 			ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
5081 					      2, params, val);
5082 			if (ret < 0)
5083 				goto bye;
5084 			adap->vres.key.start = val[0];
5085 			adap->vres.key.size = val[1] - val[0] + 1;
5086 			adap->num_uld += 1;
5087 		}
5088 		adap->params.crypto = ntohs(caps_cmd.cryptocaps);
5089 	}
5090 
5091 	/* The MTU/MSS Table is initialized by now, so load their values.  If
5092 	 * we're initializing the adapter, then we'll make any modifications
5093 	 * we want to the MTU/MSS Table and also initialize the congestion
5094 	 * parameters.
5095 	 */
5096 	t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
5097 	if (state != DEV_STATE_INIT) {
5098 		int i;
5099 
5100 		/* The default MTU Table contains values 1492 and 1500.
5101 		 * However, for TCP, it's better to have two values which are
5102 		 * a multiple of 8 +/- 4 bytes apart near this popular MTU.
5103 		 * This allows us to have a TCP Data Payload which is a
5104 		 * multiple of 8 regardless of what combination of TCP Options
5105 		 * are in use (always a multiple of 4 bytes) which is
5106 		 * important for performance reasons.  For instance, if no
5107 		 * options are in use, then we have a 20-byte IP header and a
5108 		 * 20-byte TCP header.  In this case, a 1500-byte MSS would
5109 		 * result in a TCP Data Payload of 1500 - 40 == 1460 bytes
5110 		 * which is not a multiple of 8.  So using an MSS of 1488 in
5111 		 * this case results in a TCP Data Payload of 1448 bytes which
5112 		 * is a multiple of 8.  On the other hand, if 12-byte TCP Time
5113 		 * Stamps have been negotiated, then an MTU of 1500 bytes
5114 		 * results in a TCP Data Payload of 1448 bytes which, as
5115 		 * above, is a multiple of 8 bytes ...
5116 		 */
5117 		for (i = 0; i < NMTUS; i++)
5118 			if (adap->params.mtus[i] == 1492) {
5119 				adap->params.mtus[i] = 1488;
5120 				break;
5121 			}
5122 
5123 		t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
5124 			     adap->params.b_wnd);
5125 	}
5126 	t4_init_sge_params(adap);
5127 	adap->flags |= CXGB4_FW_OK;
5128 	t4_init_tp_params(adap, true);
5129 	return 0;
5130 
5131 	/*
5132 	 * Something bad happened.  If a command timed out or failed with EIO,
5133 	 * the FW is not operating within its spec or something catastrophic
5134 	 * happened to the HW/FW; stop issuing commands.
5135 	 */
5136 bye:
5137 	adap_free_hma_mem(adap);
5138 	kfree(adap->sge.egr_map);
5139 	kfree(adap->sge.ingr_map);
5140 	kfree(adap->sge.starving_fl);
5141 	kfree(adap->sge.txq_maperr);
5142 #ifdef CONFIG_DEBUG_FS
5143 	kfree(adap->sge.blocked_fl);
5144 #endif
5145 	if (ret != -ETIMEDOUT && ret != -EIO)
5146 		t4_fw_bye(adap, adap->mbox);
5147 	return ret;
5148 }
5149 
5150 /* EEH callbacks */
5151 
5152 static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
5153 					 pci_channel_state_t state)
5154 {
5155 	int i;
5156 	struct adapter *adap = pci_get_drvdata(pdev);
5157 
5158 	if (!adap)
5159 		goto out;
5160 
5161 	rtnl_lock();
5162 	adap->flags &= ~CXGB4_FW_OK;
5163 	notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
5164 	spin_lock(&adap->stats_lock);
5165 	for_each_port(adap, i) {
5166 		struct net_device *dev = adap->port[i];
5167 		if (dev) {
5168 			netif_device_detach(dev);
5169 			netif_carrier_off(dev);
5170 		}
5171 	}
5172 	spin_unlock(&adap->stats_lock);
5173 	disable_interrupts(adap);
5174 	if (adap->flags & CXGB4_FULL_INIT_DONE)
5175 		cxgb_down(adap);
5176 	rtnl_unlock();
5177 	if ((adap->flags & CXGB4_DEV_ENABLED)) {
5178 		pci_disable_device(pdev);
5179 		adap->flags &= ~CXGB4_DEV_ENABLED;
5180 	}
5181 out:	return state == pci_channel_io_perm_failure ?
5182 		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
5183 }
5184 
5185 static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
5186 {
5187 	int i, ret;
5188 	struct fw_caps_config_cmd c;
5189 	struct adapter *adap = pci_get_drvdata(pdev);
5190 
5191 	if (!adap) {
5192 		pci_restore_state(pdev);
5193 		pci_save_state(pdev);
5194 		return PCI_ERS_RESULT_RECOVERED;
5195 	}
5196 
5197 	if (!(adap->flags & CXGB4_DEV_ENABLED)) {
5198 		if (pci_enable_device(pdev)) {
5199 			dev_err(&pdev->dev, "Cannot reenable PCI "
5200 					    "device after reset\n");
5201 			return PCI_ERS_RESULT_DISCONNECT;
5202 		}
5203 		adap->flags |= CXGB4_DEV_ENABLED;
5204 	}
5205 
5206 	pci_set_master(pdev);
5207 	pci_restore_state(pdev);
5208 	pci_save_state(pdev);
5209 
5210 	if (t4_wait_dev_ready(adap->regs) < 0)
5211 		return PCI_ERS_RESULT_DISCONNECT;
5212 	if (t4_fw_hello(adap, adap->mbox, adap->pf, MASTER_MUST, NULL) < 0)
5213 		return PCI_ERS_RESULT_DISCONNECT;
5214 	adap->flags |= CXGB4_FW_OK;
5215 	if (adap_init1(adap, &c))
5216 		return PCI_ERS_RESULT_DISCONNECT;
5217 
5218 	for_each_port(adap, i) {
5219 		struct port_info *pi = adap2pinfo(adap, i);
5220 		u8 vivld = 0, vin = 0;
5221 
5222 		ret = t4_alloc_vi(adap, adap->mbox, pi->tx_chan, adap->pf, 0, 1,
5223 				  NULL, NULL, &vivld, &vin);
5224 		if (ret < 0)
5225 			return PCI_ERS_RESULT_DISCONNECT;
5226 		pi->viid = ret;
5227 		pi->xact_addr_filt = -1;
5228 		/* If fw supports returning the VIN as part of FW_VI_CMD,
5229 		 * save the returned values.
5230 		 */
5231 		if (adap->params.viid_smt_extn_support) {
5232 			pi->vivld = vivld;
5233 			pi->vin = vin;
5234 		} else {
5235 			/* Retrieve the values from VIID */
5236 			pi->vivld = FW_VIID_VIVLD_G(pi->viid);
5237 			pi->vin = FW_VIID_VIN_G(pi->viid);
5238 		}
5239 	}
5240 
5241 	t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
5242 		     adap->params.b_wnd);
5243 	setup_memwin(adap);
5244 	if (cxgb_up(adap))
5245 		return PCI_ERS_RESULT_DISCONNECT;
5246 	return PCI_ERS_RESULT_RECOVERED;
5247 }
5248 
5249 static void eeh_resume(struct pci_dev *pdev)
5250 {
5251 	int i;
5252 	struct adapter *adap = pci_get_drvdata(pdev);
5253 
5254 	if (!adap)
5255 		return;
5256 
5257 	rtnl_lock();
5258 	for_each_port(adap, i) {
5259 		struct net_device *dev = adap->port[i];
5260 		if (dev) {
5261 			if (netif_running(dev)) {
5262 				link_start(dev);
5263 				cxgb_set_rxmode(dev);
5264 			}
5265 			netif_device_attach(dev);
5266 		}
5267 	}
5268 	rtnl_unlock();
5269 }
5270 
5271 static void eeh_reset_prepare(struct pci_dev *pdev)
5272 {
5273 	struct adapter *adapter = pci_get_drvdata(pdev);
5274 	int i;
5275 
5276 	if (adapter->pf != 4)
5277 		return;
5278 
5279 	adapter->flags &= ~CXGB4_FW_OK;
5280 
5281 	notify_ulds(adapter, CXGB4_STATE_DOWN);
5282 
5283 	for_each_port(adapter, i)
5284 		if (adapter->port[i]->reg_state == NETREG_REGISTERED)
5285 			cxgb_close(adapter->port[i]);
5286 
5287 	disable_interrupts(adapter);
5288 	cxgb4_free_mps_ref_entries(adapter);
5289 
5290 	adap_free_hma_mem(adapter);
5291 
5292 	if (adapter->flags & CXGB4_FULL_INIT_DONE)
5293 		cxgb_down(adapter);
5294 }
5295 
5296 static void eeh_reset_done(struct pci_dev *pdev)
5297 {
5298 	struct adapter *adapter = pci_get_drvdata(pdev);
5299 	int err, i;
5300 
5301 	if (adapter->pf != 4)
5302 		return;
5303 
5304 	err = t4_wait_dev_ready(adapter->regs);
5305 	if (err < 0) {
5306 		dev_err(adapter->pdev_dev,
5307 			"Device not ready, err %d", err);
5308 		return;
5309 	}
5310 
5311 	setup_memwin(adapter);
5312 
5313 	err = adap_init0(adapter, 1);
5314 	if (err) {
5315 		dev_err(adapter->pdev_dev,
5316 			"Adapter init failed, err %d\n", err);
5317 		return;
5318 	}
5319 
5320 	setup_memwin_rdma(adapter);
5321 
5322 	if (adapter->flags & CXGB4_FW_OK) {
5323 		err = t4_port_init(adapter, adapter->pf, adapter->pf, 0);
5324 		if (err) {
5325 			dev_err(adapter->pdev_dev,
5326 				"Port init failed, err %d\n", err);
5327 			return;
5328 		}
5329 	}
5330 
5331 	err = cfg_queues(adapter);
5332 	if (err) {
5333 		dev_err(adapter->pdev_dev,
5334 			"Config queues failed, err %d\n", err);
5335 		return;
5336 	}
5337 
5338 	cxgb4_init_mps_ref_entries(adapter);
5339 
5340 	err = setup_fw_sge_queues(adapter);
5341 	if (err) {
5342 		dev_err(adapter->pdev_dev,
5343 			"FW sge queue allocation failed, err %d\n", err);
5344 		return;
5345 	}
5346 
5347 	for_each_port(adapter, i)
5348 		if (adapter->port[i]->reg_state == NETREG_REGISTERED)
5349 			cxgb_open(adapter->port[i]);
5350 }
5351 
5352 static const struct pci_error_handlers cxgb4_eeh = {
5353 	.error_detected = eeh_err_detected,
5354 	.slot_reset     = eeh_slot_reset,
5355 	.resume         = eeh_resume,
5356 	.reset_prepare  = eeh_reset_prepare,
5357 	.reset_done     = eeh_reset_done,
5358 };
5359 
5360 /* Return true if the Link Configuration supports "High Speeds" (those greater
5361  * than 1Gb/s).
5362  */
5363 static inline bool is_x_10g_port(const struct link_config *lc)
5364 {
5365 	unsigned int speeds, high_speeds;
5366 
5367 	speeds = FW_PORT_CAP32_SPEED_V(FW_PORT_CAP32_SPEED_G(lc->pcaps));
5368 	high_speeds = speeds &
5369 			~(FW_PORT_CAP32_SPEED_100M | FW_PORT_CAP32_SPEED_1G);
5370 
5371 	return high_speeds != 0;
5372 }
5373 
5374 /* Perform default configuration of DMA queues depending on the number and type
5375  * of ports we found and the number of available CPUs.  Most settings can be
5376  * modified by the admin prior to actual use.
5377  */
5378 static int cfg_queues(struct adapter *adap)
5379 {
5380 	u32 avail_qsets, avail_eth_qsets, avail_uld_qsets;
5381 	u32 ncpus = num_online_cpus();
5382 	u32 niqflint, neq, num_ulds;
5383 	struct sge *s = &adap->sge;
5384 	u32 i, n10g = 0, qidx = 0;
5385 	u32 q10g = 0, q1g;
5386 
5387 	/* Reduce memory usage in kdump environment, disable all offload. */
5388 	if (is_kdump_kernel() || (is_uld(adap) && t4_uld_mem_alloc(adap))) {
5389 		adap->params.offload = 0;
5390 		adap->params.crypto = 0;
5391 		adap->params.ethofld = 0;
5392 	}
5393 
5394 	/* Calculate the number of Ethernet Queue Sets available based on
5395 	 * resources provisioned for us.  We always have an Asynchronous
5396 	 * Firmware Event Ingress Queue.  If we're operating in MSI or Legacy
5397 	 * IRQ Pin Interrupt mode, then we'll also have a Forwarded Interrupt
5398 	 * Ingress Queue.  Meanwhile, we need two Egress Queues for each
5399 	 * Queue Set: one for the Free List and one for the Ethernet TX Queue.
5400 	 *
5401 	 * Note that we should also take into account all of the various
5402 	 * Offload Queues.  But, in any situation where we're operating in
5403 	 * a Resource Constrained Provisioning environment, doing any Offload
5404 	 * at all is problematic ...
5405 	 */
5406 	niqflint = adap->params.pfres.niqflint - 1;
5407 	if (!(adap->flags & CXGB4_USING_MSIX))
5408 		niqflint--;
5409 	neq = adap->params.pfres.neq / 2;
5410 	avail_qsets = min(niqflint, neq);
5411 
5412 	if (avail_qsets < adap->params.nports) {
5413 		dev_err(adap->pdev_dev, "avail_qsets=%d < nports=%d\n",
5414 			avail_qsets, adap->params.nports);
5415 		return -ENOMEM;
5416 	}
5417 
5418 	/* Count the number of 10Gb/s or better ports */
5419 	for_each_port(adap, i)
5420 		n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
5421 
5422 	avail_eth_qsets = min_t(u32, avail_qsets, MAX_ETH_QSETS);
5423 
5424 	/* We default to 1 queue per non-10G port and up to as many queues as
5425 	 * there are CPU cores per 10G port.
5426 	 */
5427 	if (n10g)
5428 		q10g = (avail_eth_qsets - (adap->params.nports - n10g)) / n10g;
5429 
5430 #ifdef CONFIG_CHELSIO_T4_DCB
5431 	/* For Data Center Bridging support we need to be able to support up
5432 	 * to 8 Traffic Priorities; each of which will be assigned to its
5433 	 * own TX Queue in order to prevent Head-Of-Line Blocking.
5434 	 */
5435 	q1g = 8;
5436 	if (adap->params.nports * 8 > avail_eth_qsets) {
5437 		dev_err(adap->pdev_dev, "DCB avail_eth_qsets=%d < %d!\n",
5438 			avail_eth_qsets, adap->params.nports * 8);
5439 		return -ENOMEM;
5440 	}
5441 
5442 	if (adap->params.nports * ncpus < avail_eth_qsets)
5443 		q10g = max(8U, ncpus);
5444 	else
5445 		q10g = max(8U, q10g);
5446 
5447 	while ((q10g * n10g) >
5448 	       (avail_eth_qsets - (adap->params.nports - n10g) * q1g))
5449 		q10g--;
5450 
5451 #else /* !CONFIG_CHELSIO_T4_DCB */
5452 	q1g = 1;
5453 	q10g = min(q10g, ncpus);
5454 #endif /* !CONFIG_CHELSIO_T4_DCB */
5455 	if (is_kdump_kernel()) {
5456 		q10g = 1;
5457 		q1g = 1;
5458 	}
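	/* Illustrative example (hypothetical numbers): with two 10G-or-faster
	 * ports, no 1G ports and 16 available Ethernet Queue Sets, q10g starts
	 * at 16 / 2 = 8 and is then capped by the number of online CPUs in the
	 * non-DCB case, or raised to at least 8 (and trimmed back against the
	 * available Queue Sets) when DCB is enabled.
	 */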
5459 
5460 	for_each_port(adap, i) {
5461 		struct port_info *pi = adap2pinfo(adap, i);
5462 
5463 		pi->first_qset = qidx;
5464 		pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : q1g;
5465 		qidx += pi->nqsets;
5466 	}
5467 
5468 	s->ethqsets = qidx;
5469 	s->max_ethqsets = qidx;   /* MSI-X may lower it later */
5470 	avail_qsets -= qidx;
5471 
5472 	if (is_uld(adap)) {
5473 		/* For offload we use 1 queue/channel if all ports are up to 1G,
5474 		 * otherwise we divide all available queues amongst the channels
5475 		 * capped by the number of available cores.
5476 		 */
5477 		num_ulds = adap->num_uld + adap->num_ofld_uld;
5478 		i = min_t(u32, MAX_OFLD_QSETS, ncpus);
5479 		avail_uld_qsets = roundup(i, adap->params.nports);
5480 		if (avail_qsets < num_ulds * adap->params.nports) {
5481 			adap->params.offload = 0;
5482 			adap->params.crypto = 0;
5483 			s->ofldqsets = 0;
5484 		} else if (avail_qsets < num_ulds * avail_uld_qsets || !n10g) {
5485 			s->ofldqsets = adap->params.nports;
5486 		} else {
5487 			s->ofldqsets = avail_uld_qsets;
5488 		}
5489 
5490 		avail_qsets -= num_ulds * s->ofldqsets;
5491 	}
5492 
5493 	/* ETHOFLD Queues used for QoS offload should follow same
5494 	 * allocation scheme as normal Ethernet Queues.
5495 	 */
5496 	if (is_ethofld(adap)) {
5497 		if (avail_qsets < s->max_ethqsets) {
5498 			adap->params.ethofld = 0;
5499 			s->eoqsets = 0;
5500 		} else {
5501 			s->eoqsets = s->max_ethqsets;
5502 		}
5503 		avail_qsets -= s->eoqsets;
5504 	}
5505 
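	/* Default Ethernet RX queue sizing: 1024 response queue entries of
	 * 64 bytes each, backed by a 72-entry Free List, with moderate
	 * interrupt holdoff (5us timer / 10-packet count threshold).
	 */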
5506 	for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
5507 		struct sge_eth_rxq *r = &s->ethrxq[i];
5508 
5509 		init_rspq(adap, &r->rspq, 5, 10, 1024, 64);
5510 		r->fl.size = 72;
5511 	}
5512 
5513 	for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
5514 		s->ethtxq[i].q.size = 1024;
5515 
5516 	for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
5517 		s->ctrlq[i].q.size = 512;
5518 
5519 	if (!is_t4(adap->params.chip))
5520 		s->ptptxq.q.size = 8;
5521 
5522 	init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
5523 	init_rspq(adap, &s->intrq, 0, 1, 512, 64);
5524 
5525 	return 0;
5526 }
5527 
5528 /*
5529  * Reduce the number of Ethernet queues across all ports to at most n.
5530  * n provides at least one queue per port.
5531  */
5532 static void reduce_ethqs(struct adapter *adap, int n)
5533 {
5534 	int i;
5535 	struct port_info *pi;
5536 
5537 	while (n < adap->sge.ethqsets)
5538 		for_each_port(adap, i) {
5539 			pi = adap2pinfo(adap, i);
5540 			if (pi->nqsets > 1) {
5541 				pi->nqsets--;
5542 				adap->sge.ethqsets--;
5543 				if (adap->sge.ethqsets <= n)
5544 					break;
5545 			}
5546 		}
5547 
5548 	n = 0;
5549 	for_each_port(adap, i) {
5550 		pi = adap2pinfo(adap, i);
5551 		pi->first_qset = n;
5552 		n += pi->nqsets;
5553 	}
5554 }
5555 
5556 static int alloc_msix_info(struct adapter *adap, u32 num_vec)
5557 {
5558 	struct msix_info *msix_info;
5559 
5560 	msix_info = kcalloc(num_vec, sizeof(*msix_info), GFP_KERNEL);
5561 	if (!msix_info)
5562 		return -ENOMEM;
5563 
5564 	adap->msix_bmap.msix_bmap = kcalloc(BITS_TO_LONGS(num_vec),
5565 					    sizeof(long), GFP_KERNEL);
5566 	if (!adap->msix_bmap.msix_bmap) {
5567 		kfree(msix_info);
5568 		return -ENOMEM;
5569 	}
5570 
5571 	spin_lock_init(&adap->msix_bmap.lock);
5572 	adap->msix_bmap.mapsize = num_vec;
5573 
5574 	adap->msix_info = msix_info;
5575 	return 0;
5576 }
5577 
5578 static void free_msix_info(struct adapter *adap)
5579 {
5580 	kfree(adap->msix_bmap.msix_bmap);
5581 	kfree(adap->msix_info);
5582 }
5583 
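/* Grab the next free MSI-X index from the adapter's allocation bitmap.
 * Returns the index on success or -ENOSPC if every vector is in use.
 */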
5584 int cxgb4_get_msix_idx_from_bmap(struct adapter *adap)
5585 {
5586 	struct msix_bmap *bmap = &adap->msix_bmap;
5587 	unsigned int msix_idx;
5588 	unsigned long flags;
5589 
5590 	spin_lock_irqsave(&bmap->lock, flags);
5591 	msix_idx = find_first_zero_bit(bmap->msix_bmap, bmap->mapsize);
5592 	if (msix_idx < bmap->mapsize) {
5593 		__set_bit(msix_idx, bmap->msix_bmap);
5594 	} else {
5595 		spin_unlock_irqrestore(&bmap->lock, flags);
5596 		return -ENOSPC;
5597 	}
5598 
5599 	spin_unlock_irqrestore(&bmap->lock, flags);
5600 	return msix_idx;
5601 }
5602 
5603 void cxgb4_free_msix_idx_in_bmap(struct adapter *adap,
5604 				 unsigned int msix_idx)
5605 {
5606 	struct msix_bmap *bmap = &adap->msix_bmap;
5607 	unsigned long flags;
5608 
5609 	spin_lock_irqsave(&bmap->lock, flags);
5610 	__clear_bit(msix_idx, bmap->msix_bmap);
5611 	spin_unlock_irqrestore(&bmap->lock, flags);
5612 }
5613 
5614 /* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
5615 #define EXTRA_VECS 2
5616 
5617 static int enable_msix(struct adapter *adap)
5618 {
5619 	u32 eth_need, uld_need = 0, ethofld_need = 0;
5620 	u32 ethqsets = 0, ofldqsets = 0, eoqsets = 0;
5621 	u8 num_uld = 0, nchan = adap->params.nports;
5622 	u32 i, want, need, num_vec;
5623 	struct sge *s = &adap->sge;
5624 	struct msix_entry *entries;
5625 	struct port_info *pi;
5626 	int allocated, ret;
5627 
5628 	want = s->max_ethqsets;
5629 #ifdef CONFIG_CHELSIO_T4_DCB
5630 	/* For Data Center Bridging we need 8 Ethernet TX Priority Queues for
5631 	 * each port.
5632 	 */
5633 	need = 8 * nchan;
5634 #else
5635 	need = nchan;
5636 #endif
5637 	eth_need = need;
5638 	if (is_uld(adap)) {
5639 		num_uld = adap->num_ofld_uld + adap->num_uld;
5640 		want += num_uld * s->ofldqsets;
5641 		uld_need = num_uld * nchan;
5642 		need += uld_need;
5643 	}
5644 
5645 	if (is_ethofld(adap)) {
5646 		want += s->eoqsets;
5647 		ethofld_need = eth_need;
5648 		need += ethofld_need;
5649 	}
5650 
5651 	want += EXTRA_VECS;
5652 	need += EXTRA_VECS;
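	/* "want" is the ideal vector count (every Ethernet, ULD and ETHOFLD
	 * queue set plus the extra FW/non-data vectors); "need" is the bare
	 * per-channel minimum for each enabled capability.
	 */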
5653 
5654 	entries = kmalloc_array(want, sizeof(*entries), GFP_KERNEL);
5655 	if (!entries)
5656 		return -ENOMEM;
5657 
5658 	for (i = 0; i < want; i++)
5659 		entries[i].entry = i;
5660 
5661 	allocated = pci_enable_msix_range(adap->pdev, entries, need, want);
5662 	if (allocated < 0) {
5663 		/* Disable offload and attempt to get vectors for NIC
5664 		 * only mode.
5665 		 */
5666 		want = s->max_ethqsets + EXTRA_VECS;
5667 		need = eth_need + EXTRA_VECS;
5668 		allocated = pci_enable_msix_range(adap->pdev, entries,
5669 						  need, want);
5670 		if (allocated < 0) {
5671 			dev_info(adap->pdev_dev,
5672 				 "Disabling MSI-X due to insufficient MSI-X vectors\n");
5673 			ret = allocated;
5674 			goto out_free;
5675 		}
5676 
5677 		dev_info(adap->pdev_dev,
5678 			 "Disabling offload due to insufficient MSI-X vectors\n");
5679 		adap->params.offload = 0;
5680 		adap->params.crypto = 0;
5681 		adap->params.ethofld = 0;
5682 		s->ofldqsets = 0;
5683 		s->eoqsets = 0;
5684 		uld_need = 0;
5685 		ethofld_need = 0;
5686 	}
5687 
5688 	num_vec = allocated;
5689 	if (num_vec < want) {
5690 		/* Distribute available vectors to the various queue groups.
5691 		 * Every group gets its minimum requirement and NIC gets top
5692 		 * priority for leftovers.
5693 		 */
5694 		ethqsets = eth_need;
5695 		if (is_uld(adap))
5696 			ofldqsets = nchan;
5697 		if (is_ethofld(adap))
5698 			eoqsets = ethofld_need;
5699 
5700 		num_vec -= need;
5701 		while (num_vec) {
5702 			if (num_vec < eth_need + ethofld_need ||
5703 			    ethqsets > s->max_ethqsets)
5704 				break;
5705 
5706 			for_each_port(adap, i) {
5707 				pi = adap2pinfo(adap, i);
5708 				if (pi->nqsets < 2)
5709 					continue;
5710 
5711 				ethqsets++;
5712 				num_vec--;
5713 				if (ethofld_need) {
5714 					eoqsets++;
5715 					num_vec--;
5716 				}
5717 			}
5718 		}
5719 
5720 		if (is_uld(adap)) {
5721 			while (num_vec) {
5722 				if (num_vec < uld_need ||
5723 				    ofldqsets > s->ofldqsets)
5724 					break;
5725 
5726 				ofldqsets++;
5727 				num_vec -= uld_need;
5728 			}
5729 		}
5730 	} else {
5731 		ethqsets = s->max_ethqsets;
5732 		if (is_uld(adap))
5733 			ofldqsets = s->ofldqsets;
5734 		if (is_ethofld(adap))
5735 			eoqsets = s->eoqsets;
5736 	}
5737 
5738 	if (ethqsets < s->max_ethqsets) {
5739 		s->max_ethqsets = ethqsets;
5740 		reduce_ethqs(adap, ethqsets);
5741 	}
5742 
5743 	if (is_uld(adap)) {
5744 		s->ofldqsets = ofldqsets;
5745 		s->nqs_per_uld = s->ofldqsets;
5746 	}
5747 
5748 	if (is_ethofld(adap))
5749 		s->eoqsets = eoqsets;
5750 
5751 	/* Allocate the MSI-X descriptor array and allocation bitmap */
5752 	ret = alloc_msix_info(adap, allocated);
5753 	if (ret)
5754 		goto out_disable_msix;
5755 
5756 	for (i = 0; i < allocated; i++) {
5757 		adap->msix_info[i].vec = entries[i].vector;
5758 		adap->msix_info[i].idx = i;
5759 	}
5760 
5761 	dev_info(adap->pdev_dev,
5762 		 "%d MSI-X vectors allocated, nic %d eoqsets %d per uld %d\n",
5763 		 allocated, s->max_ethqsets, s->eoqsets, s->nqs_per_uld);
5764 
5765 	kfree(entries);
5766 	return 0;
5767 
5768 out_disable_msix:
5769 	pci_disable_msix(adap->pdev);
5770 
5771 out_free:
5772 	kfree(entries);
5773 	return ret;
5774 }
5775 
5776 #undef EXTRA_VECS
5777 
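/* Query the firmware for each port's RSS mode and allocate the per-port
 * RSS indirection table (one u16 entry per RSS slot).
 */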
5778 static int init_rss(struct adapter *adap)
5779 {
5780 	unsigned int i;
5781 	int err;
5782 
5783 	err = t4_init_rss_mode(adap, adap->mbox);
5784 	if (err)
5785 		return err;
5786 
5787 	for_each_port(adap, i) {
5788 		struct port_info *pi = adap2pinfo(adap, i);
5789 
5790 		pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
5791 		if (!pi->rss)
5792 			return -ENOMEM;
5793 	}
5794 	return 0;
5795 }
5796 
5797 /* Dump basic information about the adapter */
5798 static void print_adapter_info(struct adapter *adapter)
5799 {
5800 	/* Hardware/Firmware/etc. Version/Revision IDs */
5801 	t4_dump_version_info(adapter);
5802 
5803 	/* Software/Hardware configuration */
5804 	dev_info(adapter->pdev_dev, "Configuration: %sNIC %s, %s capable\n",
5805 		 is_offload(adapter) ? "R" : "",
5806 		 ((adapter->flags & CXGB4_USING_MSIX) ? "MSI-X" :
5807 		  (adapter->flags & CXGB4_USING_MSI) ? "MSI" : ""),
5808 		 is_offload(adapter) ? "Offload" : "non-Offload");
5809 }
5810 
5811 static void print_port_info(const struct net_device *dev)
5812 {
5813 	char buf[80];
5814 	char *bufp = buf;
5815 	const struct port_info *pi = netdev_priv(dev);
5816 	const struct adapter *adap = pi->adapter;
5817 
5818 	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100M)
5819 		bufp += sprintf(bufp, "100M/");
5820 	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_1G)
5821 		bufp += sprintf(bufp, "1G/");
5822 	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_10G)
5823 		bufp += sprintf(bufp, "10G/");
5824 	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_25G)
5825 		bufp += sprintf(bufp, "25G/");
5826 	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_40G)
5827 		bufp += sprintf(bufp, "40G/");
5828 	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_50G)
5829 		bufp += sprintf(bufp, "50G/");
5830 	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100G)
5831 		bufp += sprintf(bufp, "100G/");
5832 	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_200G)
5833 		bufp += sprintf(bufp, "200G/");
5834 	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_400G)
5835 		bufp += sprintf(bufp, "400G/");
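	/* Back up over the trailing '/' so it is overwritten by "BASE-..." */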
5836 	if (bufp != buf)
5837 		--bufp;
5838 	sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));
5839 
5840 	netdev_info(dev, "%s: Chelsio %s (%s) %s\n",
5841 		    dev->name, adap->params.vpd.id, adap->name, buf);
5842 }
5843 
5844 /*
5845  * Free the following resources:
5846  * - memory used for tables
5847  * - MSI/MSI-X
5848  * - net devices
5849  * - resources FW is holding for us
5850  */
5851 static void free_some_resources(struct adapter *adapter)
5852 {
5853 	unsigned int i;
5854 
5855 	kvfree(adapter->smt);
5856 	kvfree(adapter->l2t);
5857 	kvfree(adapter->srq);
5858 	t4_cleanup_sched(adapter);
5859 	kvfree(adapter->tids.tid_tab);
5860 	cxgb4_cleanup_tc_matchall(adapter);
5861 	cxgb4_cleanup_tc_mqprio(adapter);
5862 	cxgb4_cleanup_tc_flower(adapter);
5863 	cxgb4_cleanup_tc_u32(adapter);
5864 	kfree(adapter->sge.egr_map);
5865 	kfree(adapter->sge.ingr_map);
5866 	kfree(adapter->sge.starving_fl);
5867 	kfree(adapter->sge.txq_maperr);
5868 #ifdef CONFIG_DEBUG_FS
5869 	kfree(adapter->sge.blocked_fl);
5870 #endif
5871 	disable_msi(adapter);
5872 
5873 	for_each_port(adapter, i)
5874 		if (adapter->port[i]) {
5875 			struct port_info *pi = adap2pinfo(adapter, i);
5876 
5877 			if (pi->viid != 0)
5878 				t4_free_vi(adapter, adapter->mbox, adapter->pf,
5879 					   0, pi->viid);
5880 			kfree(adap2pinfo(adapter, i)->rss);
5881 			free_netdev(adapter->port[i]);
5882 		}
5883 	if (adapter->flags & CXGB4_FW_OK)
5884 		t4_fw_bye(adapter, adapter->pf);
5885 }
5886 
5887 #define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN | \
5888 		   NETIF_F_GSO_UDP_L4)
5889 #define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
5890 		   NETIF_F_GRO | NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
5891 #define SEGMENT_SIZE 128
5892 
5893 static int t4_get_chip_type(struct adapter *adap, int ver)
5894 {
5895 	u32 pl_rev = REV_G(t4_read_reg(adap, PL_REV_A));
5896 
5897 	switch (ver) {
5898 	case CHELSIO_T4:
5899 		return CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
5900 	case CHELSIO_T5:
5901 		return CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
5902 	case CHELSIO_T6:
5903 		return CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
5904 	default:
5905 		break;
5906 	}
5907 	return -EINVAL;
5908 }
5909 
5910 #ifdef CONFIG_PCI_IOV
5911 static void cxgb4_mgmt_setup(struct net_device *dev)
5912 {
5913 	dev->type = ARPHRD_NONE;
5914 	dev->mtu = 0;
5915 	dev->hard_header_len = 0;
5916 	dev->addr_len = 0;
5917 	dev->tx_queue_len = 0;
5918 	dev->flags |= IFF_NOARP;
5919 	dev->priv_flags |= IFF_NO_QUEUE;
5920 
5921 	/* Initialize the device structure. */
5922 	dev->netdev_ops = &cxgb4_mgmt_netdev_ops;
5923 	dev->ethtool_ops = &cxgb4_mgmt_ethtool_ops;
5924 }
5925 
5926 static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
5927 {
5928 	struct adapter *adap = pci_get_drvdata(pdev);
5929 	int err = 0;
5930 	int current_vfs = pci_num_vf(pdev);
5931 	u32 pcie_fw;
5932 
5933 	pcie_fw = readl(adap->regs + PCIE_FW_A);
5934 	/* Check if fw is initialized */
5935 	if (!(pcie_fw & PCIE_FW_INIT_F)) {
5936 		dev_warn(&pdev->dev, "Device not initialized\n");
5937 		return -EOPNOTSUPP;
5938 	}
5939 
5940 	/* If any of the VFs is already assigned to a Guest OS, then
5941 	 * SR-IOV for the same cannot be modified.
5942 	 */
5943 	if (current_vfs && pci_vfs_assigned(pdev)) {
5944 		dev_err(&pdev->dev,
5945 			"Cannot modify SR-IOV while VFs are assigned\n");
5946 		return current_vfs;
5947 	}
5948 	/* Note that the upper-level code ensures that we're never called with
5949 	 * a non-zero "num_vfs" when we already have VFs instantiated.  But
5950 	 * it never hurts to code defensively.
5951 	 */
5952 	if (num_vfs != 0 && current_vfs != 0)
5953 		return -EBUSY;
5954 
5955 	/* Nothing to do for no change. */
5956 	if (num_vfs == current_vfs)
5957 		return num_vfs;
5958 
5959 	/* Disable SRIOV when zero is passed. */
5960 	if (!num_vfs) {
5961 		pci_disable_sriov(pdev);
5962 		/* free VF Management Interface */
5963 		unregister_netdev(adap->port[0]);
5964 		free_netdev(adap->port[0]);
5965 		adap->port[0] = NULL;
5966 
5967 		/* free VF resources */
5968 		adap->num_vfs = 0;
5969 		kfree(adap->vfinfo);
5970 		adap->vfinfo = NULL;
5971 		return 0;
5972 	}
5973 
5974 	if (!current_vfs) {
5975 		struct fw_pfvf_cmd port_cmd, port_rpl;
5976 		struct net_device *netdev;
5977 		unsigned int pmask, port;
5978 		struct pci_dev *pbridge;
5979 		struct port_info *pi;
5980 		char name[IFNAMSIZ];
5981 		u32 devcap2;
5982 		u16 flags;
5983 
5984 		/* If we want to instantiate Virtual Functions, then our
5985 		 * parent bridge's PCI-E needs to support Alternative Routing
5986 		 * ID (ARI) because our VFs will show up at function offset 8
5987 		 * and above.
5988 		 */
5989 		pbridge = pdev->bus->self;
5990 		pcie_capability_read_word(pbridge, PCI_EXP_FLAGS, &flags);
5991 		pcie_capability_read_dword(pbridge, PCI_EXP_DEVCAP2, &devcap2);
5992 
5993 		if ((flags & PCI_EXP_FLAGS_VERS) < 2 ||
5994 		    !(devcap2 & PCI_EXP_DEVCAP2_ARI)) {
5995 			/* Our parent bridge does not support ARI so issue a
5996 			 * warning and skip instantiating the VFs.  They
5997 			 * won't be reachable.
5998 			 */
5999 			dev_warn(&pdev->dev, "Parent bridge %02x:%02x.%x doesn't support ARI; can't instantiate Virtual Functions\n",
6000 				 pbridge->bus->number, PCI_SLOT(pbridge->devfn),
6001 				 PCI_FUNC(pbridge->devfn));
6002 			return -ENOTSUPP;
6003 		}
6004 		memset(&port_cmd, 0, sizeof(port_cmd));
6005 		port_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) |
6006 						 FW_CMD_REQUEST_F |
6007 						 FW_CMD_READ_F |
6008 						 FW_PFVF_CMD_PFN_V(adap->pf) |
6009 						 FW_PFVF_CMD_VFN_V(0));
6010 		port_cmd.retval_len16 = cpu_to_be32(FW_LEN16(port_cmd));
6011 		err = t4_wr_mbox(adap, adap->mbox, &port_cmd, sizeof(port_cmd),
6012 				 &port_rpl);
6013 		if (err)
6014 			return err;
6015 		pmask = FW_PFVF_CMD_PMASK_G(be32_to_cpu(port_rpl.type_to_neq));
6016 		port = ffs(pmask) - 1;
6017 		/* Allocate VF Management Interface. */
6018 		snprintf(name, IFNAMSIZ, "mgmtpf%d,%d", adap->adap_idx,
6019 			 adap->pf);
6020 		netdev = alloc_netdev(sizeof(struct port_info),
6021 				      name, NET_NAME_UNKNOWN, cxgb4_mgmt_setup);
6022 		if (!netdev)
6023 			return -ENOMEM;
6024 
6025 		pi = netdev_priv(netdev);
6026 		pi->adapter = adap;
6027 		pi->lport = port;
6028 		pi->tx_chan = port;
6029 		SET_NETDEV_DEV(netdev, &pdev->dev);
6030 
6031 		adap->port[0] = netdev;
6032 		pi->port_id = 0;
6033 
6034 		err = register_netdev(adap->port[0]);
6035 		if (err) {
6036 			pr_info("Unable to register VF mgmt netdev %s\n", name);
6037 			free_netdev(adap->port[0]);
6038 			adap->port[0] = NULL;
6039 			return err;
6040 		}
6041 		/* Allocate and set up VF Information. */
6042 		adap->vfinfo = kcalloc(pci_sriov_get_totalvfs(pdev),
6043 				       sizeof(struct vf_info), GFP_KERNEL);
6044 		if (!adap->vfinfo) {
6045 			unregister_netdev(adap->port[0]);
6046 			free_netdev(adap->port[0]);
6047 			adap->port[0] = NULL;
6048 			return -ENOMEM;
6049 		}
6050 		cxgb4_mgmt_fill_vf_station_mac_addr(adap);
6051 	}
6052 	/* Instantiate the requested number of VFs. */
6053 	err = pci_enable_sriov(pdev, num_vfs);
6054 	if (err) {
6055 		pr_info("Unable to instantiate %d VFs\n", num_vfs);
6056 		if (!current_vfs) {
6057 			unregister_netdev(adap->port[0]);
6058 			free_netdev(adap->port[0]);
6059 			adap->port[0] = NULL;
6060 			kfree(adap->vfinfo);
6061 			adap->vfinfo = NULL;
6062 		}
6063 		return err;
6064 	}
6065 
6066 	adap->num_vfs = num_vfs;
6067 	return num_vfs;
6068 }
6069 #endif /* CONFIG_PCI_IOV */
6070 
6071 #if defined(CONFIG_CHELSIO_TLS_DEVICE)
6072 
6073 static int cxgb4_ktls_dev_add(struct net_device *netdev, struct sock *sk,
6074 			      enum tls_offload_ctx_dir direction,
6075 			      struct tls_crypto_info *crypto_info,
6076 			      u32 tcp_sn)
6077 {
6078 	struct adapter *adap = netdev2adap(netdev);
6079 	int ret = 0;
6080 
6081 	mutex_lock(&uld_mutex);
6082 	if (!adap->uld[CXGB4_ULD_CRYPTO].handle) {
6083 		dev_err(adap->pdev_dev, "chcr driver is not loaded\n");
6084 		ret = -EOPNOTSUPP;
6085 		goto out_unlock;
6086 	}
6087 
6088 	if (!adap->uld[CXGB4_ULD_CRYPTO].tlsdev_ops) {
6089 		dev_err(adap->pdev_dev,
6090 			"chcr driver has no registered tlsdev_ops()\n");
6091 		ret = -EOPNOTSUPP;
6092 		goto out_unlock;
6093 	}
6094 
6095 	ret = cxgb4_set_ktls_feature(adap, FW_PARAMS_PARAM_DEV_KTLS_HW_ENABLE);
6096 	if (ret)
6097 		goto out_unlock;
6098 
6099 	ret = adap->uld[CXGB4_ULD_CRYPTO].tlsdev_ops->tls_dev_add(netdev, sk,
6100 								  direction,
6101 								  crypto_info,
6102 								  tcp_sn);
6103 	/* if there is a failure, clear the refcount */
6104 	if (ret)
6105 		cxgb4_set_ktls_feature(adap,
6106 				       FW_PARAMS_PARAM_DEV_KTLS_HW_DISABLE);
6107 out_unlock:
6108 	mutex_unlock(&uld_mutex);
6109 	return ret;
6110 }
6111 
6112 static void cxgb4_ktls_dev_del(struct net_device *netdev,
6113 			       struct tls_context *tls_ctx,
6114 			       enum tls_offload_ctx_dir direction)
6115 {
6116 	struct adapter *adap = netdev2adap(netdev);
6117 
6118 	mutex_lock(&uld_mutex);
6119 	if (!adap->uld[CXGB4_ULD_CRYPTO].handle) {
6120 		dev_err(adap->pdev_dev, "chcr driver is not loaded\n");
6121 		goto out_unlock;
6122 	}
6123 
6124 	if (!adap->uld[CXGB4_ULD_CRYPTO].tlsdev_ops) {
6125 		dev_err(adap->pdev_dev,
6126 			"chcr driver has no registered tlsdev_ops\n");
6127 		goto out_unlock;
6128 	}
6129 
6130 	adap->uld[CXGB4_ULD_CRYPTO].tlsdev_ops->tls_dev_del(netdev, tls_ctx,
6131 							    direction);
6132 	cxgb4_set_ktls_feature(adap, FW_PARAMS_PARAM_DEV_KTLS_HW_DISABLE);
6133 
6134 out_unlock:
6135 	mutex_unlock(&uld_mutex);
6136 }
6137 
6138 static const struct tlsdev_ops cxgb4_ktls_ops = {
6139 	.tls_dev_add = cxgb4_ktls_dev_add,
6140 	.tls_dev_del = cxgb4_ktls_dev_del,
6141 };
6142 #endif /* CONFIG_CHELSIO_TLS_DEVICE */
6143 
6144 static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6145 {
6146 	struct net_device *netdev;
6147 	struct adapter *adapter;
6148 	static int adap_idx = 1;
6149 	int s_qpp, qpp, num_seg;
6150 	struct port_info *pi;
6151 	bool highdma = false;
6152 	enum chip_type chip;
6153 	void __iomem *regs;
6154 	int func, chip_ver;
6155 	u16 device_id;
6156 	int i, err;
6157 	u32 whoami;
6158 
6159 	err = pci_request_regions(pdev, KBUILD_MODNAME);
6160 	if (err) {
6161 		/* Just info, some other driver may have claimed the device. */
6162 		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
6163 		return err;
6164 	}
6165 
6166 	err = pci_enable_device(pdev);
6167 	if (err) {
6168 		dev_err(&pdev->dev, "cannot enable PCI device\n");
6169 		goto out_release_regions;
6170 	}
6171 
6172 	regs = pci_ioremap_bar(pdev, 0);
6173 	if (!regs) {
6174 		dev_err(&pdev->dev, "cannot map device registers\n");
6175 		err = -ENOMEM;
6176 		goto out_disable_device;
6177 	}
6178 
6179 	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
6180 	if (!adapter) {
6181 		err = -ENOMEM;
6182 		goto out_unmap_bar0;
6183 	}
6184 
6185 	adapter->regs = regs;
6186 	err = t4_wait_dev_ready(regs);
6187 	if (err < 0)
6188 		goto out_free_adapter;
6189 
6190 	/* We control everything through one PF */
6191 	whoami = t4_read_reg(adapter, PL_WHOAMI_A);
6192 	pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
6193 	chip = t4_get_chip_type(adapter, CHELSIO_PCI_ID_VER(device_id));
6194 	if ((int)chip < 0) {
6195 		dev_err(&pdev->dev, "Device %d is not supported\n", device_id);
6196 		err = chip;
6197 		goto out_free_adapter;
6198 	}
6199 	chip_ver = CHELSIO_CHIP_VERSION(chip);
6200 	func = chip_ver <= CHELSIO_T5 ?
6201 	       SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
6202 
6203 	adapter->pdev = pdev;
6204 	adapter->pdev_dev = &pdev->dev;
6205 	adapter->name = pci_name(pdev);
6206 	adapter->mbox = func;
6207 	adapter->pf = func;
6208 	adapter->params.chip = chip;
6209 	adapter->adap_idx = adap_idx;
6210 	adapter->msg_enable = DFLT_MSG_ENABLE;
6211 	adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
6212 				    (sizeof(struct mbox_cmd) *
6213 				     T4_OS_LOG_MBOX_CMDS),
6214 				    GFP_KERNEL);
6215 	if (!adapter->mbox_log) {
6216 		err = -ENOMEM;
6217 		goto out_free_adapter;
6218 	}
6219 	spin_lock_init(&adapter->mbox_lock);
6220 	INIT_LIST_HEAD(&adapter->mlist.list);
6221 	adapter->mbox_log->size = T4_OS_LOG_MBOX_CMDS;
6222 	pci_set_drvdata(pdev, adapter);
6223 
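	/* Only the PF named in the PCI ID table's driver_data gets the full
	 * NIC initialization; for any other PF we just record state (so
	 * SR-IOV can be restored later) and return.
	 */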
6224 	if (func != ent->driver_data) {
6225 		pci_disable_device(pdev);
6226 		pci_save_state(pdev);        /* to restore SR-IOV later */
6227 		return 0;
6228 	}
6229 
6230 	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
6231 		highdma = true;
6232 		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
6233 		if (err) {
6234 			dev_err(&pdev->dev,
6235 				"unable to obtain 64-bit DMA for coherent allocations\n");
6236 			goto out_free_adapter;
6237 		}
6238 	} else {
6239 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
6240 		if (err) {
6241 			dev_err(&pdev->dev, "no usable DMA configuration\n");
6242 			goto out_free_adapter;
6243 		}
6244 	}
6245 
6246 	pci_enable_pcie_error_reporting(pdev);
6247 	pci_set_master(pdev);
6248 	pci_save_state(pdev);
6249 	adap_idx++;
6250 	adapter->workq = create_singlethread_workqueue("cxgb4");
6251 	if (!adapter->workq) {
6252 		err = -ENOMEM;
6253 		goto out_free_adapter;
6254 	}
6255 
6256 	/* PCI device has been enabled */
6257 	adapter->flags |= CXGB4_DEV_ENABLED;
6258 	memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
6259 
6260 	/* If possible, we use PCIe Relaxed Ordering Attribute to deliver
6261 	 * Ingress Packet Data to Free List Buffers in order to allow for
6262 	 * chipset performance optimizations between the Root Complex and
6263 	 * Memory Controllers.  (Messages to the associated Ingress Queue
6264 	 * notifying of new Packet Placement in the Free List Buffers will be
6265 	 * sent without the Relaxed Ordering Attribute, thus guaranteeing that
6266 	 * all preceding PCIe Transaction Layer Packets will be processed
6267 	 * first.)  But some Root Complexes have various issues with Upstream
6268 	 * Transaction Layer Packets with the Relaxed Ordering Attribute set.
6269 	 * For PCIe devices below such Root Complexes, the platform clears the
6270 	 * Relaxed Ordering Enable bit in the configuration space, so we check
6271 	 * our own PCIe configuration space to see if it's flagged with advice
6272 	 * against using Relaxed Ordering.
6273 	 */
6274 	if (!pcie_relaxed_ordering_enabled(pdev))
6275 		adapter->flags |= CXGB4_ROOT_NO_RELAXED_ORDERING;
6276 
6277 	spin_lock_init(&adapter->stats_lock);
6278 	spin_lock_init(&adapter->tid_release_lock);
6279 	spin_lock_init(&adapter->win0_lock);
6280 
6281 	INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
6282 	INIT_WORK(&adapter->db_full_task, process_db_full);
6283 	INIT_WORK(&adapter->db_drop_task, process_db_drop);
6284 	INIT_WORK(&adapter->fatal_err_notify_task, notify_fatal_err);
6285 
6286 	err = t4_prep_adapter(adapter);
6287 	if (err)
6288 		goto out_free_adapter;
6289 
6290 	if (is_kdump_kernel()) {
6291 		/* Collect hardware state and append to /proc/vmcore */
6292 		err = cxgb4_cudbg_vmcore_add_dump(adapter);
6293 		if (err) {
6294 			dev_warn(adapter->pdev_dev,
6295 				 "Fail collecting vmcore device dump, err: %d. Continuing\n",
6296 				 err);
6297 			err = 0;
6298 		}
6299 	}
6300 
6301 	if (!is_t4(adapter->params.chip)) {
6302 		s_qpp = (QUEUESPERPAGEPF0_S +
6303 			(QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) *
6304 			adapter->pf);
6305 		qpp = 1 << QUEUESPERPAGEPF0_G(t4_read_reg(adapter,
6306 		      SGE_EGRESS_QUEUES_PER_PAGE_PF_A) >> s_qpp);
6307 		num_seg = PAGE_SIZE / SEGMENT_SIZE;
6308 
6309 		/* Each segment is 128B in size.  Write coalescing is enabled
6310 		 * only when the SGE_EGRESS_QUEUES_PER_PAGE_PF register value
6311 		 * for the queue is less than the number of segments that can
6312 		 * be accommodated in a page.
6313 		 */
6314 		if (qpp > num_seg) {
6315 			dev_err(&pdev->dev,
6316 				"Incorrect number of egress queues per page\n");
6317 			err = -EINVAL;
6318 			goto out_free_adapter;
6319 		}
6320 		adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
6321 					   pci_resource_len(pdev, 2));
6322 		if (!adapter->bar2) {
6323 			dev_err(&pdev->dev, "cannot map device bar2 region\n");
6324 			err = -ENOMEM;
6325 			goto out_free_adapter;
6326 		}
6327 	}
6328 
6329 	setup_memwin(adapter);
6330 	err = adap_init0(adapter, 0);
6331 #ifdef CONFIG_DEBUG_FS
6332 	bitmap_zero(adapter->sge.blocked_fl, adapter->sge.egr_sz);
6333 #endif
6334 	setup_memwin_rdma(adapter);
6335 	if (err)
6336 		goto out_unmap_bar;
6337 
6338 	/* configure SGE_STAT_CFG_A to read WC stats */
6339 	if (!is_t4(adapter->params.chip))
6340 		t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7) |
6341 			     (is_t5(adapter->params.chip) ? STATMODE_V(0) :
6342 			      T6_STATMODE_V(0)));
6343 
6344 	/* Initialize hash mac addr list */
6345 	INIT_LIST_HEAD(&adapter->mac_hlist);
6346 
6347 	for_each_port(adapter, i) {
6348 		/* For supporting MQPRIO Offload, need some extra
6349 		 * queues for each ETHOFLD TIDs. Keep it equal to
6350 		 * MAX_ATIDs for now. Once we connect to firmware
6351 		 * later and query the EOTID params, we'll come to
6352 		 * know the actual # of EOTIDs supported.
6353 		 */
6354 		netdev = alloc_etherdev_mq(sizeof(struct port_info),
6355 					   MAX_ETH_QSETS + MAX_ATIDS);
6356 		if (!netdev) {
6357 			err = -ENOMEM;
6358 			goto out_free_dev;
6359 		}
6360 
6361 		SET_NETDEV_DEV(netdev, &pdev->dev);
6362 
6363 		adapter->port[i] = netdev;
6364 		pi = netdev_priv(netdev);
6365 		pi->adapter = adapter;
6366 		pi->xact_addr_filt = -1;
6367 		pi->port_id = i;
6368 		netdev->irq = pdev->irq;
6369 
6370 		netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
6371 			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
6372 			NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_GRO |
6373 			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
6374 			NETIF_F_HW_TC;
6375 
6376 		if (chip_ver > CHELSIO_T5) {
6377 			netdev->hw_enc_features |= NETIF_F_IP_CSUM |
6378 						   NETIF_F_IPV6_CSUM |
6379 						   NETIF_F_RXCSUM |
6380 						   NETIF_F_GSO_UDP_TUNNEL |
6381 						   NETIF_F_GSO_UDP_TUNNEL_CSUM |
6382 						   NETIF_F_TSO | NETIF_F_TSO6;
6383 
6384 			netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
6385 					       NETIF_F_GSO_UDP_TUNNEL_CSUM |
6386 					       NETIF_F_HW_TLS_RECORD;
6387 		}
6388 
6389 		if (highdma)
6390 			netdev->hw_features |= NETIF_F_HIGHDMA;
6391 		netdev->features |= netdev->hw_features;
6392 		netdev->vlan_features = netdev->features & VLAN_FEAT;
6393 #if defined(CONFIG_CHELSIO_TLS_DEVICE)
6394 		if (pi->adapter->params.crypto & FW_CAPS_CONFIG_TLS_HW) {
6395 			netdev->hw_features |= NETIF_F_HW_TLS_TX;
6396 			netdev->tlsdev_ops = &cxgb4_ktls_ops;
6397 			/* initialize the refcount */
6398 			refcount_set(&pi->adapter->chcr_ktls.ktls_refcount, 0);
6399 		}
6400 #endif
6401 		netdev->priv_flags |= IFF_UNICAST_FLT;
6402 
6403 		/* MTU range: 81 - 9600 */
6404 		netdev->min_mtu = 81;              /* accommodate SACK */
6405 		netdev->max_mtu = MAX_MTU;
6406 
6407 		netdev->netdev_ops = &cxgb4_netdev_ops;
6408 #ifdef CONFIG_CHELSIO_T4_DCB
6409 		netdev->dcbnl_ops = &cxgb4_dcb_ops;
6410 		cxgb4_dcb_state_init(netdev);
6411 		cxgb4_dcb_version_init(netdev);
6412 #endif
6413 		cxgb4_set_ethtool_ops(netdev);
6414 	}
6415 
6416 	cxgb4_init_ethtool_dump(adapter);
6417 
6418 	pci_set_drvdata(pdev, adapter);
6419 
6420 	if (adapter->flags & CXGB4_FW_OK) {
6421 		err = t4_port_init(adapter, func, func, 0);
6422 		if (err)
6423 			goto out_free_dev;
6424 	} else if (adapter->params.nports == 1) {
6425 		/* If we don't have a connection to the firmware -- possibly
6426 		 * because of an error -- grab the raw VPD parameters so we
6427 		 * can set the proper MAC Address on the debug network
6428 		 * interface that we've created.
6429 		 */
6430 		u8 hw_addr[ETH_ALEN];
6431 		u8 *na = adapter->params.vpd.na;
6432 
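		/* The VPD "na" field holds the MAC address as ASCII hex;
		 * convert it to binary and use it as the port 0 address.
		 */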
6433 		err = t4_get_raw_vpd_params(adapter, &adapter->params.vpd);
6434 		if (!err) {
6435 			for (i = 0; i < ETH_ALEN; i++)
6436 				hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
6437 					      hex2val(na[2 * i + 1]));
6438 			t4_set_hw_addr(adapter, 0, hw_addr);
6439 		}
6440 	}
6441 
6442 	if (!(adapter->flags & CXGB4_FW_OK))
6443 		goto fw_attach_fail;
6444 
6445 	/* Configure queues and allocate tables now, they can be needed as
6446 	 * soon as the first register_netdev completes.
6447 	 */
6448 	err = cfg_queues(adapter);
6449 	if (err)
6450 		goto out_free_dev;
6451 
6452 	adapter->smt = t4_init_smt();
6453 	if (!adapter->smt) {
6454 		/* We tolerate a lack of SMT, giving up some functionality */
6455 		dev_warn(&pdev->dev, "could not allocate SMT, continuing\n");
6456 	}
6457 
6458 	adapter->l2t = t4_init_l2t(adapter->l2t_start, adapter->l2t_end);
6459 	if (!adapter->l2t) {
6460 		/* We tolerate a lack of L2T, giving up some functionality */
6461 		dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
6462 		adapter->params.offload = 0;
6463 	}
6464 
6465 #if IS_ENABLED(CONFIG_IPV6)
6466 	if (chip_ver <= CHELSIO_T5 &&
6467 	    (!(t4_read_reg(adapter, LE_DB_CONFIG_A) & ASLIPCOMPEN_F))) {
6468 		/* CLIP functionality is not present in hardware,
6469 		 * hence disable all offload features
6470 		 */
6471 		dev_warn(&pdev->dev,
6472 			 "CLIP not enabled in hardware, continuing\n");
6473 		adapter->params.offload = 0;
6474 	} else {
6475 		adapter->clipt = t4_init_clip_tbl(adapter->clipt_start,
6476 						  adapter->clipt_end);
6477 		if (!adapter->clipt) {
6478 			/* We tolerate a lack of clip_table, giving up
6479 			 * some functionality
6480 			 */
6481 			dev_warn(&pdev->dev,
6482 				 "could not allocate Clip table, continuing\n");
6483 			adapter->params.offload = 0;
6484 		}
6485 	}
6486 #endif
6487 
6488 	for_each_port(adapter, i) {
6489 		pi = adap2pinfo(adapter, i);
6490 		pi->sched_tbl = t4_init_sched(adapter->params.nsched_cls);
6491 		if (!pi->sched_tbl)
6492 			dev_warn(&pdev->dev,
6493 				 "could not activate scheduling on port %d\n",
6494 				 i);
6495 	}
6496 
6497 	if (tid_init(&adapter->tids) < 0) {
6498 		dev_warn(&pdev->dev,
6499 			 "could not allocate TID table, continuing\n");
6500 		adapter->params.offload = 0;
6501 	} else {
6502 		adapter->tc_u32 = cxgb4_init_tc_u32(adapter);
6503 		if (!adapter->tc_u32)
6504 			dev_warn(&pdev->dev,
6505 				 "could not offload tc u32, continuing\n");
6506 
6507 		if (cxgb4_init_tc_flower(adapter))
6508 			dev_warn(&pdev->dev,
6509 				 "could not offload tc flower, continuing\n");
6510 
6511 		if (cxgb4_init_tc_mqprio(adapter))
6512 			dev_warn(&pdev->dev,
6513 				 "could not offload tc mqprio, continuing\n");
6514 
6515 		if (cxgb4_init_tc_matchall(adapter))
6516 			dev_warn(&pdev->dev,
6517 				 "could not offload tc matchall, continuing\n");
6518 	}
6519 
6520 	if (is_offload(adapter) || is_hashfilter(adapter)) {
6521 		if (t4_read_reg(adapter, LE_DB_CONFIG_A) & HASHEN_F) {
6522 			u32 hash_base, hash_reg;
6523 
6524 			if (chip_ver <= CHELSIO_T5) {
6525 				hash_reg = LE_DB_TID_HASHBASE_A;
6526 				hash_base = t4_read_reg(adapter, hash_reg);
6527 				adapter->tids.hash_base = hash_base / 4;
6528 			} else {
6529 				hash_reg = T6_LE_DB_HASH_TID_BASE_A;
6530 				hash_base = t4_read_reg(adapter, hash_reg);
6531 				adapter->tids.hash_base = hash_base;
6532 			}
6533 		}
6534 	}
6535 
6536 	/* See what interrupts we'll be using */
6537 	if (msi > 1 && enable_msix(adapter) == 0)
6538 		adapter->flags |= CXGB4_USING_MSIX;
6539 	else if (msi > 0 && pci_enable_msi(pdev) == 0) {
6540 		adapter->flags |= CXGB4_USING_MSI;
6541 		if (msi > 1)
6542 			free_msix_info(adapter);
6543 	}
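	/* If neither MSI-X nor MSI could be enabled, the driver falls back
	 * to legacy INTx pin interrupts.
	 */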
6544 
6545 	/* check for PCI Express bandwidth capabilities */
6546 	pcie_print_link_status(pdev);
6547 
6548 	cxgb4_init_mps_ref_entries(adapter);
6549 
6550 	err = init_rss(adapter);
6551 	if (err)
6552 		goto out_free_dev;
6553 
6554 	err = setup_non_data_intr(adapter);
6555 	if (err) {
6556 		dev_err(adapter->pdev_dev,
6557 			"Non Data interrupt allocation failed, err: %d\n", err);
6558 		goto out_free_dev;
6559 	}
6560 
6561 	err = setup_fw_sge_queues(adapter);
6562 	if (err) {
6563 		dev_err(adapter->pdev_dev,
6564 			"FW sge queue allocation failed, err %d\n", err);
6565 		goto out_free_dev;
6566 	}
6567 
6568 fw_attach_fail:
6569 	/*
6570 	 * The card is now ready to go.  If any errors occur during device
6571 	 * registration we do not fail the whole card but rather proceed only
6572 	 * with the ports we manage to register successfully.  However we must
6573 	 * register at least one net device.
6574 	 */
6575 	for_each_port(adapter, i) {
6576 		pi = adap2pinfo(adapter, i);
6577 		adapter->port[i]->dev_port = pi->lport;
6578 		netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
6579 		netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);
6580 
6581 		netif_carrier_off(adapter->port[i]);
6582 
6583 		err = register_netdev(adapter->port[i]);
6584 		if (err)
6585 			break;
6586 		adapter->chan_map[pi->tx_chan] = i;
6587 		print_port_info(adapter->port[i]);
6588 	}
6589 	if (i == 0) {
6590 		dev_err(&pdev->dev, "could not register any net devices\n");
6591 		goto out_free_dev;
6592 	}
6593 	if (err) {
6594 		dev_warn(&pdev->dev, "only %d net devices registered\n", i);
6595 		err = 0;
6596 	}
6597 
6598 	if (cxgb4_debugfs_root) {
6599 		adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
6600 							   cxgb4_debugfs_root);
6601 		setup_debugfs(adapter);
6602 	}
6603 
6604 	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
6605 	pdev->needs_freset = 1;
6606 
6607 	if (is_uld(adapter))
6608 		cxgb4_uld_enable(adapter);
6609 
6610 	if (!is_t4(adapter->params.chip))
6611 		cxgb4_ptp_init(adapter);
6612 
6613 	if (IS_REACHABLE(CONFIG_THERMAL) &&
6614 	    !is_t4(adapter->params.chip) && (adapter->flags & CXGB4_FW_OK))
6615 		cxgb4_thermal_init(adapter);
6616 
6617 	print_adapter_info(adapter);
6618 	return 0;
6619 
6620  out_free_dev:
6621 	t4_free_sge_resources(adapter);
6622 	free_some_resources(adapter);
6623 	if (adapter->flags & CXGB4_USING_MSIX)
6624 		free_msix_info(adapter);
6625 	if (adapter->num_uld || adapter->num_ofld_uld)
6626 		t4_uld_mem_free(adapter);
6627  out_unmap_bar:
6628 	if (!is_t4(adapter->params.chip))
6629 		iounmap(adapter->bar2);
6630  out_free_adapter:
6631 	if (adapter->workq)
6632 		destroy_workqueue(adapter->workq);
6633 
6634 	kfree(adapter->mbox_log);
6635 	kfree(adapter);
6636  out_unmap_bar0:
6637 	iounmap(regs);
6638  out_disable_device:
6639 	pci_disable_pcie_error_reporting(pdev);
6640 	pci_disable_device(pdev);
6641  out_release_regions:
6642 	pci_release_regions(pdev);
6643 	return err;
6644 }
6645 
6646 static void remove_one(struct pci_dev *pdev)
6647 {
6648 	struct adapter *adapter = pci_get_drvdata(pdev);
6649 	struct hash_mac_addr *entry, *tmp;
6650 
6651 	if (!adapter) {
6652 		pci_release_regions(pdev);
6653 		return;
6654 	}
6655 
6656 	/* If we allocated filters, free up state associated with any
6657 	 * valid filters ...
6658 	 */
6659 	clear_all_filters(adapter);
6660 
6661 	adapter->flags |= CXGB4_SHUTTING_DOWN;
6662 
6663 	if (adapter->pf == 4) {
6664 		int i;
6665 
6666 		/* Tear down per-adapter Work Queue first since it can contain
6667 		 * references to our adapter data structure.
6668 		 */
6669 		destroy_workqueue(adapter->workq);
6670 
6671 		if (is_uld(adapter)) {
6672 			detach_ulds(adapter);
6673 			t4_uld_clean_up(adapter);
6674 		}
6675 
6676 		adap_free_hma_mem(adapter);
6677 
6678 		disable_interrupts(adapter);
6679 
6680 		cxgb4_free_mps_ref_entries(adapter);
6681 
6682 		for_each_port(adapter, i)
6683 			if (adapter->port[i]->reg_state == NETREG_REGISTERED)
6684 				unregister_netdev(adapter->port[i]);
6685 
6686 		debugfs_remove_recursive(adapter->debugfs_root);
6687 
6688 		if (!is_t4(adapter->params.chip))
6689 			cxgb4_ptp_stop(adapter);
6690 		if (IS_REACHABLE(CONFIG_THERMAL))
6691 			cxgb4_thermal_remove(adapter);
6692 
6693 		if (adapter->flags & CXGB4_FULL_INIT_DONE)
6694 			cxgb_down(adapter);
6695 
6696 		if (adapter->flags & CXGB4_USING_MSIX)
6697 			free_msix_info(adapter);
6698 		if (adapter->num_uld || adapter->num_ofld_uld)
6699 			t4_uld_mem_free(adapter);
6700 		free_some_resources(adapter);
6701 		list_for_each_entry_safe(entry, tmp, &adapter->mac_hlist,
6702 					 list) {
6703 			list_del(&entry->list);
6704 			kfree(entry);
6705 		}
6706 
6707 #if IS_ENABLED(CONFIG_IPV6)
6708 		t4_cleanup_clip_tbl(adapter);
6709 #endif
6710 		if (!is_t4(adapter->params.chip))
6711 			iounmap(adapter->bar2);
6712 	}
6713 #ifdef CONFIG_PCI_IOV
6714 	else {
6715 		cxgb4_iov_configure(adapter->pdev, 0);
6716 	}
6717 #endif
6718 	iounmap(adapter->regs);
6719 	pci_disable_pcie_error_reporting(pdev);
6720 	if ((adapter->flags & CXGB4_DEV_ENABLED)) {
6721 		pci_disable_device(pdev);
6722 		adapter->flags &= ~CXGB4_DEV_ENABLED;
6723 	}
6724 	pci_release_regions(pdev);
6725 	kfree(adapter->mbox_log);
6726 	synchronize_rcu();
6727 	kfree(adapter);
6728 }
6729 
6730 /* "Shutdown" quiesces the device, stopping Ingress Packet and Interrupt
6731  * delivery.  This is essentially a stripped down version of the PCI remove()
6732  * function where we do the minimal amount of work necessary to shutdown any
6733  * further activity.
6734  */
6735 static void shutdown_one(struct pci_dev *pdev)
6736 {
6737 	struct adapter *adapter = pci_get_drvdata(pdev);
6738 
6739 	/* As with remove_one() above (see extended comment), we only want to
6740 	 * do cleanup on PCI Devices which went all the way through init_one()
6741 	 * ...
6742 	 */
6743 	if (!adapter) {
6744 		pci_release_regions(pdev);
6745 		return;
6746 	}
6747 
6748 	adapter->flags |= CXGB4_SHUTTING_DOWN;
6749 
6750 	if (adapter->pf == 4) {
6751 		int i;
6752 
6753 		for_each_port(adapter, i)
6754 			if (adapter->port[i]->reg_state == NETREG_REGISTERED)
6755 				cxgb_close(adapter->port[i]);
6756 
6757 		rtnl_lock();
6758 		cxgb4_mqprio_stop_offload(adapter);
6759 		rtnl_unlock();
6760 
6761 		if (is_uld(adapter)) {
6762 			detach_ulds(adapter);
6763 			t4_uld_clean_up(adapter);
6764 		}
6765 
6766 		disable_interrupts(adapter);
6767 		disable_msi(adapter);
6768 
6769 		t4_sge_stop(adapter);
6770 		if (adapter->flags & CXGB4_FW_OK)
6771 			t4_fw_bye(adapter, adapter->mbox);
6772 	}
6773 }
6774 
6775 static struct pci_driver cxgb4_driver = {
6776 	.name     = KBUILD_MODNAME,
6777 	.id_table = cxgb4_pci_tbl,
6778 	.probe    = init_one,
6779 	.remove   = remove_one,
6780 	.shutdown = shutdown_one,
6781 #ifdef CONFIG_PCI_IOV
6782 	.sriov_configure = cxgb4_iov_configure,
6783 #endif
6784 	.err_handler = &cxgb4_eeh,
6785 };
6786 
6787 static int __init cxgb4_init_module(void)
6788 {
6789 	int ret;
6790 
6791 	cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
6792 
6793 	ret = pci_register_driver(&cxgb4_driver);
6794 	if (ret < 0)
6795 		goto err_pci;
6796 
6797 #if IS_ENABLED(CONFIG_IPV6)
6798 	if (!inet6addr_registered) {
6799 		ret = register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
6800 		if (ret)
6801 			pci_unregister_driver(&cxgb4_driver);
6802 		else
6803 			inet6addr_registered = true;
6804 	}
6805 #endif
6806 
6807 	if (ret == 0)
6808 		return ret;
6809 
6810 err_pci:
6811 	debugfs_remove(cxgb4_debugfs_root);
6812 
6813 	return ret;
6814 }
6815 
6816 static void __exit cxgb4_cleanup_module(void)
6817 {
6818 #if IS_ENABLED(CONFIG_IPV6)
6819 	if (inet6addr_registered) {
6820 		unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
6821 		inet6addr_registered = false;
6822 	}
6823 #endif
6824 	pci_unregister_driver(&cxgb4_driver);
6825 	debugfs_remove(cxgb4_debugfs_root);  /* NULL ok */
6826 }
6827 
6828 module_init(cxgb4_init_module);
6829 module_exit(cxgb4_cleanup_module);
6830