1 /*
2  * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
3  * driver for Linux.
4  *
5  * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
6  *
7  * This software is available to you under a choice of one of two
8  * licenses.  You may choose to be licensed under the terms of the GNU
9  * General Public License (GPL) Version 2, available from the file
10  * COPYING in the main directory of this source tree, or the
11  * OpenIB.org BSD license below:
12  *
13  *     Redistribution and use in source and binary forms, with or
14  *     without modification, are permitted provided that the following
15  *     conditions are met:
16  *
17  *      - Redistributions of source code must retain the above
18  *        copyright notice, this list of conditions and the following
19  *        disclaimer.
20  *
21  *      - Redistributions in binary form must reproduce the above
22  *        copyright notice, this list of conditions and the following
23  *        disclaimer in the documentation and/or other materials
24  *        provided with the distribution.
25  *
26  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33  * SOFTWARE.
34  */
35 
36 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
37 
38 #include <linux/module.h>
39 #include <linux/moduleparam.h>
40 #include <linux/init.h>
41 #include <linux/pci.h>
42 #include <linux/dma-mapping.h>
43 #include <linux/netdevice.h>
44 #include <linux/etherdevice.h>
45 #include <linux/debugfs.h>
46 #include <linux/ethtool.h>
47 #include <linux/mdio.h>
48 
49 #include "t4vf_common.h"
50 #include "t4vf_defs.h"
51 
52 #include "../cxgb4/t4_regs.h"
53 #include "../cxgb4/t4_msg.h"
54 
55 /*
56  * Generic information about the driver.
57  */
58 #define DRV_DESC "Chelsio T4/T5/T6 Virtual Function (VF) Network Driver"
59 
60 /*
61  * Module Parameters.
62  * ==================
63  */
64 
65 /*
66  * Default ethtool "message level" for adapters.
67  */
68 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
69 			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
70 			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
71 
72 /*
73  * The driver uses the best interrupt scheme available on a platform in the
74  * order MSI-X then MSI.  This parameter determines which of these schemes the
75  * driver may consider as follows:
76  *
77  *     msi = 2: choose from among MSI-X and MSI
78  *     msi = 1: only consider MSI interrupts
79  *
80  * Note that unlike the Physical Function driver, this Virtual Function driver
81  * does _not_ support legacy INTx interrupts (this limitation is mandated by
82  * the PCI-E SR-IOV standard).
83  */
84 #define MSI_MSIX	2
85 #define MSI_MSI		1
86 #define MSI_DEFAULT	MSI_MSIX
87 
88 static int msi = MSI_DEFAULT;
89 
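/*
 * For example, loading the module with "modprobe cxgb4vf msi=1" restricts the
 * driver to plain MSI; the default (msi=2) lets it prefer MSI-X when available.
 */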
90 module_param(msi, int, 0644);
91 MODULE_PARM_DESC(msi, "whether to use MSI-X or MSI");
92 
93 /*
94  * Fundamental constants.
95  * ======================
96  */
97 
98 enum {
99 	MAX_TXQ_ENTRIES		= 16384,
100 	MAX_RSPQ_ENTRIES	= 16384,
101 	MAX_RX_BUFFERS		= 16384,
102 
103 	MIN_TXQ_ENTRIES		= 32,
104 	MIN_RSPQ_ENTRIES	= 128,
105 	MIN_FL_ENTRIES		= 16,
106 
107 	/*
108 	 * For purposes of manipulating the Free List size we need to
109 	 * recognize that Free Lists are actually Egress Queues (the host
	 * produces free buffers which the hardware consumes), Egress Queue
	 * indices are in units of Egress Context Units (EQ_UNIT bytes), and free
112 	 * list entries are 64-bit PCI DMA addresses.  And since the state of
113 	 * the Producer Index == the Consumer Index implies an EMPTY list, we
114 	 * always have at least one Egress Unit's worth of Free List entries
115 	 * unused.  See sge.c for more details ...
116 	 */
117 	EQ_UNIT = SGE_EQ_IDXSIZE,
118 	FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
119 	MIN_FL_RESID = FL_PER_EQ_UNIT,
120 };
121 
122 /*
123  * Global driver state.
124  * ====================
125  */
126 
127 static struct dentry *cxgb4vf_debugfs_root;
128 
129 /*
130  * OS "Callback" functions.
131  * ========================
132  */
133 
134 /*
135  * The link status has changed on the indicated "port" (Virtual Interface).
136  */
137 void t4vf_os_link_changed(struct adapter *adapter, int pidx, int link_ok)
138 {
139 	struct net_device *dev = adapter->port[pidx];
140 
141 	/*
142 	 * If the port is disabled or the current recorded "link up"
143 	 * status matches the new status, just return.
144 	 */
145 	if (!netif_running(dev) || link_ok == netif_carrier_ok(dev))
146 		return;
147 
148 	/*
149 	 * Tell the OS that the link status has changed and print a short
150 	 * informative message on the console about the event.
151 	 */
152 	if (link_ok) {
153 		const char *s;
154 		const char *fc;
155 		const struct port_info *pi = netdev_priv(dev);
156 
157 		netif_carrier_on(dev);
158 
159 		switch (pi->link_cfg.speed) {
160 		case 100:
161 			s = "100Mbps";
162 			break;
163 		case 1000:
164 			s = "1Gbps";
165 			break;
166 		case 10000:
167 			s = "10Gbps";
168 			break;
169 		case 25000:
170 			s = "25Gbps";
171 			break;
172 		case 40000:
173 			s = "40Gbps";
174 			break;
175 		case 100000:
176 			s = "100Gbps";
177 			break;
178 
179 		default:
180 			s = "unknown";
181 			break;
182 		}
183 
184 		switch ((int)pi->link_cfg.fc) {
185 		case PAUSE_RX:
186 			fc = "RX";
187 			break;
188 
189 		case PAUSE_TX:
190 			fc = "TX";
191 			break;
192 
193 		case PAUSE_RX | PAUSE_TX:
194 			fc = "RX/TX";
195 			break;
196 
197 		default:
198 			fc = "no";
199 			break;
200 		}
201 
202 		netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s, fc);
203 	} else {
204 		netif_carrier_off(dev);
205 		netdev_info(dev, "link down\n");
206 	}
207 }
208 
209 /*
 * The port module type has changed on the indicated "port" (Virtual
211  * Interface).
212  */
213 void t4vf_os_portmod_changed(struct adapter *adapter, int pidx)
214 {
215 	static const char * const mod_str[] = {
216 		NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
217 	};
218 	const struct net_device *dev = adapter->port[pidx];
219 	const struct port_info *pi = netdev_priv(dev);
220 
221 	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
222 		dev_info(adapter->pdev_dev, "%s: port module unplugged\n",
223 			 dev->name);
224 	else if (pi->mod_type < ARRAY_SIZE(mod_str))
225 		dev_info(adapter->pdev_dev, "%s: %s port module inserted\n",
226 			 dev->name, mod_str[pi->mod_type]);
227 	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
228 		dev_info(adapter->pdev_dev, "%s: unsupported optical port "
229 			 "module inserted\n", dev->name);
230 	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
		dev_info(adapter->pdev_dev, "%s: unknown port module inserted, "
			 "forcing TWINAX\n", dev->name);
233 	else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR)
234 		dev_info(adapter->pdev_dev, "%s: transceiver module error\n",
235 			 dev->name);
236 	else
237 		dev_info(adapter->pdev_dev, "%s: unknown module type %d "
238 			 "inserted\n", dev->name, pi->mod_type);
239 }
240 
241 static int cxgb4vf_set_addr_hash(struct port_info *pi)
242 {
243 	struct adapter *adapter = pi->adapter;
244 	u64 vec = 0;
245 	bool ucast = false;
246 	struct hash_mac_addr *entry;
247 
248 	/* Calculate the hash vector for the updated list and program it */
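	/* hash_mac_addr() folds each 48-bit MAC address down to a 6-bit
	 * bucket index, so "vec" becomes a 64-bit bitmap of occupied hash
	 * buckets.
	 */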
249 	list_for_each_entry(entry, &adapter->mac_hlist, list) {
250 		ucast |= is_unicast_ether_addr(entry->addr);
251 		vec |= (1ULL << hash_mac_addr(entry->addr));
252 	}
253 	return t4vf_set_addr_hash(adapter, pi->viid, ucast, vec, false);
254 }
255 
256 /**
257  *	cxgb4vf_change_mac - Update match filter for a MAC address.
258  *	@pi: the port_info
259  *	@viid: the VI id
260  *	@tcam_idx: TCAM index of existing filter for old value of MAC address,
261  *		   or -1
262  *	@addr: the new MAC address value
263  *	@persistent: whether a new MAC allocation should be persistent
264  *
 *	Modifies an MPS filter and sets it to the new MAC address if
 *	@tcam_idx >= 0, or adds the MAC address to a new filter if
 *	@tcam_idx < 0.  In the latter case the address is added persistently
 *	if @persistent is %true.
 *	If the TCAM runs out of entries, the address is programmed into the
 *	hash region instead.
270  *
271  */
272 static int cxgb4vf_change_mac(struct port_info *pi, unsigned int viid,
273 			      int *tcam_idx, const u8 *addr, bool persistent)
274 {
275 	struct hash_mac_addr *new_entry, *entry;
276 	struct adapter *adapter = pi->adapter;
277 	int ret;
278 
279 	ret = t4vf_change_mac(adapter, viid, *tcam_idx, addr, persistent);
	/* We ran out of TCAM entries.  Try programming the hash region. */
281 	if (ret == -ENOMEM) {
		/* If the MAC address to be updated is already in the hash
		 * address list, update it there.
		 */
285 		list_for_each_entry(entry, &adapter->mac_hlist, list) {
286 			if (entry->iface_mac) {
287 				ether_addr_copy(entry->addr, addr);
288 				goto set_hash;
289 			}
290 		}
291 		new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
292 		if (!new_entry)
293 			return -ENOMEM;
294 		ether_addr_copy(new_entry->addr, addr);
295 		new_entry->iface_mac = true;
296 		list_add_tail(&new_entry->list, &adapter->mac_hlist);
297 set_hash:
298 		ret = cxgb4vf_set_addr_hash(pi);
299 	} else if (ret >= 0) {
300 		*tcam_idx = ret;
301 		ret = 0;
302 	}
303 
304 	return ret;
305 }
306 
307 /*
308  * Net device operations.
309  * ======================
310  */
311 
312 
313 
314 
315 /*
316  * Perform the MAC and PHY actions needed to enable a "port" (Virtual
317  * Interface).
318  */
319 static int link_start(struct net_device *dev)
320 {
321 	int ret;
322 	struct port_info *pi = netdev_priv(dev);
323 
	/*
	 * We do not set address filters or promiscuity here; the stack does
	 * that step explicitly.  Enable VLAN acceleration.
	 */
328 	ret = t4vf_set_rxmode(pi->adapter, pi->viid, dev->mtu, -1, -1, -1, 1,
329 			      true);
330 	if (ret == 0)
331 		ret = cxgb4vf_change_mac(pi, pi->viid,
332 					 &pi->xact_addr_filt,
333 					 dev->dev_addr, true);
334 
335 	/*
336 	 * We don't need to actually "start the link" itself since the
337 	 * firmware will do that for us when the first Virtual Interface
338 	 * is enabled on a port.
339 	 */
340 	if (ret == 0)
341 		ret = t4vf_enable_pi(pi->adapter, pi, true, true);
342 
343 	return ret;
344 }
345 
346 /*
347  * Name the MSI-X interrupts.
348  */
349 static void name_msix_vecs(struct adapter *adapter)
350 {
351 	int namelen = sizeof(adapter->msix_info[0].desc) - 1;
352 	int pidx;
353 
354 	/*
355 	 * Firmware events.
356 	 */
357 	snprintf(adapter->msix_info[MSIX_FW].desc, namelen,
358 		 "%s-FWeventq", adapter->name);
359 	adapter->msix_info[MSIX_FW].desc[namelen] = 0;
360 
361 	/*
362 	 * Ethernet queues.
363 	 */
364 	for_each_port(adapter, pidx) {
365 		struct net_device *dev = adapter->port[pidx];
366 		const struct port_info *pi = netdev_priv(dev);
367 		int qs, msi;
368 
369 		for (qs = 0, msi = MSIX_IQFLINT; qs < pi->nqsets; qs++, msi++) {
370 			snprintf(adapter->msix_info[msi].desc, namelen,
371 				 "%s-%d", dev->name, qs);
372 			adapter->msix_info[msi].desc[namelen] = 0;
373 		}
374 	}
375 }
376 
377 /*
378  * Request all of our MSI-X resources.
379  */
380 static int request_msix_queue_irqs(struct adapter *adapter)
381 {
382 	struct sge *s = &adapter->sge;
383 	int rxq, msi, err;
384 
385 	/*
386 	 * Firmware events.
387 	 */
388 	err = request_irq(adapter->msix_info[MSIX_FW].vec, t4vf_sge_intr_msix,
389 			  0, adapter->msix_info[MSIX_FW].desc, &s->fw_evtq);
390 	if (err)
391 		return err;
392 
393 	/*
394 	 * Ethernet queues.
395 	 */
396 	msi = MSIX_IQFLINT;
397 	for_each_ethrxq(s, rxq) {
398 		err = request_irq(adapter->msix_info[msi].vec,
399 				  t4vf_sge_intr_msix, 0,
400 				  adapter->msix_info[msi].desc,
401 				  &s->ethrxq[rxq].rspq);
402 		if (err)
403 			goto err_free_irqs;
404 		msi++;
405 	}
406 	return 0;
407 
408 err_free_irqs:
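	/* Unwind in reverse order: free the Queue Set IRQs we managed to
	 * request before the failure, then the firmware event queue IRQ.
	 */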
409 	while (--rxq >= 0)
410 		free_irq(adapter->msix_info[--msi].vec, &s->ethrxq[rxq].rspq);
411 	free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq);
412 	return err;
413 }
414 
415 /*
416  * Free our MSI-X resources.
417  */
418 static void free_msix_queue_irqs(struct adapter *adapter)
419 {
420 	struct sge *s = &adapter->sge;
421 	int rxq, msi;
422 
423 	free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq);
424 	msi = MSIX_IQFLINT;
425 	for_each_ethrxq(s, rxq)
426 		free_irq(adapter->msix_info[msi++].vec,
427 			 &s->ethrxq[rxq].rspq);
428 }
429 
430 /*
431  * Turn on NAPI and start up interrupts on a response queue.
432  */
433 static void qenable(struct sge_rspq *rspq)
434 {
435 	napi_enable(&rspq->napi);
436 
437 	/*
438 	 * 0-increment the Going To Sleep register to start the timer and
439 	 * enable interrupts.
440 	 */
441 	t4_write_reg(rspq->adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
442 		     CIDXINC_V(0) |
443 		     SEINTARM_V(rspq->intr_params) |
444 		     INGRESSQID_V(rspq->cntxt_id));
445 }
446 
447 /*
448  * Enable NAPI scheduling and interrupt generation for all Receive Queues.
449  */
450 static void enable_rx(struct adapter *adapter)
451 {
452 	int rxq;
453 	struct sge *s = &adapter->sge;
454 
455 	for_each_ethrxq(s, rxq)
456 		qenable(&s->ethrxq[rxq].rspq);
457 	qenable(&s->fw_evtq);
458 
459 	/*
460 	 * The interrupt queue doesn't use NAPI so we do the 0-increment of
461 	 * its Going To Sleep register here to get it started.
462 	 */
463 	if (adapter->flags & CXGB4VF_USING_MSI)
464 		t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
465 			     CIDXINC_V(0) |
466 			     SEINTARM_V(s->intrq.intr_params) |
467 			     INGRESSQID_V(s->intrq.cntxt_id));
468 
469 }
470 
471 /*
472  * Wait until all NAPI handlers are descheduled.
473  */
474 static void quiesce_rx(struct adapter *adapter)
475 {
476 	struct sge *s = &adapter->sge;
477 	int rxq;
478 
479 	for_each_ethrxq(s, rxq)
480 		napi_disable(&s->ethrxq[rxq].rspq.napi);
481 	napi_disable(&s->fw_evtq.napi);
482 }
483 
484 /*
485  * Response queue handler for the firmware event queue.
486  */
487 static int fwevtq_handler(struct sge_rspq *rspq, const __be64 *rsp,
488 			  const struct pkt_gl *gl)
489 {
490 	/*
491 	 * Extract response opcode and get pointer to CPL message body.
492 	 */
493 	struct adapter *adapter = rspq->adapter;
494 	u8 opcode = ((const struct rss_header *)rsp)->opcode;
495 	void *cpl = (void *)(rsp + 1);
496 
497 	switch (opcode) {
498 	case CPL_FW6_MSG: {
499 		/*
500 		 * We've received an asynchronous message from the firmware.
501 		 */
502 		const struct cpl_fw6_msg *fw_msg = cpl;
503 		if (fw_msg->type == FW6_TYPE_CMD_RPL)
504 			t4vf_handle_fw_rpl(adapter, fw_msg->data);
505 		break;
506 	}
507 
508 	case CPL_FW4_MSG: {
509 		/* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
510 		 */
511 		const struct cpl_sge_egr_update *p = (void *)(rsp + 3);
512 		opcode = CPL_OPCODE_G(ntohl(p->opcode_qid));
513 		if (opcode != CPL_SGE_EGR_UPDATE) {
			dev_err(adapter->pdev_dev,
				"unexpected FW4/CPL %#x on FW event queue\n",
				opcode);
516 			break;
517 		}
518 		cpl = (void *)p;
519 	}
520 		/* Fall through */
521 
522 	case CPL_SGE_EGR_UPDATE: {
523 		/*
524 		 * We've received an Egress Queue Status Update message.  We
525 		 * get these, if the SGE is configured to send these when the
526 		 * firmware passes certain points in processing our TX
527 		 * Ethernet Queue or if we make an explicit request for one.
528 		 * We use these updates to determine when we may need to
529 		 * restart a TX Ethernet Queue which was stopped for lack of
530 		 * free TX Queue Descriptors ...
531 		 */
532 		const struct cpl_sge_egr_update *p = cpl;
533 		unsigned int qid = EGR_QID_G(be32_to_cpu(p->opcode_qid));
534 		struct sge *s = &adapter->sge;
535 		struct sge_txq *tq;
536 		struct sge_eth_txq *txq;
537 		unsigned int eq_idx;
538 
539 		/*
540 		 * Perform sanity checking on the Queue ID to make sure it
541 		 * really refers to one of our TX Ethernet Egress Queues which
		 * is active and matches the queue's ID.  None of these error
		 * conditions should ever happen, so we may want to make them
		 * fatal and/or conditionalize them under DEBUG.
545 		 */
546 		eq_idx = EQ_IDX(s, qid);
547 		if (unlikely(eq_idx >= MAX_EGRQ)) {
548 			dev_err(adapter->pdev_dev,
549 				"Egress Update QID %d out of range\n", qid);
550 			break;
551 		}
552 		tq = s->egr_map[eq_idx];
553 		if (unlikely(tq == NULL)) {
554 			dev_err(adapter->pdev_dev,
555 				"Egress Update QID %d TXQ=NULL\n", qid);
556 			break;
557 		}
558 		txq = container_of(tq, struct sge_eth_txq, q);
559 		if (unlikely(tq->abs_id != qid)) {
560 			dev_err(adapter->pdev_dev,
561 				"Egress Update QID %d refers to TXQ %d\n",
562 				qid, tq->abs_id);
563 			break;
564 		}
565 
566 		/*
567 		 * Restart a stopped TX Queue which has less than half of its
568 		 * TX ring in use ...
569 		 */
570 		txq->q.restarts++;
571 		netif_tx_wake_queue(txq->txq);
572 		break;
573 	}
574 
575 	default:
576 		dev_err(adapter->pdev_dev,
577 			"unexpected CPL %#x on FW event queue\n", opcode);
578 	}
579 
580 	return 0;
581 }
582 
583 /*
584  * Allocate SGE TX/RX response queues.  Determine how many sets of SGE queues
 * to use and initialize them.  We support multiple "Queue Sets" per port if
586  * we have MSI-X, otherwise just one queue set per port.
587  */
588 static int setup_sge_queues(struct adapter *adapter)
589 {
590 	struct sge *s = &adapter->sge;
591 	int err, pidx, msix;
592 
593 	/*
594 	 * Clear "Queue Set" Free List Starving and TX Queue Mapping Error
595 	 * state.
596 	 */
597 	bitmap_zero(s->starving_fl, MAX_EGRQ);
598 
599 	/*
600 	 * If we're using MSI interrupt mode we need to set up a "forwarded
601 	 * interrupt" queue which we'll set up with our MSI vector.  The rest
602 	 * of the ingress queues will be set up to forward their interrupts to
603 	 * this queue ...  This must be first since t4vf_sge_alloc_rxq() uses
604 	 * the intrq's queue ID as the interrupt forwarding queue for the
605 	 * subsequent calls ...
606 	 */
607 	if (adapter->flags & CXGB4VF_USING_MSI) {
608 		err = t4vf_sge_alloc_rxq(adapter, &s->intrq, false,
609 					 adapter->port[0], 0, NULL, NULL);
610 		if (err)
611 			goto err_free_queues;
612 	}
613 
614 	/*
615 	 * Allocate our ingress queue for asynchronous firmware messages.
616 	 */
617 	err = t4vf_sge_alloc_rxq(adapter, &s->fw_evtq, true, adapter->port[0],
618 				 MSIX_FW, NULL, fwevtq_handler);
619 	if (err)
620 		goto err_free_queues;
621 
622 	/*
623 	 * Allocate each "port"'s initial Queue Sets.  These can be changed
624 	 * later on ... up to the point where any interface on the adapter is
625 	 * brought up at which point lots of things get nailed down
626 	 * permanently ...
627 	 */
628 	msix = MSIX_IQFLINT;
629 	for_each_port(adapter, pidx) {
630 		struct net_device *dev = adapter->port[pidx];
631 		struct port_info *pi = netdev_priv(dev);
632 		struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset];
633 		struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset];
634 		int qs;
635 
636 		for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
637 			err = t4vf_sge_alloc_rxq(adapter, &rxq->rspq, false,
638 						 dev, msix++,
639 						 &rxq->fl, t4vf_ethrx_handler);
640 			if (err)
641 				goto err_free_queues;
642 
643 			err = t4vf_sge_alloc_eth_txq(adapter, txq, dev,
644 					     netdev_get_tx_queue(dev, qs),
645 					     s->fw_evtq.cntxt_id);
646 			if (err)
647 				goto err_free_queues;
648 
649 			rxq->rspq.idx = qs;
650 			memset(&rxq->stats, 0, sizeof(rxq->stats));
651 		}
652 	}
653 
654 	/*
655 	 * Create the reverse mappings for the queues.
656 	 */
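	/* The bases below are used by the IQ_MAP()/EQ_MAP() macros to turn
	 * absolute (firmware) queue IDs into indices in our reverse-mapping
	 * tables.
	 */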
657 	s->egr_base = s->ethtxq[0].q.abs_id - s->ethtxq[0].q.cntxt_id;
658 	s->ingr_base = s->ethrxq[0].rspq.abs_id - s->ethrxq[0].rspq.cntxt_id;
659 	IQ_MAP(s, s->fw_evtq.abs_id) = &s->fw_evtq;
660 	for_each_port(adapter, pidx) {
661 		struct net_device *dev = adapter->port[pidx];
662 		struct port_info *pi = netdev_priv(dev);
663 		struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset];
664 		struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset];
665 		int qs;
666 
667 		for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
668 			IQ_MAP(s, rxq->rspq.abs_id) = &rxq->rspq;
669 			EQ_MAP(s, txq->q.abs_id) = &txq->q;
670 
671 			/*
672 			 * The FW_IQ_CMD doesn't return the Absolute Queue IDs
673 			 * for Free Lists but since all of the Egress Queues
674 			 * (including Free Lists) have Relative Queue IDs
675 			 * which are computed as Absolute - Base Queue ID, we
676 			 * can synthesize the Absolute Queue IDs for the Free
677 			 * Lists.  This is useful for debugging purposes when
678 			 * we want to dump Queue Contexts via the PF Driver.
679 			 */
680 			rxq->fl.abs_id = rxq->fl.cntxt_id + s->egr_base;
681 			EQ_MAP(s, rxq->fl.abs_id) = &rxq->fl;
682 		}
683 	}
684 	return 0;
685 
686 err_free_queues:
687 	t4vf_free_sge_resources(adapter);
688 	return err;
689 }
690 
691 /*
692  * Set up Receive Side Scaling (RSS) to distribute packets to multiple receive
693  * queues.  We configure the RSS CPU lookup table to distribute to the number
694  * of HW receive queues, and the response queue lookup table to narrow that
695  * down to the response queues actually configured for each "port" (Virtual
696  * Interface).  We always configure the RSS mapping for all ports since the
697  * mapping table has plenty of entries.
698  */
699 static int setup_rss(struct adapter *adapter)
700 {
701 	int pidx;
702 
703 	for_each_port(adapter, pidx) {
704 		struct port_info *pi = adap2pinfo(adapter, pidx);
705 		struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[pi->first_qset];
706 		u16 rss[MAX_PORT_QSETS];
707 		int qs, err;
708 
709 		for (qs = 0; qs < pi->nqsets; qs++)
710 			rss[qs] = rxq[qs].rspq.abs_id;
711 
712 		err = t4vf_config_rss_range(adapter, pi->viid,
713 					    0, pi->rss_size, rss, pi->nqsets);
714 		if (err)
715 			return err;
716 
717 		/*
718 		 * Perform Global RSS Mode-specific initialization.
719 		 */
720 		switch (adapter->params.rss.mode) {
721 		case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL:
722 			/*
723 			 * If Tunnel All Lookup isn't specified in the global
724 			 * RSS Configuration, then we need to specify a
725 			 * default Ingress Queue for any ingress packets which
726 			 * aren't hashed.  We'll use our first ingress queue
727 			 * ...
728 			 */
729 			if (!adapter->params.rss.u.basicvirtual.tnlalllookup) {
730 				union rss_vi_config config;
731 				err = t4vf_read_rss_vi_config(adapter,
732 							      pi->viid,
733 							      &config);
734 				if (err)
735 					return err;
736 				config.basicvirtual.defaultq =
737 					rxq[0].rspq.abs_id;
738 				err = t4vf_write_rss_vi_config(adapter,
739 							       pi->viid,
740 							       &config);
741 				if (err)
742 					return err;
743 			}
744 			break;
745 		}
746 	}
747 
748 	return 0;
749 }
750 
751 /*
752  * Bring the adapter up.  Called whenever we go from no "ports" open to having
753  * one open.  This function performs the actions necessary to make an adapter
754  * operational, such as completing the initialization of HW modules, and
755  * enabling interrupts.  Must be called with the rtnl lock held.  (Note that
756  * this is called "cxgb_up" in the PF Driver.)
757  */
758 static int adapter_up(struct adapter *adapter)
759 {
760 	int err;
761 
762 	/*
763 	 * If this is the first time we've been called, perform basic
764 	 * adapter setup.  Once we've done this, many of our adapter
765 	 * parameters can no longer be changed ...
766 	 */
767 	if ((adapter->flags & CXGB4VF_FULL_INIT_DONE) == 0) {
768 		err = setup_sge_queues(adapter);
769 		if (err)
770 			return err;
771 		err = setup_rss(adapter);
772 		if (err) {
773 			t4vf_free_sge_resources(adapter);
774 			return err;
775 		}
776 
777 		if (adapter->flags & CXGB4VF_USING_MSIX)
778 			name_msix_vecs(adapter);
779 
780 		adapter->flags |= CXGB4VF_FULL_INIT_DONE;
781 	}
782 
783 	/*
784 	 * Acquire our interrupt resources.  We only support MSI-X and MSI.
785 	 */
786 	BUG_ON((adapter->flags &
787 	       (CXGB4VF_USING_MSIX | CXGB4VF_USING_MSI)) == 0);
788 	if (adapter->flags & CXGB4VF_USING_MSIX)
789 		err = request_msix_queue_irqs(adapter);
790 	else
791 		err = request_irq(adapter->pdev->irq,
792 				  t4vf_intr_handler(adapter), 0,
793 				  adapter->name, adapter);
794 	if (err) {
795 		dev_err(adapter->pdev_dev, "request_irq failed, err %d\n",
796 			err);
797 		return err;
798 	}
799 
800 	/*
801 	 * Enable NAPI ingress processing and return success.
802 	 */
803 	enable_rx(adapter);
804 	t4vf_sge_start(adapter);
805 
806 	return 0;
807 }
808 
809 /*
810  * Bring the adapter down.  Called whenever the last "port" (Virtual
 * Interface) is closed.  (Note that this routine is called "cxgb_down" in the PF
812  * Driver.)
813  */
814 static void adapter_down(struct adapter *adapter)
815 {
816 	/*
817 	 * Free interrupt resources.
818 	 */
819 	if (adapter->flags & CXGB4VF_USING_MSIX)
820 		free_msix_queue_irqs(adapter);
821 	else
822 		free_irq(adapter->pdev->irq, adapter);
823 
824 	/*
825 	 * Wait for NAPI handlers to finish.
826 	 */
827 	quiesce_rx(adapter);
828 }
829 
830 /*
831  * Start up a net device.
832  */
833 static int cxgb4vf_open(struct net_device *dev)
834 {
835 	int err;
836 	struct port_info *pi = netdev_priv(dev);
837 	struct adapter *adapter = pi->adapter;
838 
839 	/*
840 	 * If we don't have a connection to the firmware there's nothing we
841 	 * can do.
842 	 */
843 	if (!(adapter->flags & CXGB4VF_FW_OK))
844 		return -ENXIO;
845 
846 	/*
847 	 * If this is the first interface that we're opening on the "adapter",
848 	 * bring the "adapter" up now.
849 	 */
850 	if (adapter->open_device_map == 0) {
851 		err = adapter_up(adapter);
852 		if (err)
853 			return err;
854 	}
855 
856 	/* It's possible that the basic port information could have
857 	 * changed since we first read it.
858 	 */
859 	err = t4vf_update_port_info(pi);
860 	if (err < 0)
861 		return err;
862 
863 	/*
864 	 * Note that this interface is up and start everything up ...
865 	 */
866 	err = link_start(dev);
867 	if (err)
868 		goto err_unwind;
869 
870 	pi->vlan_id = t4vf_get_vf_vlan_acl(adapter);
871 
872 	netif_tx_start_all_queues(dev);
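	/* open_device_map is a bitmap of open "ports"; it lets us bring the
	 * whole adapter up on the first open and back down on the last close.
	 */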
873 	set_bit(pi->port_id, &adapter->open_device_map);
874 	return 0;
875 
876 err_unwind:
877 	if (adapter->open_device_map == 0)
878 		adapter_down(adapter);
879 	return err;
880 }
881 
882 /*
883  * Shut down a net device.  This routine is called "cxgb_close" in the PF
884  * Driver ...
885  */
886 static int cxgb4vf_stop(struct net_device *dev)
887 {
888 	struct port_info *pi = netdev_priv(dev);
889 	struct adapter *adapter = pi->adapter;
890 
891 	netif_tx_stop_all_queues(dev);
892 	netif_carrier_off(dev);
893 	t4vf_enable_pi(adapter, pi, false, false);
894 
895 	clear_bit(pi->port_id, &adapter->open_device_map);
896 	if (adapter->open_device_map == 0)
897 		adapter_down(adapter);
898 	return 0;
899 }
900 
901 /*
902  * Translate our basic statistics into the standard "ifconfig" statistics.
903  */
904 static struct net_device_stats *cxgb4vf_get_stats(struct net_device *dev)
905 {
906 	struct t4vf_port_stats stats;
907 	struct port_info *pi = netdev2pinfo(dev);
908 	struct adapter *adapter = pi->adapter;
909 	struct net_device_stats *ns = &dev->stats;
910 	int err;
911 
912 	spin_lock(&adapter->stats_lock);
913 	err = t4vf_get_port_stats(adapter, pi->pidx, &stats);
914 	spin_unlock(&adapter->stats_lock);
915 
916 	memset(ns, 0, sizeof(*ns));
917 	if (err)
918 		return ns;
919 
920 	ns->tx_bytes = (stats.tx_bcast_bytes + stats.tx_mcast_bytes +
921 			stats.tx_ucast_bytes + stats.tx_offload_bytes);
922 	ns->tx_packets = (stats.tx_bcast_frames + stats.tx_mcast_frames +
923 			  stats.tx_ucast_frames + stats.tx_offload_frames);
924 	ns->rx_bytes = (stats.rx_bcast_bytes + stats.rx_mcast_bytes +
925 			stats.rx_ucast_bytes);
926 	ns->rx_packets = (stats.rx_bcast_frames + stats.rx_mcast_frames +
927 			  stats.rx_ucast_frames);
928 	ns->multicast = stats.rx_mcast_frames;
929 	ns->tx_errors = stats.tx_drop_frames;
930 	ns->rx_errors = stats.rx_err_frames;
931 
932 	return ns;
933 }
934 
935 static int cxgb4vf_mac_sync(struct net_device *netdev, const u8 *mac_addr)
936 {
937 	struct port_info *pi = netdev_priv(netdev);
938 	struct adapter *adapter = pi->adapter;
939 	int ret;
940 	u64 mhash = 0;
941 	u64 uhash = 0;
942 	bool free = false;
943 	bool ucast = is_unicast_ether_addr(mac_addr);
944 	const u8 *maclist[1] = {mac_addr};
945 	struct hash_mac_addr *new_entry;
946 
947 	ret = t4vf_alloc_mac_filt(adapter, pi->viid, free, 1, maclist,
948 				  NULL, ucast ? &uhash : &mhash, false);
949 	if (ret < 0)
950 		goto out;
	/* If the returned hash is non-zero, add the address to the hash
	 * address list so that, at the end, we can compute the hash vector
	 * for the whole list and program it.
	 */
955 	if (uhash || mhash) {
956 		new_entry = kzalloc(sizeof(*new_entry), GFP_ATOMIC);
957 		if (!new_entry)
958 			return -ENOMEM;
959 		ether_addr_copy(new_entry->addr, mac_addr);
960 		list_add_tail(&new_entry->list, &adapter->mac_hlist);
961 		ret = cxgb4vf_set_addr_hash(pi);
962 	}
963 out:
964 	return ret < 0 ? ret : 0;
965 }
966 
967 static int cxgb4vf_mac_unsync(struct net_device *netdev, const u8 *mac_addr)
968 {
969 	struct port_info *pi = netdev_priv(netdev);
970 	struct adapter *adapter = pi->adapter;
971 	int ret;
972 	const u8 *maclist[1] = {mac_addr};
973 	struct hash_mac_addr *entry, *tmp;
974 
975 	/* If the MAC address to be removed is in the hash addr
976 	 * list, delete it from the list and update hash vector
977 	 */
978 	list_for_each_entry_safe(entry, tmp, &adapter->mac_hlist, list) {
979 		if (ether_addr_equal(entry->addr, mac_addr)) {
980 			list_del(&entry->list);
981 			kfree(entry);
982 			return cxgb4vf_set_addr_hash(pi);
983 		}
984 	}
985 
986 	ret = t4vf_free_mac_filt(adapter, pi->viid, 1, maclist, false);
987 	return ret < 0 ? -EINVAL : 0;
988 }
989 
990 /*
 * Set RX properties of a port, such as promiscuity, address filters, and MTU.
992  * If @mtu is -1 it is left unchanged.
993  */
994 static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
995 {
996 	struct port_info *pi = netdev_priv(dev);
997 
998 	__dev_uc_sync(dev, cxgb4vf_mac_sync, cxgb4vf_mac_unsync);
999 	__dev_mc_sync(dev, cxgb4vf_mac_sync, cxgb4vf_mac_unsync);
1000 	return t4vf_set_rxmode(pi->adapter, pi->viid, -1,
1001 			       (dev->flags & IFF_PROMISC) != 0,
1002 			       (dev->flags & IFF_ALLMULTI) != 0,
1003 			       1, -1, sleep_ok);
1004 }
1005 
1006 /*
1007  * Set the current receive modes on the device.
1008  */
1009 static void cxgb4vf_set_rxmode(struct net_device *dev)
1010 {
1011 	/* unfortunately we can't return errors to the stack */
1012 	set_rxmode(dev, -1, false);
1013 }
1014 
1015 /*
1016  * Find the entry in the interrupt holdoff timer value array which comes
1017  * closest to the specified interrupt holdoff value.
1018  */
1019 static int closest_timer(const struct sge *s, int us)
1020 {
1021 	int i, timer_idx = 0, min_delta = INT_MAX;
1022 
1023 	for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
1024 		int delta = us - s->timer_val[i];
1025 		if (delta < 0)
1026 			delta = -delta;
1027 		if (delta < min_delta) {
1028 			min_delta = delta;
1029 			timer_idx = i;
1030 		}
1031 	}
1032 	return timer_idx;
1033 }
1034 
1035 static int closest_thres(const struct sge *s, int thres)
1036 {
1037 	int i, delta, pktcnt_idx = 0, min_delta = INT_MAX;
1038 
1039 	for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
1040 		delta = thres - s->counter_val[i];
1041 		if (delta < 0)
1042 			delta = -delta;
1043 		if (delta < min_delta) {
1044 			min_delta = delta;
1045 			pktcnt_idx = i;
1046 		}
1047 	}
1048 	return pktcnt_idx;
1049 }
1050 
1051 /*
1052  * Return a queue's interrupt hold-off time in us.  0 means no timer.
1053  */
1054 static unsigned int qtimer_val(const struct adapter *adapter,
1055 			       const struct sge_rspq *rspq)
1056 {
1057 	unsigned int timer_idx = QINTR_TIMER_IDX_G(rspq->intr_params);
1058 
1059 	return timer_idx < SGE_NTIMERS
1060 		? adapter->sge.timer_val[timer_idx]
1061 		: 0;
1062 }
1063 
1064 /**
1065  *	set_rxq_intr_params - set a queue's interrupt holdoff parameters
1066  *	@adapter: the adapter
1067  *	@rspq: the RX response queue
1068  *	@us: the hold-off time in us, or 0 to disable timer
1069  *	@cnt: the hold-off packet count, or 0 to disable counter
1070  *
1071  *	Sets an RX response queue's interrupt hold-off time and packet count.
1072  *	At least one of the two needs to be enabled for the queue to generate
1073  *	interrupts.
1074  */
1075 static int set_rxq_intr_params(struct adapter *adapter, struct sge_rspq *rspq,
1076 			       unsigned int us, unsigned int cnt)
1077 {
1078 	unsigned int timer_idx;
1079 
1080 	/*
1081 	 * If both the interrupt holdoff timer and count are specified as
1082 	 * zero, default to a holdoff count of 1 ...
1083 	 */
1084 	if ((us | cnt) == 0)
1085 		cnt = 1;
1086 
1087 	/*
1088 	 * If an interrupt holdoff count has been specified, then find the
1089 	 * closest configured holdoff count and use that.  If the response
1090 	 * queue has already been created, then update its queue context
1091 	 * parameters ...
1092 	 */
1093 	if (cnt) {
1094 		int err;
1095 		u32 v, pktcnt_idx;
1096 
1097 		pktcnt_idx = closest_thres(&adapter->sge, cnt);
1098 		if (rspq->desc && rspq->pktcnt_idx != pktcnt_idx) {
1099 			v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
1100 			    FW_PARAMS_PARAM_X_V(
1101 					FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
1102 			    FW_PARAMS_PARAM_YZ_V(rspq->cntxt_id);
1103 			err = t4vf_set_params(adapter, 1, &v, &pktcnt_idx);
1104 			if (err)
1105 				return err;
1106 		}
1107 		rspq->pktcnt_idx = pktcnt_idx;
1108 	}
1109 
1110 	/*
1111 	 * Compute the closest holdoff timer index from the supplied holdoff
1112 	 * timer value.
1113 	 */
1114 	timer_idx = (us == 0
1115 		     ? SGE_TIMER_RSTRT_CNTR
1116 		     : closest_timer(&adapter->sge, us));
1117 
1118 	/*
1119 	 * Update the response queue's interrupt coalescing parameters and
1120 	 * return success.
1121 	 */
1122 	rspq->intr_params = (QINTR_TIMER_IDX_V(timer_idx) |
1123 			     QINTR_CNT_EN_V(cnt > 0));
1124 	return 0;
1125 }
1126 
1127 /*
1128  * Return a version number to identify the type of adapter.  The scheme is:
1129  * - bits 0..9: chip version
1130  * - bits 10..15: chip revision
1131  */
1132 static inline unsigned int mk_adap_vers(const struct adapter *adapter)
1133 {
1134 	/*
1135 	 * Chip version 4, revision 0x3f (cxgb4vf).
1136 	 */
1137 	return CHELSIO_CHIP_VERSION(adapter->params.chip) | (0x3f << 10);
1138 }
1139 
1140 /*
1141  * Execute the specified ioctl command.
1142  */
1143 static int cxgb4vf_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1144 {
1145 	int ret = 0;
1146 
1147 	switch (cmd) {
1148 	    /*
1149 	     * The VF Driver doesn't have access to any of the other
1150 	     * common Ethernet device ioctl()'s (like reading/writing
	     * PHY registers, etc.), so we don't support them.
1152 	     */
1153 
1154 	default:
1155 		ret = -EOPNOTSUPP;
1156 		break;
1157 	}
1158 	return ret;
1159 }
1160 
1161 /*
1162  * Change the device's MTU.
1163  */
1164 static int cxgb4vf_change_mtu(struct net_device *dev, int new_mtu)
1165 {
1166 	int ret;
1167 	struct port_info *pi = netdev_priv(dev);
1168 
1169 	ret = t4vf_set_rxmode(pi->adapter, pi->viid, new_mtu,
1170 			      -1, -1, -1, -1, true);
1171 	if (!ret)
1172 		dev->mtu = new_mtu;
1173 	return ret;
1174 }
1175 
1176 static netdev_features_t cxgb4vf_fix_features(struct net_device *dev,
1177 	netdev_features_t features)
1178 {
1179 	/*
1180 	 * Since there is no support for separate rx/tx vlan accel
1181 	 * enable/disable make sure tx flag is always in same state as rx.
1182 	 */
1183 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
1184 		features |= NETIF_F_HW_VLAN_CTAG_TX;
1185 	else
1186 		features &= ~NETIF_F_HW_VLAN_CTAG_TX;
1187 
1188 	return features;
1189 }
1190 
1191 static int cxgb4vf_set_features(struct net_device *dev,
1192 	netdev_features_t features)
1193 {
1194 	struct port_info *pi = netdev_priv(dev);
1195 	netdev_features_t changed = dev->features ^ features;
1196 
1197 	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
1198 		t4vf_set_rxmode(pi->adapter, pi->viid, -1, -1, -1, -1,
1199 				features & NETIF_F_HW_VLAN_CTAG_TX, 0);
1200 
1201 	return 0;
1202 }
1203 
1204 /*
 * Change the device's MAC address.
1206  */
1207 static int cxgb4vf_set_mac_addr(struct net_device *dev, void *_addr)
1208 {
1209 	int ret;
1210 	struct sockaddr *addr = _addr;
1211 	struct port_info *pi = netdev_priv(dev);
1212 
1213 	if (!is_valid_ether_addr(addr->sa_data))
1214 		return -EADDRNOTAVAIL;
1215 
1216 	ret = cxgb4vf_change_mac(pi, pi->viid, &pi->xact_addr_filt,
1217 				 addr->sa_data, true);
1218 	if (ret < 0)
1219 		return ret;
1220 
1221 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1222 	return 0;
1223 }
1224 
1225 #ifdef CONFIG_NET_POLL_CONTROLLER
1226 /*
1227  * Poll all of our receive queues.  This is called outside of normal interrupt
1228  * context.
1229  */
1230 static void cxgb4vf_poll_controller(struct net_device *dev)
1231 {
1232 	struct port_info *pi = netdev_priv(dev);
1233 	struct adapter *adapter = pi->adapter;
1234 
1235 	if (adapter->flags & CXGB4VF_USING_MSIX) {
1236 		struct sge_eth_rxq *rxq;
1237 		int nqsets;
1238 
1239 		rxq = &adapter->sge.ethrxq[pi->first_qset];
1240 		for (nqsets = pi->nqsets; nqsets; nqsets--) {
1241 			t4vf_sge_intr_msix(0, &rxq->rspq);
1242 			rxq++;
1243 		}
1244 	} else
1245 		t4vf_intr_handler(adapter)(0, adapter);
1246 }
1247 #endif
1248 
1249 /*
1250  * Ethtool operations.
1251  * ===================
1252  *
1253  * Note that we don't support any ethtool operations which change the physical
1254  * state of the port to which we're linked.
1255  */
1256 
1257 /**
1258  *	from_fw_port_mod_type - translate Firmware Port/Module type to Ethtool
1259  *	@port_type: Firmware Port Type
1260  *	@mod_type: Firmware Module Type
1261  *
1262  *	Translate Firmware Port/Module type to Ethtool Port Type.
1263  */
1264 static int from_fw_port_mod_type(enum fw_port_type port_type,
1265 				 enum fw_port_module_type mod_type)
1266 {
1267 	if (port_type == FW_PORT_TYPE_BT_SGMII ||
1268 	    port_type == FW_PORT_TYPE_BT_XFI ||
1269 	    port_type == FW_PORT_TYPE_BT_XAUI) {
1270 		return PORT_TP;
1271 	} else if (port_type == FW_PORT_TYPE_FIBER_XFI ||
1272 		   port_type == FW_PORT_TYPE_FIBER_XAUI) {
1273 		return PORT_FIBRE;
1274 	} else if (port_type == FW_PORT_TYPE_SFP ||
1275 		   port_type == FW_PORT_TYPE_QSFP_10G ||
1276 		   port_type == FW_PORT_TYPE_QSA ||
1277 		   port_type == FW_PORT_TYPE_QSFP ||
1278 		   port_type == FW_PORT_TYPE_CR4_QSFP ||
1279 		   port_type == FW_PORT_TYPE_CR_QSFP ||
1280 		   port_type == FW_PORT_TYPE_CR2_QSFP ||
1281 		   port_type == FW_PORT_TYPE_SFP28) {
1282 		if (mod_type == FW_PORT_MOD_TYPE_LR ||
1283 		    mod_type == FW_PORT_MOD_TYPE_SR ||
1284 		    mod_type == FW_PORT_MOD_TYPE_ER ||
1285 		    mod_type == FW_PORT_MOD_TYPE_LRM)
1286 			return PORT_FIBRE;
1287 		else if (mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
1288 			 mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
1289 			return PORT_DA;
1290 		else
1291 			return PORT_OTHER;
1292 	} else if (port_type == FW_PORT_TYPE_KR4_100G ||
1293 		   port_type == FW_PORT_TYPE_KR_SFP28 ||
1294 		   port_type == FW_PORT_TYPE_KR_XLAUI) {
1295 		return PORT_NONE;
1296 	}
1297 
1298 	return PORT_OTHER;
1299 }
1300 
1301 /**
1302  *	fw_caps_to_lmm - translate Firmware to ethtool Link Mode Mask
1303  *	@port_type: Firmware Port Type
1304  *	@fw_caps: Firmware Port Capabilities
1305  *	@link_mode_mask: ethtool Link Mode Mask
1306  *
1307  *	Translate a Firmware Port Capabilities specification to an ethtool
1308  *	Link Mode Mask.
1309  */
1310 static void fw_caps_to_lmm(enum fw_port_type port_type,
1311 			   unsigned int fw_caps,
1312 			   unsigned long *link_mode_mask)
1313 {
1314 	#define SET_LMM(__lmm_name) \
1315 		__set_bit(ETHTOOL_LINK_MODE_ ## __lmm_name ## _BIT, \
1316 			  link_mode_mask)
1317 
1318 	#define FW_CAPS_TO_LMM(__fw_name, __lmm_name) \
1319 		do { \
1320 			if (fw_caps & FW_PORT_CAP32_ ## __fw_name) \
1321 				SET_LMM(__lmm_name); \
1322 		} while (0)
1323 
1324 	switch (port_type) {
1325 	case FW_PORT_TYPE_BT_SGMII:
1326 	case FW_PORT_TYPE_BT_XFI:
1327 	case FW_PORT_TYPE_BT_XAUI:
1328 		SET_LMM(TP);
1329 		FW_CAPS_TO_LMM(SPEED_100M, 100baseT_Full);
1330 		FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
1331 		FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
1332 		break;
1333 
1334 	case FW_PORT_TYPE_KX4:
1335 	case FW_PORT_TYPE_KX:
1336 		SET_LMM(Backplane);
1337 		FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
1338 		FW_CAPS_TO_LMM(SPEED_10G, 10000baseKX4_Full);
1339 		break;
1340 
1341 	case FW_PORT_TYPE_KR:
1342 		SET_LMM(Backplane);
1343 		FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
1344 		break;
1345 
1346 	case FW_PORT_TYPE_BP_AP:
1347 		SET_LMM(Backplane);
1348 		FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
1349 		FW_CAPS_TO_LMM(SPEED_10G, 10000baseR_FEC);
1350 		FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
1351 		break;
1352 
1353 	case FW_PORT_TYPE_BP4_AP:
1354 		SET_LMM(Backplane);
1355 		FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
1356 		FW_CAPS_TO_LMM(SPEED_10G, 10000baseR_FEC);
1357 		FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
1358 		FW_CAPS_TO_LMM(SPEED_10G, 10000baseKX4_Full);
1359 		break;
1360 
1361 	case FW_PORT_TYPE_FIBER_XFI:
1362 	case FW_PORT_TYPE_FIBER_XAUI:
1363 	case FW_PORT_TYPE_SFP:
1364 	case FW_PORT_TYPE_QSFP_10G:
1365 	case FW_PORT_TYPE_QSA:
1366 		SET_LMM(FIBRE);
1367 		FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
1368 		FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
1369 		break;
1370 
1371 	case FW_PORT_TYPE_BP40_BA:
1372 	case FW_PORT_TYPE_QSFP:
1373 		SET_LMM(FIBRE);
1374 		FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
1375 		FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
1376 		FW_CAPS_TO_LMM(SPEED_40G, 40000baseSR4_Full);
1377 		break;
1378 
1379 	case FW_PORT_TYPE_CR_QSFP:
1380 	case FW_PORT_TYPE_SFP28:
1381 		SET_LMM(FIBRE);
1382 		FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
1383 		FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
1384 		FW_CAPS_TO_LMM(SPEED_25G, 25000baseCR_Full);
1385 		break;
1386 
1387 	case FW_PORT_TYPE_KR_SFP28:
1388 		SET_LMM(Backplane);
1389 		FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
1390 		FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
1391 		FW_CAPS_TO_LMM(SPEED_25G, 25000baseKR_Full);
1392 		break;
1393 
1394 	case FW_PORT_TYPE_KR_XLAUI:
1395 		SET_LMM(Backplane);
1396 		FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
1397 		FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
1398 		FW_CAPS_TO_LMM(SPEED_40G, 40000baseKR4_Full);
1399 		break;
1400 
1401 	case FW_PORT_TYPE_CR2_QSFP:
1402 		SET_LMM(FIBRE);
1403 		FW_CAPS_TO_LMM(SPEED_50G, 50000baseSR2_Full);
1404 		break;
1405 
1406 	case FW_PORT_TYPE_KR4_100G:
1407 	case FW_PORT_TYPE_CR4_QSFP:
1408 		SET_LMM(FIBRE);
1409 		FW_CAPS_TO_LMM(SPEED_1G,  1000baseT_Full);
1410 		FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
1411 		FW_CAPS_TO_LMM(SPEED_40G, 40000baseSR4_Full);
1412 		FW_CAPS_TO_LMM(SPEED_25G, 25000baseCR_Full);
1413 		FW_CAPS_TO_LMM(SPEED_50G, 50000baseCR2_Full);
1414 		FW_CAPS_TO_LMM(SPEED_100G, 100000baseCR4_Full);
1415 		break;
1416 
1417 	default:
1418 		break;
1419 	}
1420 
1421 	if (fw_caps & FW_PORT_CAP32_FEC_V(FW_PORT_CAP32_FEC_M)) {
1422 		FW_CAPS_TO_LMM(FEC_RS, FEC_RS);
1423 		FW_CAPS_TO_LMM(FEC_BASER_RS, FEC_BASER);
1424 	} else {
1425 		SET_LMM(FEC_NONE);
1426 	}
1427 
1428 	FW_CAPS_TO_LMM(ANEG, Autoneg);
1429 	FW_CAPS_TO_LMM(802_3_PAUSE, Pause);
1430 	FW_CAPS_TO_LMM(802_3_ASM_DIR, Asym_Pause);
1431 
1432 	#undef FW_CAPS_TO_LMM
1433 	#undef SET_LMM
1434 }
1435 
1436 static int cxgb4vf_get_link_ksettings(struct net_device *dev,
1437 				  struct ethtool_link_ksettings *link_ksettings)
1438 {
1439 	struct port_info *pi = netdev_priv(dev);
1440 	struct ethtool_link_settings *base = &link_ksettings->base;
1441 
	/* For now, the Firmware doesn't send up Port State changes
1443 	 * when the Virtual Interface attached to the Port is down.  So
1444 	 * if it's down, let's grab any changes.
1445 	 */
1446 	if (!netif_running(dev))
1447 		(void)t4vf_update_port_info(pi);
1448 
1449 	ethtool_link_ksettings_zero_link_mode(link_ksettings, supported);
1450 	ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
1451 	ethtool_link_ksettings_zero_link_mode(link_ksettings, lp_advertising);
1452 
1453 	base->port = from_fw_port_mod_type(pi->port_type, pi->mod_type);
1454 
1455 	if (pi->mdio_addr >= 0) {
1456 		base->phy_address = pi->mdio_addr;
1457 		base->mdio_support = (pi->port_type == FW_PORT_TYPE_BT_SGMII
1458 				      ? ETH_MDIO_SUPPORTS_C22
1459 				      : ETH_MDIO_SUPPORTS_C45);
1460 	} else {
1461 		base->phy_address = 255;
1462 		base->mdio_support = 0;
1463 	}
1464 
1465 	fw_caps_to_lmm(pi->port_type, pi->link_cfg.pcaps,
1466 		       link_ksettings->link_modes.supported);
1467 	fw_caps_to_lmm(pi->port_type, pi->link_cfg.acaps,
1468 		       link_ksettings->link_modes.advertising);
1469 	fw_caps_to_lmm(pi->port_type, pi->link_cfg.lpacaps,
1470 		       link_ksettings->link_modes.lp_advertising);
1471 
1472 	if (netif_carrier_ok(dev)) {
1473 		base->speed = pi->link_cfg.speed;
1474 		base->duplex = DUPLEX_FULL;
1475 	} else {
1476 		base->speed = SPEED_UNKNOWN;
1477 		base->duplex = DUPLEX_UNKNOWN;
1478 	}
1479 
1480 	base->autoneg = pi->link_cfg.autoneg;
1481 	if (pi->link_cfg.pcaps & FW_PORT_CAP32_ANEG)
1482 		ethtool_link_ksettings_add_link_mode(link_ksettings,
1483 						     supported, Autoneg);
1484 	if (pi->link_cfg.autoneg)
1485 		ethtool_link_ksettings_add_link_mode(link_ksettings,
1486 						     advertising, Autoneg);
1487 
1488 	return 0;
1489 }
1490 
1491 /* Translate the Firmware FEC value into the ethtool value. */
1492 static inline unsigned int fwcap_to_eth_fec(unsigned int fw_fec)
1493 {
1494 	unsigned int eth_fec = 0;
1495 
1496 	if (fw_fec & FW_PORT_CAP32_FEC_RS)
1497 		eth_fec |= ETHTOOL_FEC_RS;
1498 	if (fw_fec & FW_PORT_CAP32_FEC_BASER_RS)
1499 		eth_fec |= ETHTOOL_FEC_BASER;
1500 
1501 	/* if nothing is set, then FEC is off */
1502 	if (!eth_fec)
1503 		eth_fec = ETHTOOL_FEC_OFF;
1504 
1505 	return eth_fec;
1506 }
1507 
1508 /* Translate Common Code FEC value into ethtool value. */
1509 static inline unsigned int cc_to_eth_fec(unsigned int cc_fec)
1510 {
1511 	unsigned int eth_fec = 0;
1512 
1513 	if (cc_fec & FEC_AUTO)
1514 		eth_fec |= ETHTOOL_FEC_AUTO;
1515 	if (cc_fec & FEC_RS)
1516 		eth_fec |= ETHTOOL_FEC_RS;
1517 	if (cc_fec & FEC_BASER_RS)
1518 		eth_fec |= ETHTOOL_FEC_BASER;
1519 
1520 	/* if nothing is set, then FEC is off */
1521 	if (!eth_fec)
1522 		eth_fec = ETHTOOL_FEC_OFF;
1523 
1524 	return eth_fec;
1525 }
1526 
1527 static int cxgb4vf_get_fecparam(struct net_device *dev,
1528 				struct ethtool_fecparam *fec)
1529 {
1530 	const struct port_info *pi = netdev_priv(dev);
1531 	const struct link_config *lc = &pi->link_cfg;
1532 
1533 	/* Translate the Firmware FEC Support into the ethtool value.  We
1534 	 * always support IEEE 802.3 "automatic" selection of Link FEC type if
1535 	 * any FEC is supported.
1536 	 */
1537 	fec->fec = fwcap_to_eth_fec(lc->pcaps);
1538 	if (fec->fec != ETHTOOL_FEC_OFF)
1539 		fec->fec |= ETHTOOL_FEC_AUTO;
1540 
1541 	/* Translate the current internal FEC parameters into the
1542 	 * ethtool values.
1543 	 */
1544 	fec->active_fec = cc_to_eth_fec(lc->fec);
1545 	return 0;
1546 }
1547 
1548 /*
1549  * Return our driver information.
1550  */
1551 static void cxgb4vf_get_drvinfo(struct net_device *dev,
1552 				struct ethtool_drvinfo *drvinfo)
1553 {
1554 	struct adapter *adapter = netdev2adap(dev);
1555 
1556 	strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
1557 	strlcpy(drvinfo->bus_info, pci_name(to_pci_dev(dev->dev.parent)),
1558 		sizeof(drvinfo->bus_info));
1559 	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
1560 		 "%u.%u.%u.%u, TP %u.%u.%u.%u",
1561 		 FW_HDR_FW_VER_MAJOR_G(adapter->params.dev.fwrev),
1562 		 FW_HDR_FW_VER_MINOR_G(adapter->params.dev.fwrev),
1563 		 FW_HDR_FW_VER_MICRO_G(adapter->params.dev.fwrev),
1564 		 FW_HDR_FW_VER_BUILD_G(adapter->params.dev.fwrev),
1565 		 FW_HDR_FW_VER_MAJOR_G(adapter->params.dev.tprev),
1566 		 FW_HDR_FW_VER_MINOR_G(adapter->params.dev.tprev),
1567 		 FW_HDR_FW_VER_MICRO_G(adapter->params.dev.tprev),
1568 		 FW_HDR_FW_VER_BUILD_G(adapter->params.dev.tprev));
1569 }
1570 
1571 /*
1572  * Return current adapter message level.
1573  */
1574 static u32 cxgb4vf_get_msglevel(struct net_device *dev)
1575 {
1576 	return netdev2adap(dev)->msg_enable;
1577 }
1578 
1579 /*
1580  * Set current adapter message level.
1581  */
1582 static void cxgb4vf_set_msglevel(struct net_device *dev, u32 msglevel)
1583 {
1584 	netdev2adap(dev)->msg_enable = msglevel;
1585 }
1586 
1587 /*
1588  * Return the device's current Queue Set ring size parameters along with the
1589  * allowed maximum values.  Since ethtool doesn't understand the concept of
1590  * multi-queue devices, we just return the current values associated with the
1591  * first Queue Set.
1592  */
1593 static void cxgb4vf_get_ringparam(struct net_device *dev,
1594 				  struct ethtool_ringparam *rp)
1595 {
1596 	const struct port_info *pi = netdev_priv(dev);
1597 	const struct sge *s = &pi->adapter->sge;
1598 
1599 	rp->rx_max_pending = MAX_RX_BUFFERS;
1600 	rp->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
1601 	rp->rx_jumbo_max_pending = 0;
1602 	rp->tx_max_pending = MAX_TXQ_ENTRIES;
1603 
1604 	rp->rx_pending = s->ethrxq[pi->first_qset].fl.size - MIN_FL_RESID;
1605 	rp->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
1606 	rp->rx_jumbo_pending = 0;
1607 	rp->tx_pending = s->ethtxq[pi->first_qset].q.size;
1608 }
1609 
1610 /*
1611  * Set the Queue Set ring size parameters for the device.  Again, since
1612  * ethtool doesn't allow for the concept of multiple queues per device, we'll
1613  * apply these new values across all of the Queue Sets associated with the
1614  * device -- after vetting them of course!
1615  */
1616 static int cxgb4vf_set_ringparam(struct net_device *dev,
1617 				 struct ethtool_ringparam *rp)
1618 {
1619 	const struct port_info *pi = netdev_priv(dev);
1620 	struct adapter *adapter = pi->adapter;
1621 	struct sge *s = &adapter->sge;
1622 	int qs;
1623 
1624 	if (rp->rx_pending > MAX_RX_BUFFERS ||
1625 	    rp->rx_jumbo_pending ||
1626 	    rp->tx_pending > MAX_TXQ_ENTRIES ||
1627 	    rp->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1628 	    rp->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1629 	    rp->rx_pending < MIN_FL_ENTRIES ||
1630 	    rp->tx_pending < MIN_TXQ_ENTRIES)
1631 		return -EINVAL;
1632 
1633 	if (adapter->flags & CXGB4VF_FULL_INIT_DONE)
1634 		return -EBUSY;
1635 
1636 	for (qs = pi->first_qset; qs < pi->first_qset + pi->nqsets; qs++) {
1637 		s->ethrxq[qs].fl.size = rp->rx_pending + MIN_FL_RESID;
1638 		s->ethrxq[qs].rspq.size = rp->rx_mini_pending;
1639 		s->ethtxq[qs].q.size = rp->tx_pending;
1640 	}
1641 	return 0;
1642 }
1643 
1644 /*
1645  * Return the interrupt holdoff timer and count for the first Queue Set on the
1646  * device.  Our extension ioctl() (the cxgbtool interface) allows the
1647  * interrupt holdoff timer to be read on all of the device's Queue Sets.
1648  */
1649 static int cxgb4vf_get_coalesce(struct net_device *dev,
1650 				struct ethtool_coalesce *coalesce)
1651 {
1652 	const struct port_info *pi = netdev_priv(dev);
1653 	const struct adapter *adapter = pi->adapter;
1654 	const struct sge_rspq *rspq = &adapter->sge.ethrxq[pi->first_qset].rspq;
1655 
1656 	coalesce->rx_coalesce_usecs = qtimer_val(adapter, rspq);
1657 	coalesce->rx_max_coalesced_frames =
1658 		((rspq->intr_params & QINTR_CNT_EN_F)
1659 		 ? adapter->sge.counter_val[rspq->pktcnt_idx]
1660 		 : 0);
1661 	return 0;
1662 }
1663 
1664 /*
1665  * Set the RX interrupt holdoff timer and count for the first Queue Set on the
1666  * interface.  Our extension ioctl() (the cxgbtool interface) allows us to set
1667  * the interrupt holdoff timer on any of the device's Queue Sets.
1668  */
1669 static int cxgb4vf_set_coalesce(struct net_device *dev,
1670 				struct ethtool_coalesce *coalesce)
1671 {
1672 	const struct port_info *pi = netdev_priv(dev);
1673 	struct adapter *adapter = pi->adapter;
1674 
1675 	return set_rxq_intr_params(adapter,
1676 				   &adapter->sge.ethrxq[pi->first_qset].rspq,
1677 				   coalesce->rx_coalesce_usecs,
1678 				   coalesce->rx_max_coalesced_frames);
1679 }
1680 
1681 /*
1682  * Report current port link pause parameter settings.
1683  */
1684 static void cxgb4vf_get_pauseparam(struct net_device *dev,
1685 				   struct ethtool_pauseparam *pauseparam)
1686 {
1687 	struct port_info *pi = netdev_priv(dev);
1688 
1689 	pauseparam->autoneg = (pi->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
1690 	pauseparam->rx_pause = (pi->link_cfg.advertised_fc & PAUSE_RX) != 0;
1691 	pauseparam->tx_pause = (pi->link_cfg.advertised_fc & PAUSE_TX) != 0;
1692 }
1693 
1694 /*
1695  * Identify the port by blinking the port's LED.
1696  */
1697 static int cxgb4vf_phys_id(struct net_device *dev,
1698 			   enum ethtool_phys_id_state state)
1699 {
1700 	unsigned int val;
1701 	struct port_info *pi = netdev_priv(dev);
1702 
1703 	if (state == ETHTOOL_ID_ACTIVE)
1704 		val = 0xffff;
1705 	else if (state == ETHTOOL_ID_INACTIVE)
1706 		val = 0;
1707 	else
1708 		return -EINVAL;
1709 
1710 	return t4vf_identify_port(pi->adapter, pi->viid, val);
1711 }
1712 
1713 /*
1714  * Port stats maintained per queue of the port.
1715  */
1716 struct queue_port_stats {
1717 	u64 tso;
1718 	u64 tx_csum;
1719 	u64 rx_csum;
1720 	u64 vlan_ex;
1721 	u64 vlan_ins;
1722 	u64 lro_pkts;
1723 	u64 lro_merged;
1724 };
1725 
1726 /*
1727  * Strings for the ETH_SS_STATS statistics set ("ethtool -S").  Note that
1728  * these need to match the order of statistics returned by
1729  * t4vf_get_port_stats().
1730  */
1731 static const char stats_strings[][ETH_GSTRING_LEN] = {
1732 	/*
1733 	 * These must match the layout of the t4vf_port_stats structure.
1734 	 */
1735 	"TxBroadcastBytes  ",
1736 	"TxBroadcastFrames ",
1737 	"TxMulticastBytes  ",
1738 	"TxMulticastFrames ",
1739 	"TxUnicastBytes    ",
1740 	"TxUnicastFrames   ",
1741 	"TxDroppedFrames   ",
1742 	"TxOffloadBytes    ",
1743 	"TxOffloadFrames   ",
1744 	"RxBroadcastBytes  ",
1745 	"RxBroadcastFrames ",
1746 	"RxMulticastBytes  ",
1747 	"RxMulticastFrames ",
1748 	"RxUnicastBytes    ",
1749 	"RxUnicastFrames   ",
1750 	"RxErrorFrames     ",
1751 
1752 	/*
1753 	 * These are accumulated per-queue statistics and must match the
1754 	 * order of the fields in the queue_port_stats structure.
1755 	 */
1756 	"TSO               ",
1757 	"TxCsumOffload     ",
1758 	"RxCsumGood        ",
1759 	"VLANextractions   ",
1760 	"VLANinsertions    ",
1761 	"GROPackets        ",
1762 	"GROMerged         ",
1763 };
1764 
1765 /*
1766  * Return the number of statistics in the specified statistics set.
1767  */
1768 static int cxgb4vf_get_sset_count(struct net_device *dev, int sset)
1769 {
1770 	switch (sset) {
1771 	case ETH_SS_STATS:
1772 		return ARRAY_SIZE(stats_strings);
1773 	default:
1774 		return -EOPNOTSUPP;
1775 	}
1776 	/*NOTREACHED*/
1777 }
1778 
1779 /*
1780  * Return the strings for the specified statistics set.
1781  */
1782 static void cxgb4vf_get_strings(struct net_device *dev,
1783 				u32 sset,
1784 				u8 *data)
1785 {
1786 	switch (sset) {
1787 	case ETH_SS_STATS:
1788 		memcpy(data, stats_strings, sizeof(stats_strings));
1789 		break;
1790 	}
1791 }
1792 
1793 /*
1794  * Small utility routine to accumulate queue statistics across the queues of
1795  * a "port".
1796  */
1797 static void collect_sge_port_stats(const struct adapter *adapter,
1798 				   const struct port_info *pi,
1799 				   struct queue_port_stats *stats)
1800 {
1801 	const struct sge_eth_txq *txq = &adapter->sge.ethtxq[pi->first_qset];
1802 	const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[pi->first_qset];
1803 	int qs;
1804 
1805 	memset(stats, 0, sizeof(*stats));
1806 	for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
1807 		stats->tso += txq->tso;
1808 		stats->tx_csum += txq->tx_cso;
1809 		stats->rx_csum += rxq->stats.rx_cso;
1810 		stats->vlan_ex += rxq->stats.vlan_ex;
1811 		stats->vlan_ins += txq->vlan_ins;
1812 		stats->lro_pkts += rxq->stats.lro_pkts;
1813 		stats->lro_merged += rxq->stats.lro_merged;
1814 	}
1815 }
1816 
1817 /*
1818  * Return the ETH_SS_STATS statistics set.
1819  */
1820 static void cxgb4vf_get_ethtool_stats(struct net_device *dev,
1821 				      struct ethtool_stats *stats,
1822 				      u64 *data)
1823 {
1824 	struct port_info *pi = netdev2pinfo(dev);
1825 	struct adapter *adapter = pi->adapter;
1826 	int err = t4vf_get_port_stats(adapter, pi->pidx,
1827 				      (struct t4vf_port_stats *)data);
1828 	if (err)
1829 		memset(data, 0, sizeof(struct t4vf_port_stats));
1830 
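	/*
	 * The accumulated per-queue statistics are appended immediately after
	 * the firmware port statistics so that the combined u64 array lines
	 * up with stats_strings[] above.
	 */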
1831 	data += sizeof(struct t4vf_port_stats) / sizeof(u64);
1832 	collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
1833 }
1834 
1835 /*
1836  * Return the size of our register map.
1837  */
1838 static int cxgb4vf_get_regs_len(struct net_device *dev)
1839 {
1840 	return T4VF_REGMAP_SIZE;
1841 }
1842 
1843 /*
1844  * Dump a block of registers, start to end inclusive, into a buffer.
1845  */
1846 static void reg_block_dump(struct adapter *adapter, void *regbuf,
1847 			   unsigned int start, unsigned int end)
1848 {
1849 	u32 *bp = regbuf + start - T4VF_REGMAP_START;
1850 
1851 	for ( ; start <= end; start += sizeof(u32)) {
1852 		/*
1853 		 * Avoid reading the Mailbox Control register since that
1854 		 * can trigger a Mailbox Ownership Arbitration cycle and
1855 		 * interfere with communication with the firmware.
1856 		 */
1857 		if (start == T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL)
1858 			*bp++ = 0xffff;
1859 		else
1860 			*bp++ = t4_read_reg(adapter, start);
1861 	}
1862 }
1863 
1864 /*
1865  * Copy our entire register map into the provided buffer.
1866  */
1867 static void cxgb4vf_get_regs(struct net_device *dev,
1868 			     struct ethtool_regs *regs,
1869 			     void *regbuf)
1870 {
1871 	struct adapter *adapter = netdev2adap(dev);
1872 
1873 	regs->version = mk_adap_vers(adapter);
1874 
1875 	/*
1876 	 * Fill in register buffer with our register map.
1877 	 */
1878 	memset(regbuf, 0, T4VF_REGMAP_SIZE);
1879 
1880 	reg_block_dump(adapter, regbuf,
1881 		       T4VF_SGE_BASE_ADDR + T4VF_MOD_MAP_SGE_FIRST,
1882 		       T4VF_SGE_BASE_ADDR + T4VF_MOD_MAP_SGE_LAST);
1883 	reg_block_dump(adapter, regbuf,
1884 		       T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_FIRST,
1885 		       T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_LAST);
1886 
1887 	/* T5 adds new registers in the PL Register map.
1888 	 */
1889 	reg_block_dump(adapter, regbuf,
1890 		       T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_FIRST,
1891 		       T4VF_PL_BASE_ADDR + (is_t4(adapter->params.chip)
1892 		       ? PL_VF_WHOAMI_A : PL_VF_REVISION_A));
1893 	reg_block_dump(adapter, regbuf,
1894 		       T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_FIRST,
1895 		       T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_LAST);
1896 
1897 	reg_block_dump(adapter, regbuf,
1898 		       T4VF_MBDATA_BASE_ADDR + T4VF_MBDATA_FIRST,
1899 		       T4VF_MBDATA_BASE_ADDR + T4VF_MBDATA_LAST);
1900 }
1901 
1902 /*
1903  * Report current Wake On LAN settings.
1904  */
1905 static void cxgb4vf_get_wol(struct net_device *dev,
1906 			    struct ethtool_wolinfo *wol)
1907 {
1908 	wol->supported = 0;
1909 	wol->wolopts = 0;
1910 	memset(&wol->sopass, 0, sizeof(wol->sopass));
1911 }
1912 
1913 /*
1914  * TCP Segmentation Offload flags and VLAN features which we support.
1915  */
1916 #define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
1917 #define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
1918 		   NETIF_F_GRO | NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
1919 
1920 static const struct ethtool_ops cxgb4vf_ethtool_ops = {
1921 	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
1922 				     ETHTOOL_COALESCE_RX_MAX_FRAMES,
1923 	.get_link_ksettings	= cxgb4vf_get_link_ksettings,
1924 	.get_fecparam		= cxgb4vf_get_fecparam,
1925 	.get_drvinfo		= cxgb4vf_get_drvinfo,
1926 	.get_msglevel		= cxgb4vf_get_msglevel,
1927 	.set_msglevel		= cxgb4vf_set_msglevel,
1928 	.get_ringparam		= cxgb4vf_get_ringparam,
1929 	.set_ringparam		= cxgb4vf_set_ringparam,
1930 	.get_coalesce		= cxgb4vf_get_coalesce,
1931 	.set_coalesce		= cxgb4vf_set_coalesce,
1932 	.get_pauseparam		= cxgb4vf_get_pauseparam,
1933 	.get_link		= ethtool_op_get_link,
1934 	.get_strings		= cxgb4vf_get_strings,
1935 	.set_phys_id		= cxgb4vf_phys_id,
1936 	.get_sset_count		= cxgb4vf_get_sset_count,
1937 	.get_ethtool_stats	= cxgb4vf_get_ethtool_stats,
1938 	.get_regs_len		= cxgb4vf_get_regs_len,
1939 	.get_regs		= cxgb4vf_get_regs,
1940 	.get_wol		= cxgb4vf_get_wol,
1941 };
1942 
1943 /*
1944  * /sys/kernel/debug/cxgb4vf support code and data.
1945  * ================================================
1946  */
1947 
1948 /*
1949  * Show Firmware Mailbox Command/Reply Log
1950  *
1951  * Note that we don't do any locking when dumping the Firmware Mailbox Log so
1952  * it's possible that we can catch things during a log update and therefore
1953  * see partially corrupted log entries.  But it's probably Good Enough(tm).
1954  * If we ever decide that we want to make sure that we're dumping a coherent
1955  * log, we'd need to perform locking in the mailbox logging and in
1956  * mboxlog_open() where we'd need to grab the entire mailbox log in one go
1957  * like we do for the Firmware Device Log.  But as stated above, meh ...
1958  */
1959 static int mboxlog_show(struct seq_file *seq, void *v)
1960 {
1961 	struct adapter *adapter = seq->private;
1962 	struct mbox_cmd_log *log = adapter->mbox_log;
1963 	struct mbox_cmd *entry;
1964 	int entry_idx, i;
1965 
1966 	if (v == SEQ_START_TOKEN) {
1967 		seq_printf(seq,
1968 			   "%10s  %15s  %5s  %5s  %s\n",
1969 			   "Seq#", "Tstamp", "Atime", "Etime",
1970 			   "Command/Reply");
1971 		return 0;
1972 	}
1973 
1974 	entry_idx = log->cursor + ((uintptr_t)v - 2);
1975 	if (entry_idx >= log->size)
1976 		entry_idx -= log->size;
1977 	entry = mbox_cmd_log_entry(log, entry_idx);
1978 
1979 	/* skip over unused entries */
1980 	if (entry->timestamp == 0)
1981 		return 0;
1982 
1983 	seq_printf(seq, "%10u  %15llu  %5d  %5d",
1984 		   entry->seqno, entry->timestamp,
1985 		   entry->access, entry->execute);
1986 	for (i = 0; i < MBOX_LEN / 8; i++) {
1987 		u64 flit = entry->cmd[i];
1988 		u32 hi = (u32)(flit >> 32);
1989 		u32 lo = (u32)flit;
1990 
1991 		seq_printf(seq, "  %08x %08x", hi, lo);
1992 	}
1993 	seq_puts(seq, "\n");
1994 	return 0;
1995 }
1996 
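/*
 * The seq_file iterators below encode the 0-based read position as a
 * non-NULL cookie by adding 1 (position 0 is reserved for SEQ_START_TOKEN);
 * mboxlog_show() undoes that offset when computing the log entry index.
 */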
1997 static inline void *mboxlog_get_idx(struct seq_file *seq, loff_t pos)
1998 {
1999 	struct adapter *adapter = seq->private;
2000 	struct mbox_cmd_log *log = adapter->mbox_log;
2001 
2002 	return ((pos <= log->size) ? (void *)(uintptr_t)(pos + 1) : NULL);
2003 }
2004 
2005 static void *mboxlog_start(struct seq_file *seq, loff_t *pos)
2006 {
2007 	return *pos ? mboxlog_get_idx(seq, *pos) : SEQ_START_TOKEN;
2008 }
2009 
2010 static void *mboxlog_next(struct seq_file *seq, void *v, loff_t *pos)
2011 {
2012 	++*pos;
2013 	return mboxlog_get_idx(seq, *pos);
2014 }
2015 
2016 static void mboxlog_stop(struct seq_file *seq, void *v)
2017 {
2018 }
2019 
2020 static const struct seq_operations mboxlog_seq_ops = {
2021 	.start = mboxlog_start,
2022 	.next  = mboxlog_next,
2023 	.stop  = mboxlog_stop,
2024 	.show  = mboxlog_show
2025 };
2026 
2027 static int mboxlog_open(struct inode *inode, struct file *file)
2028 {
2029 	int res = seq_open(file, &mboxlog_seq_ops);
2030 
2031 	if (!res) {
2032 		struct seq_file *seq = file->private_data;
2033 
2034 		seq->private = inode->i_private;
2035 	}
2036 	return res;
2037 }
2038 
2039 static const struct file_operations mboxlog_fops = {
2040 	.owner   = THIS_MODULE,
2041 	.open    = mboxlog_open,
2042 	.read    = seq_read,
2043 	.llseek  = seq_lseek,
2044 	.release = seq_release,
2045 };
2046 
2047 /*
2048  * Show SGE Queue Set information.  We display QPL Queue Sets per line.
2049  */
2050 #define QPL	4
2051 
2052 static int sge_qinfo_show(struct seq_file *seq, void *v)
2053 {
2054 	struct adapter *adapter = seq->private;
2055 	int eth_entries = DIV_ROUND_UP(adapter->sge.ethqsets, QPL);
2056 	int qs, r = (uintptr_t)v - 1;
2057 
2058 	if (r)
2059 		seq_putc(seq, '\n');
2060 
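	/*
	 * Helper macros for emitting one row per statistic across the "n"
	 * Queue Sets displayed on this line: S3() takes an explicit printf
	 * format specifier, S() prints strings, and T()/R() pull the named
	 * field from the Ethernet TX and RX queue arrays respectively.
	 */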
2061 	#define S3(fmt_spec, s, v) \
2062 		do {\
2063 			seq_printf(seq, "%-12s", s); \
2064 			for (qs = 0; qs < n; ++qs) \
2065 				seq_printf(seq, " %16" fmt_spec, v); \
2066 			seq_putc(seq, '\n'); \
2067 		} while (0)
2068 	#define S(s, v)		S3("s", s, v)
2069 	#define T(s, v)		S3("u", s, txq[qs].v)
2070 	#define R(s, v)		S3("u", s, rxq[qs].v)
2071 
2072 	if (r < eth_entries) {
2073 		const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[r * QPL];
2074 		const struct sge_eth_txq *txq = &adapter->sge.ethtxq[r * QPL];
2075 		int n = min(QPL, adapter->sge.ethqsets - QPL * r);
2076 
2077 		S("QType:", "Ethernet");
2078 		S("Interface:",
2079 		  (rxq[qs].rspq.netdev
2080 		   ? rxq[qs].rspq.netdev->name
2081 		   : "N/A"));
2082 		S3("d", "Port:",
2083 		   (rxq[qs].rspq.netdev
2084 		    ? ((struct port_info *)
2085 		       netdev_priv(rxq[qs].rspq.netdev))->port_id
2086 		    : -1));
2087 		T("TxQ ID:", q.abs_id);
2088 		T("TxQ size:", q.size);
2089 		T("TxQ inuse:", q.in_use);
2090 		T("TxQ PIdx:", q.pidx);
2091 		T("TxQ CIdx:", q.cidx);
2092 		R("RspQ ID:", rspq.abs_id);
2093 		R("RspQ size:", rspq.size);
2094 		R("RspQE size:", rspq.iqe_len);
2095 		S3("u", "Intr delay:", qtimer_val(adapter, &rxq[qs].rspq));
2096 		S3("u", "Intr pktcnt:",
2097 		   adapter->sge.counter_val[rxq[qs].rspq.pktcnt_idx]);
2098 		R("RspQ CIdx:", rspq.cidx);
2099 		R("RspQ Gen:", rspq.gen);
2100 		R("FL ID:", fl.abs_id);
2101 		R("FL size:", fl.size - MIN_FL_RESID);
2102 		R("FL avail:", fl.avail);
2103 		R("FL PIdx:", fl.pidx);
2104 		R("FL CIdx:", fl.cidx);
2105 		return 0;
2106 	}
2107 
2108 	r -= eth_entries;
2109 	if (r == 0) {
2110 		const struct sge_rspq *evtq = &adapter->sge.fw_evtq;
2111 
2112 		seq_printf(seq, "%-12s %16s\n", "QType:", "FW event queue");
2113 		seq_printf(seq, "%-12s %16u\n", "RspQ ID:", evtq->abs_id);
2114 		seq_printf(seq, "%-12s %16u\n", "Intr delay:",
2115 			   qtimer_val(adapter, evtq));
2116 		seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:",
2117 			   adapter->sge.counter_val[evtq->pktcnt_idx]);
2118 		seq_printf(seq, "%-12s %16u\n", "RspQ Cidx:", evtq->cidx);
2119 		seq_printf(seq, "%-12s %16u\n", "RspQ Gen:", evtq->gen);
2120 	} else if (r == 1) {
2121 		const struct sge_rspq *intrq = &adapter->sge.intrq;
2122 
2123 		seq_printf(seq, "%-12s %16s\n", "QType:", "Interrupt Queue");
2124 		seq_printf(seq, "%-12s %16u\n", "RspQ ID:", intrq->abs_id);
2125 		seq_printf(seq, "%-12s %16u\n", "Intr delay:",
2126 			   qtimer_val(adapter, intrq));
2127 		seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:",
2128 			   adapter->sge.counter_val[intrq->pktcnt_idx]);
2129 		seq_printf(seq, "%-12s %16u\n", "RspQ Cidx:", intrq->cidx);
2130 		seq_printf(seq, "%-12s %16u\n", "RspQ Gen:", intrq->gen);
2131 	}
2132 
2133 	#undef R
2134 	#undef T
2135 	#undef S
2136 	#undef S3
2137 
2138 	return 0;
2139 }
2140 
2141 /*
2142  * Return the number of "entries" in our "file".  We group the multi-Queue
2143  * sections with QPL Queue Sets per "entry".  The sections of the output are:
2144  *
2145  *     Ethernet RX/TX Queue Sets
2146  *     Firmware Event Queue
2147  *     Forwarded Interrupt Queue (if in MSI mode)
2148  */
2149 static int sge_queue_entries(const struct adapter *adapter)
2150 {
2151 	return DIV_ROUND_UP(adapter->sge.ethqsets, QPL) + 1 +
2152 		((adapter->flags & CXGB4VF_USING_MSI) != 0);
2153 }
2154 
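/*
 * Iterator functions for the sge_qinfo sequence file.  The 0-based position
 * is turned into a non-NULL cookie by adding 1; sge_qinfo_show() subtracts
 * it back off to recover the entry number.
 */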
2155 static void *sge_queue_start(struct seq_file *seq, loff_t *pos)
2156 {
2157 	int entries = sge_queue_entries(seq->private);
2158 
2159 	return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
2160 }
2161 
2162 static void sge_queue_stop(struct seq_file *seq, void *v)
2163 {
2164 }
2165 
2166 static void *sge_queue_next(struct seq_file *seq, void *v, loff_t *pos)
2167 {
2168 	int entries = sge_queue_entries(seq->private);
2169 
2170 	++*pos;
2171 	return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
2172 }
2173 
2174 static const struct seq_operations sge_qinfo_seq_ops = {
2175 	.start = sge_queue_start,
2176 	.next  = sge_queue_next,
2177 	.stop  = sge_queue_stop,
2178 	.show  = sge_qinfo_show
2179 };
2180 
2181 static int sge_qinfo_open(struct inode *inode, struct file *file)
2182 {
2183 	int res = seq_open(file, &sge_qinfo_seq_ops);
2184 
2185 	if (!res) {
2186 		struct seq_file *seq = file->private_data;
2187 		seq->private = inode->i_private;
2188 	}
2189 	return res;
2190 }
2191 
2192 static const struct file_operations sge_qinfo_debugfs_fops = {
2193 	.owner   = THIS_MODULE,
2194 	.open    = sge_qinfo_open,
2195 	.read    = seq_read,
2196 	.llseek  = seq_lseek,
2197 	.release = seq_release,
2198 };
2199 
2200 /*
2201  * Show SGE Queue Set statistics.  We display QPL Queue Sets per line.
2202  */
2203 #define QPL	4
2204 
2205 static int sge_qstats_show(struct seq_file *seq, void *v)
2206 {
2207 	struct adapter *adapter = seq->private;
2208 	int eth_entries = DIV_ROUND_UP(adapter->sge.ethqsets, QPL);
2209 	int qs, r = (uintptr_t)v - 1;
2210 
2211 	if (r)
2212 		seq_putc(seq, '\n');
2213 
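	/*
	 * Helper macros for the per-Queue Set statistics rows: S3() takes an
	 * explicit printf format specifier, T3()/R3() index into the TX/RX
	 * queue arrays, and T()/R() default to the "lu" format used by most
	 * of the counters.
	 */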
2214 	#define S3(fmt, s, v) \
2215 		do { \
2216 			seq_printf(seq, "%-16s", s); \
2217 			for (qs = 0; qs < n; ++qs) \
2218 				seq_printf(seq, " %8" fmt, v); \
2219 			seq_putc(seq, '\n'); \
2220 		} while (0)
2221 	#define S(s, v)		S3("s", s, v)
2222 
2223 	#define T3(fmt, s, v)	S3(fmt, s, txq[qs].v)
2224 	#define T(s, v)		T3("lu", s, v)
2225 
2226 	#define R3(fmt, s, v)	S3(fmt, s, rxq[qs].v)
2227 	#define R(s, v)		R3("lu", s, v)
2228 
2229 	if (r < eth_entries) {
2230 		const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[r * QPL];
2231 		const struct sge_eth_txq *txq = &adapter->sge.ethtxq[r * QPL];
2232 		int n = min(QPL, adapter->sge.ethqsets - QPL * r);
2233 
2234 		S("QType:", "Ethernet");
2235 		S("Interface:",
2236 		  (rxq[qs].rspq.netdev
2237 		   ? rxq[qs].rspq.netdev->name
2238 		   : "N/A"));
2239 		R3("u", "RspQNullInts:", rspq.unhandled_irqs);
2240 		R("RxPackets:", stats.pkts);
2241 		R("RxCSO:", stats.rx_cso);
2242 		R("VLANxtract:", stats.vlan_ex);
2243 		R("LROmerged:", stats.lro_merged);
2244 		R("LROpackets:", stats.lro_pkts);
2245 		R("RxDrops:", stats.rx_drops);
2246 		T("TSO:", tso);
2247 		T("TxCSO:", tx_cso);
2248 		T("VLANins:", vlan_ins);
2249 		T("TxQFull:", q.stops);
2250 		T("TxQRestarts:", q.restarts);
2251 		T("TxMapErr:", mapping_err);
2252 		R("FLAllocErr:", fl.alloc_failed);
2253 		R("FLLrgAlcErr:", fl.large_alloc_failed);
2254 		R("FLStarving:", fl.starving);
2255 		return 0;
2256 	}
2257 
2258 	r -= eth_entries;
2259 	if (r == 0) {
2260 		const struct sge_rspq *evtq = &adapter->sge.fw_evtq;
2261 
2262 		seq_printf(seq, "%-8s %16s\n", "QType:", "FW event queue");
2263 		seq_printf(seq, "%-16s %8u\n", "RspQNullInts:",
2264 			   evtq->unhandled_irqs);
2265 		seq_printf(seq, "%-16s %8u\n", "RspQ CIdx:", evtq->cidx);
2266 		seq_printf(seq, "%-16s %8u\n", "RspQ Gen:", evtq->gen);
2267 	} else if (r == 1) {
2268 		const struct sge_rspq *intrq = &adapter->sge.intrq;
2269 
2270 		seq_printf(seq, "%-8s %16s\n", "QType:", "Interrupt Queue");
2271 		seq_printf(seq, "%-16s %8u\n", "RspQNullInts:",
2272 			   intrq->unhandled_irqs);
2273 		seq_printf(seq, "%-16s %8u\n", "RspQ CIdx:", intrq->cidx);
2274 		seq_printf(seq, "%-16s %8u\n", "RspQ Gen:", intrq->gen);
2275 	}
2276 
2277 	#undef R
2278 	#undef T
2279 	#undef S
2280 	#undef R3
2281 	#undef T3
2282 	#undef S3
2283 
2284 	return 0;
2285 }
2286 
2287 /*
2288  * Return the number of "entries" in our "file".  We group the multi-Queue
2289  * sections with QPL Queue Sets per "entry".  The sections of the output are:
2290  *
2291  *     Ethernet RX/TX Queue Sets
2292  *     Firmware Event Queue
2293  *     Forwarded Interrupt Queue (if in MSI mode)
2294  */
2295 static int sge_qstats_entries(const struct adapter *adapter)
2296 {
2297 	return DIV_ROUND_UP(adapter->sge.ethqsets, QPL) + 1 +
2298 		((adapter->flags & CXGB4VF_USING_MSI) != 0);
2299 }
2300 
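/*
 * Iterator functions for the sge_qstats sequence file; these use the same
 * position-to-cookie convention as the sge_qinfo iterators above.
 */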
2301 static void *sge_qstats_start(struct seq_file *seq, loff_t *pos)
2302 {
2303 	int entries = sge_qstats_entries(seq->private);
2304 
2305 	return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
2306 }
2307 
2308 static void sge_qstats_stop(struct seq_file *seq, void *v)
2309 {
2310 }
2311 
2312 static void *sge_qstats_next(struct seq_file *seq, void *v, loff_t *pos)
2313 {
2314 	int entries = sge_qstats_entries(seq->private);
2315 
2316 	(*pos)++;
2317 	return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
2318 }
2319 
2320 static const struct seq_operations sge_qstats_seq_ops = {
2321 	.start = sge_qstats_start,
2322 	.next  = sge_qstats_next,
2323 	.stop  = sge_qstats_stop,
2324 	.show  = sge_qstats_show
2325 };
2326 
2327 static int sge_qstats_open(struct inode *inode, struct file *file)
2328 {
2329 	int res = seq_open(file, &sge_qstats_seq_ops);
2330 
2331 	if (res == 0) {
2332 		struct seq_file *seq = file->private_data;
2333 		seq->private = inode->i_private;
2334 	}
2335 	return res;
2336 }
2337 
2338 static const struct file_operations sge_qstats_proc_fops = {
2339 	.owner   = THIS_MODULE,
2340 	.open    = sge_qstats_open,
2341 	.read    = seq_read,
2342 	.llseek  = seq_lseek,
2343 	.release = seq_release,
2344 };
2345 
2346 /*
2347  * Show PCI-E SR-IOV Virtual Function Resource Limits.
2348  */
2349 static int resources_show(struct seq_file *seq, void *v)
2350 {
2351 	struct adapter *adapter = seq->private;
2352 	struct vf_resources *vfres = &adapter->params.vfres;
2353 
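	/*
	 * Emit one line per resource limit showing a human-readable
	 * description, the vf_resources field name it comes from, and its
	 * value in the given format.
	 */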
2354 	#define S(desc, fmt, var) \
2355 		seq_printf(seq, "%-60s " fmt "\n", \
2356 			   desc " (" #var "):", vfres->var)
2357 
2358 	S("Virtual Interfaces", "%d", nvi);
2359 	S("Egress Queues", "%d", neq);
2360 	S("Ethernet Control", "%d", nethctrl);
2361 	S("Ingress Queues/w Free Lists/Interrupts", "%d", niqflint);
2362 	S("Ingress Queues", "%d", niq);
2363 	S("Traffic Class", "%d", tc);
2364 	S("Port Access Rights Mask", "%#x", pmask);
2365 	S("MAC Address Filters", "%d", nexactf);
2366 	S("Firmware Command Read Capabilities", "%#x", r_caps);
2367 	S("Firmware Command Write/Execute Capabilities", "%#x", wx_caps);
2368 
2369 	#undef S
2370 
2371 	return 0;
2372 }
2373 DEFINE_SHOW_ATTRIBUTE(resources);
2374 
2375 /*
2376  * Show Virtual Interfaces.
2377  */
2378 static int interfaces_show(struct seq_file *seq, void *v)
2379 {
2380 	if (v == SEQ_START_TOKEN) {
2381 		seq_puts(seq, "Interface  Port   VIID\n");
2382 	} else {
2383 		struct adapter *adapter = seq->private;
2384 		int pidx = (uintptr_t)v - 2;
2385 		struct net_device *dev = adapter->port[pidx];
2386 		struct port_info *pi = netdev_priv(dev);
2387 
2388 		seq_printf(seq, "%9s  %4d  %#5x\n",
2389 			   dev->name, pi->port_id, pi->viid);
2390 	}
2391 	return 0;
2392 }
2393 
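/*
 * Iterator functions for the interfaces sequence file.  As with the other
 * iterators, the port index is offset by one so the cookie is never NULL;
 * interfaces_show() subtracts 2 (the +1 offset plus the header row) to
 * recover the port index.
 */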
2394 static inline void *interfaces_get_idx(struct adapter *adapter, loff_t pos)
2395 {
2396 	return pos <= adapter->params.nports
2397 		? (void *)(uintptr_t)(pos + 1)
2398 		: NULL;
2399 }
2400 
2401 static void *interfaces_start(struct seq_file *seq, loff_t *pos)
2402 {
2403 	return *pos
2404 		? interfaces_get_idx(seq->private, *pos)
2405 		: SEQ_START_TOKEN;
2406 }
2407 
2408 static void *interfaces_next(struct seq_file *seq, void *v, loff_t *pos)
2409 {
2410 	(*pos)++;
2411 	return interfaces_get_idx(seq->private, *pos);
2412 }
2413 
2414 static void interfaces_stop(struct seq_file *seq, void *v)
2415 {
2416 }
2417 
2418 static const struct seq_operations interfaces_seq_ops = {
2419 	.start = interfaces_start,
2420 	.next  = interfaces_next,
2421 	.stop  = interfaces_stop,
2422 	.show  = interfaces_show
2423 };
2424 
2425 static int interfaces_open(struct inode *inode, struct file *file)
2426 {
2427 	int res = seq_open(file, &interfaces_seq_ops);
2428 
2429 	if (res == 0) {
2430 		struct seq_file *seq = file->private_data;
2431 		seq->private = inode->i_private;
2432 	}
2433 	return res;
2434 }
2435 
2436 static const struct file_operations interfaces_proc_fops = {
2437 	.owner   = THIS_MODULE,
2438 	.open    = interfaces_open,
2439 	.read    = seq_read,
2440 	.llseek  = seq_lseek,
2441 	.release = seq_release,
2442 };
2443 
2444 /*
2445  * /sys/kernel/debug/cxgb4vf/ files list.
2446  */
2447 struct cxgb4vf_debugfs_entry {
2448 	const char *name;		/* name of debugfs node */
2449 	umode_t mode;			/* file system mode */
2450 	const struct file_operations *fops;
2451 };
2452 
2453 static struct cxgb4vf_debugfs_entry debugfs_files[] = {
2454 	{ "mboxlog",    0444, &mboxlog_fops },
2455 	{ "sge_qinfo",  0444, &sge_qinfo_debugfs_fops },
2456 	{ "sge_qstats", 0444, &sge_qstats_proc_fops },
2457 	{ "resources",  0444, &resources_fops },
2458 	{ "interfaces", 0444, &interfaces_proc_fops },
2459 };
2460 
2461 /*
2462  * Module and device initialization and cleanup code.
2463  * ==================================================
2464  */
2465 
2466 /*
2467  * Set up our /sys/kernel/debug/cxgb4vf sub-nodes.  We assume that the
2468  * directory (debugfs_root) has already been set up.
2469  */
2470 static int setup_debugfs(struct adapter *adapter)
2471 {
2472 	int i;
2473 
2474 	BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
2475 
2476 	/*
2477 	 * Debugfs support is best effort.
2478 	 */
2479 	for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
2480 		debugfs_create_file(debugfs_files[i].name,
2481 				    debugfs_files[i].mode,
2482 				    adapter->debugfs_root, adapter,
2483 				    debugfs_files[i].fops);
2484 
2485 	return 0;
2486 }
2487 
2488 /*
2489  * Tear down the /sys/kernel/debug/cxgb4vf sub-nodes created above.  We leave
2490  * it to our caller to tear down the directory (debugfs_root).
2491  */
2492 static void cleanup_debugfs(struct adapter *adapter)
2493 {
2494 	BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
2495 
2496 	/*
2497 	 * Unlike our sister routine cleanup_proc(), we don't need to remove
2498 	 * individual entries because a call will be made to
2499 	 * debugfs_remove_recursive().  We just need to clean up any ancillary
2500 	 * persistent state.
2501 	 */
2502 	/* nothing to do */
2503 }
2504 
2505 /* Figure out how many Ports and Queue Sets we can support.  This depends on
2506  * knowing our Virtual Function Resources and may be called a second time if
2507  * we fall back from MSI-X to MSI Interrupt Mode.
2508  */
2509 static void size_nports_qsets(struct adapter *adapter)
2510 {
2511 	struct vf_resources *vfres = &adapter->params.vfres;
2512 	unsigned int ethqsets, pmask_nports;
2513 
2514 	/* The number of "ports" which we support is equal to the number of
2515 	 * Virtual Interfaces with which we've been provisioned.
2516 	 */
2517 	adapter->params.nports = vfres->nvi;
2518 	if (adapter->params.nports > MAX_NPORTS) {
2519 		dev_warn(adapter->pdev_dev, "only using %d of %d maximum"
2520 			 " allowed virtual interfaces\n", MAX_NPORTS,
2521 			 adapter->params.nports);
2522 		adapter->params.nports = MAX_NPORTS;
2523 	}
2524 
2525 	/* We may have been provisioned with more VIs than the number of
2526 	 * ports we're allowed to access (our Port Access Rights Mask).
2527 	 * This is obviously a configuration conflict but we don't want to
2528 	 * crash the kernel or anything silly just because of that.
2529 	 */
2530 	pmask_nports = hweight32(adapter->params.vfres.pmask);
2531 	if (pmask_nports < adapter->params.nports) {
2532 		dev_warn(adapter->pdev_dev, "only using %d of %d provisioned"
2533 			 " virtual interfaces; limited by Port Access Rights"
2534 			 " mask %#x\n", pmask_nports, adapter->params.nports,
2535 			 adapter->params.vfres.pmask);
2536 		adapter->params.nports = pmask_nports;
2537 	}
2538 
2539 	/* We need to reserve an Ingress Queue for the Asynchronous Firmware
2540 	 * Event Queue.  And if we're using MSI Interrupts, we'll also need to
2541 	 * reserve an Ingress Queue for Forwarded Interrupts.
2542 	 *
2543 	 * The rest of the FL/Intr-capable ingress queues will be matched up
2544 	 * one-for-one with Ethernet/Control egress queues in order to form
2545 	 * "Queue Sets" which will be apportioned between the "ports".  For
2546 	 * each Queue Set, we'll need the ability to allocate two Egress
2547 	 * Contexts -- one for the Ingress Queue Free List and one for the TX
2548 	 * Ethernet Queue.
2549 	 *
2550 	 * Note that even if we're currently configured to use MSI-X
2551 	 * Interrupts (module variable msi == MSI_MSIX) we may get downgraded
2552 	 * to MSI Interrupts if we can't get enough MSI-X Interrupts.  If that
2553 	 * happens we'll need to adjust things later.
2554 	 */
2555 	ethqsets = vfres->niqflint - 1 - (msi == MSI_MSI);
2556 	if (vfres->nethctrl != ethqsets)
2557 		ethqsets = min(vfres->nethctrl, ethqsets);
2558 	if (vfres->neq < ethqsets*2)
2559 		ethqsets = vfres->neq/2;
2560 	if (ethqsets > MAX_ETH_QSETS)
2561 		ethqsets = MAX_ETH_QSETS;
2562 	adapter->sge.max_ethqsets = ethqsets;
2563 
2564 	if (adapter->sge.max_ethqsets < adapter->params.nports) {
2565 		dev_warn(adapter->pdev_dev, "only using %d of %d available"
2566 			 " virtual interfaces (too few Queue Sets)\n",
2567 			 adapter->sge.max_ethqsets, adapter->params.nports);
2568 		adapter->params.nports = adapter->sge.max_ethqsets;
2569 	}
2570 }
2571 
2572 /*
2573  * Perform early "adapter" initialization.  This is where we discover what
2574  * adapter parameters we're going to be using and initialize basic adapter
2575  * hardware support.
2576  */
2577 static int adap_init0(struct adapter *adapter)
2578 {
2579 	struct sge_params *sge_params = &adapter->params.sge;
2580 	struct sge *s = &adapter->sge;
2581 	int err;
2582 	u32 param, val = 0;
2583 
2584 	/*
2585 	 * Some environments do not properly handle PCIE FLRs -- e.g. in Linux
2586 	 * 2.6.31 and later we can't call pci_reset_function() in order to
2587 	 * issue an FLR because of a self-deadlock on the device semaphore.
2588 	 * Meanwhile, the OS infrastructure doesn't issue FLRs in all the
2589 	 * cases where they're needed -- for instance, some versions of KVM
2590 	 * fail to reset "Assigned Devices" when the VM reboots.  Therefore we
2591 	 * use the firmware based reset in order to reset any per function
2592 	 * state.
2593 	 */
2594 	err = t4vf_fw_reset(adapter);
2595 	if (err < 0) {
2596 		dev_err(adapter->pdev_dev, "FW reset failed: err=%d\n", err);
2597 		return err;
2598 	}
2599 
2600 	/*
2601 	 * Grab basic operational parameters.  These will predominantly have
2602 	 * been set up by the Physical Function Driver or will be hard coded
2603 	 * into the adapter.  We just have to live with them ...  Note that
2604 	 * we _must_ get our VPD parameters before our SGE parameters because
2605 	 * we need to know the adapter's core clock from the VPD in order to
2606 	 * properly decode the SGE Timer Values.
2607 	 */
2608 	err = t4vf_get_dev_params(adapter);
2609 	if (err) {
2610 		dev_err(adapter->pdev_dev, "unable to retrieve adapter"
2611 			" device parameters: err=%d\n", err);
2612 		return err;
2613 	}
2614 	err = t4vf_get_vpd_params(adapter);
2615 	if (err) {
2616 		dev_err(adapter->pdev_dev, "unable to retrieve adapter"
2617 			" VPD parameters: err=%d\n", err);
2618 		return err;
2619 	}
2620 	err = t4vf_get_sge_params(adapter);
2621 	if (err) {
2622 		dev_err(adapter->pdev_dev, "unable to retrieve adapter"
2623 			" SGE parameters: err=%d\n", err);
2624 		return err;
2625 	}
2626 	err = t4vf_get_rss_glb_config(adapter);
2627 	if (err) {
2628 		dev_err(adapter->pdev_dev, "unable to retrieve adapter"
2629 			" RSS parameters: err=%d\n", err);
2630 		return err;
2631 	}
2632 	if (adapter->params.rss.mode !=
2633 	    FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
2634 		dev_err(adapter->pdev_dev, "unable to operate with global RSS"
2635 			" mode %d\n", adapter->params.rss.mode);
2636 		return -EINVAL;
2637 	}
2638 	err = t4vf_sge_init(adapter);
2639 	if (err) {
2640 		dev_err(adapter->pdev_dev, "unable to use adapter parameters:"
2641 			" err=%d\n", err);
2642 		return err;
2643 	}
2644 
2645 	/* If we're running on newer firmware, let it know that we're
2646 	 * prepared to deal with encapsulated CPL messages.  Older
2647 	 * firmware won't understand this and we'll just get
2648 	 * unencapsulated messages ...
2649 	 */
2650 	param = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
2651 		FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP);
2652 	val = 1;
2653 	(void) t4vf_set_params(adapter, 1, &param, &val);
2654 
2655 	/*
2656 	 * Retrieve our RX interrupt holdoff timer values and counter
2657 	 * threshold values from the SGE parameters.
2658 	 */
2659 	s->timer_val[0] = core_ticks_to_us(adapter,
2660 		TIMERVALUE0_G(sge_params->sge_timer_value_0_and_1));
2661 	s->timer_val[1] = core_ticks_to_us(adapter,
2662 		TIMERVALUE1_G(sge_params->sge_timer_value_0_and_1));
2663 	s->timer_val[2] = core_ticks_to_us(adapter,
2664 		TIMERVALUE0_G(sge_params->sge_timer_value_2_and_3));
2665 	s->timer_val[3] = core_ticks_to_us(adapter,
2666 		TIMERVALUE1_G(sge_params->sge_timer_value_2_and_3));
2667 	s->timer_val[4] = core_ticks_to_us(adapter,
2668 		TIMERVALUE0_G(sge_params->sge_timer_value_4_and_5));
2669 	s->timer_val[5] = core_ticks_to_us(adapter,
2670 		TIMERVALUE1_G(sge_params->sge_timer_value_4_and_5));
2671 
2672 	s->counter_val[0] = THRESHOLD_0_G(sge_params->sge_ingress_rx_threshold);
2673 	s->counter_val[1] = THRESHOLD_1_G(sge_params->sge_ingress_rx_threshold);
2674 	s->counter_val[2] = THRESHOLD_2_G(sge_params->sge_ingress_rx_threshold);
2675 	s->counter_val[3] = THRESHOLD_3_G(sge_params->sge_ingress_rx_threshold);
2676 
2677 	/*
2678 	 * Grab our Virtual Interface resource allocation, extract the
2679 	 * features that we're interested in and do a bit of sanity testing on
2680 	 * what we discover.
2681 	 */
2682 	err = t4vf_get_vfres(adapter);
2683 	if (err) {
2684 		dev_err(adapter->pdev_dev, "unable to get virtual interface"
2685 			" resources: err=%d\n", err);
2686 		return err;
2687 	}
2688 
2689 	/* Check for various parameter sanity issues */
2690 	if (adapter->params.vfres.pmask == 0) {
2691 		dev_err(adapter->pdev_dev, "no port access configured/"
2692 			"usable!\n");
2693 		return -EINVAL;
2694 	}
2695 	if (adapter->params.vfres.nvi == 0) {
2696 		dev_err(adapter->pdev_dev, "no virtual interfaces configured/"
2697 			"usable!\n");
2698 		return -EINVAL;
2699 	}
2700 
2701 	/* Initialize nports and max_ethqsets now that we have our Virtual
2702 	 * Function Resources.
2703 	 */
2704 	size_nports_qsets(adapter);
2705 
2706 	adapter->flags |= CXGB4VF_FW_OK;
2707 	return 0;
2708 }
2709 
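/*
 * Fill in the basic parameters of a Response Queue: the interrupt holdoff
 * timer index, an optional packet count threshold (only enabled when
 * pkt_cnt_idx is a valid SGE counter index), the queue size and the size
 * of each Ingress Queue Entry.
 */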
2710 static inline void init_rspq(struct sge_rspq *rspq, u8 timer_idx,
2711 			     u8 pkt_cnt_idx, unsigned int size,
2712 			     unsigned int iqe_size)
2713 {
2714 	rspq->intr_params = (QINTR_TIMER_IDX_V(timer_idx) |
2715 			     (pkt_cnt_idx < SGE_NCOUNTERS ?
2716 			      QINTR_CNT_EN_F : 0));
2717 	rspq->pktcnt_idx = (pkt_cnt_idx < SGE_NCOUNTERS
2718 			    ? pkt_cnt_idx
2719 			    : 0);
2720 	rspq->iqe_len = iqe_size;
2721 	rspq->size = size;
2722 }
2723 
2724 /*
2725  * Perform default configuration of DMA queues depending on the number and
2726  * type of ports we found and the number of available CPUs.  Most settings can
2727  * be modified by the admin via ethtool and cxgbtool prior to the adapter
2728  * being brought up for the first time.
2729  */
2730 static void cfg_queues(struct adapter *adapter)
2731 {
2732 	struct sge *s = &adapter->sge;
2733 	int q10g, n10g, qidx, pidx, qs;
2734 	size_t iqe_size;
2735 
2736 	/*
2737 	 * We should not be called till we know how many Queue Sets we can
2738 	 * support.  In particular, this means that we need to know what kind
2739 	 * of interrupts we'll be using ...
2740 	 */
2741 	BUG_ON((adapter->flags &
2742 	       (CXGB4VF_USING_MSIX | CXGB4VF_USING_MSI)) == 0);
2743 
2744 	/*
2745 	 * Count the number of 10GbE Virtual Interfaces that we have.
2746 	 */
2747 	n10g = 0;
2748 	for_each_port(adapter, pidx)
2749 		n10g += is_x_10g_port(&adap2pinfo(adapter, pidx)->link_cfg);
2750 
2751 	/*
2752 	 * We default to 1 queue per non-10G port and up to # of cores queues
2753 	 * per 10G port.
2754 	 */
2755 	if (n10g == 0)
2756 		q10g = 0;
2757 	else {
2758 		int n1g = (adapter->params.nports - n10g);
2759 		q10g = (adapter->sge.max_ethqsets - n1g) / n10g;
2760 		if (q10g > num_online_cpus())
2761 			q10g = num_online_cpus();
2762 	}
2763 
2764 	/*
2765 	 * Allocate the "Queue Sets" to the various Virtual Interfaces.
2766 	 * The layout will be established in setup_sge_queues() when the
2767 	 * adapter is brought up for the first time.
2768 	 */
2769 	qidx = 0;
2770 	for_each_port(adapter, pidx) {
2771 		struct port_info *pi = adap2pinfo(adapter, pidx);
2772 
2773 		pi->first_qset = qidx;
2774 		pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
2775 		qidx += pi->nqsets;
2776 	}
2777 	s->ethqsets = qidx;
2778 
2779 	/*
2780 	 * The Ingress Queue Entry Size for our various Response Queues needs
2781 	 * to be big enough to accommodate the largest message we can receive
2782 	 * from the chip/firmware; which is 64 bytes ...
2783 	 */
2784 	iqe_size = 64;
2785 
2786 	/*
2787 	 * Set up default Queue Set parameters ...  Start off with the
2788 	 * shortest interrupt holdoff timer.
2789 	 */
2790 	for (qs = 0; qs < s->max_ethqsets; qs++) {
2791 		struct sge_eth_rxq *rxq = &s->ethrxq[qs];
2792 		struct sge_eth_txq *txq = &s->ethtxq[qs];
2793 
2794 		init_rspq(&rxq->rspq, 0, 0, 1024, iqe_size);
2795 		rxq->fl.size = 72;
2796 		txq->q.size = 1024;
2797 	}
2798 
2799 	/*
2800 	 * The firmware event queue is used for link state changes and
2801 	 * notifications of TX DMA completions.
2802 	 */
2803 	init_rspq(&s->fw_evtq, SGE_TIMER_RSTRT_CNTR, 0, 512, iqe_size);
2804 
2805 	/*
2806 	 * The forwarded interrupt queue is used when we're in MSI interrupt
2807 	 * mode.  In this mode all interrupts associated with RX queues will
2808 	 * be forwarded to a single queue which we'll associate with our MSI
2809 	 * interrupt vector.  The messages dropped in the forwarded interrupt
2810 	 * queue will indicate which ingress queue needs servicing ...  This
2811 	 * queue needs to be large enough to accommodate all of the ingress
2812 	 * queues which are forwarding their interrupt (+1 to prevent the PIDX
2813 	 * from equalling the CIDX if every ingress queue has an outstanding
2814 	 * interrupt).  The queue doesn't need to be any larger because no
2815 	 * ingress queue will ever have more than one outstanding interrupt at
2816 	 * any time ...
2817 	 */
2818 	init_rspq(&s->intrq, SGE_TIMER_RSTRT_CNTR, 0, MSIX_ENTRIES + 1,
2819 		  iqe_size);
2820 }
2821 
2822 /*
2823  * Reduce the number of Ethernet queues across all ports to at most n.
2824  * n provides at least one queue per port.
2825  */
2826 static void reduce_ethqs(struct adapter *adapter, int n)
2827 {
2828 	int i;
2829 	struct port_info *pi;
2830 
2831 	/*
2832 	 * While we have too many active Ethernet Queue Sets, iterate across the
2833 	 * "ports" and reduce their individual Queue Set allocations.
2834 	 */
2835 	BUG_ON(n < adapter->params.nports);
2836 	while (n < adapter->sge.ethqsets)
2837 		for_each_port(adapter, i) {
2838 			pi = adap2pinfo(adapter, i);
2839 			if (pi->nqsets > 1) {
2840 				pi->nqsets--;
2841 				adapter->sge.ethqsets--;
2842 				if (adapter->sge.ethqsets <= n)
2843 					break;
2844 			}
2845 		}
2846 
2847 	/*
2848 	 * Reassign the starting Queue Sets for each of the "ports" ...
2849 	 */
2850 	n = 0;
2851 	for_each_port(adapter, i) {
2852 		pi = adap2pinfo(adapter, i);
2853 		pi->first_qset = n;
2854 		n += pi->nqsets;
2855 	}
2856 }
2857 
2858 /*
2859  * We need to grab enough MSI-X vectors to cover our interrupt needs.  Ideally
2860  * we get a separate MSI-X vector for every "Queue Set" plus any extras we
2861  * need.  Minimally we need one for every Virtual Interface plus those needed
2862  * for our "extras".  Note that this process may lower the maximum number of
2863  * allowed Queue Sets ...
2864  */
2865 static int enable_msix(struct adapter *adapter)
2866 {
2867 	int i, want, need, nqsets;
2868 	struct msix_entry entries[MSIX_ENTRIES];
2869 	struct sge *s = &adapter->sge;
2870 
2871 	for (i = 0; i < MSIX_ENTRIES; ++i)
2872 		entries[i].entry = i;
2873 
2874 	/*
2875 	 * We _want_ enough MSI-X interrupts to cover all of our "Queue Sets"
2876 	 * plus those needed for our "extras" (for example, the firmware
2877 	 * message queue).  We _need_ at least one "Queue Set" per Virtual
2878 	 * Interface plus those needed for our "extras".  So now we get to see
2879 	 * if the song is right ...
2880 	 */
2881 	want = s->max_ethqsets + MSIX_EXTRAS;
2882 	need = adapter->params.nports + MSIX_EXTRAS;
2883 
2884 	want = pci_enable_msix_range(adapter->pdev, entries, need, want);
2885 	if (want < 0)
2886 		return want;
2887 
2888 	nqsets = want - MSIX_EXTRAS;
2889 	if (nqsets < s->max_ethqsets) {
2890 		dev_warn(adapter->pdev_dev, "only enough MSI-X vectors"
2891 			 " for %d Queue Sets\n", nqsets);
2892 		s->max_ethqsets = nqsets;
2893 		if (nqsets < s->ethqsets)
2894 			reduce_ethqs(adapter, nqsets);
2895 	}
2896 	for (i = 0; i < want; ++i)
2897 		adapter->msix_info[i].vec = entries[i].vector;
2898 
2899 	return 0;
2900 }
2901 
2902 static const struct net_device_ops cxgb4vf_netdev_ops	= {
2903 	.ndo_open		= cxgb4vf_open,
2904 	.ndo_stop		= cxgb4vf_stop,
2905 	.ndo_start_xmit		= t4vf_eth_xmit,
2906 	.ndo_get_stats		= cxgb4vf_get_stats,
2907 	.ndo_set_rx_mode	= cxgb4vf_set_rxmode,
2908 	.ndo_set_mac_address	= cxgb4vf_set_mac_addr,
2909 	.ndo_validate_addr	= eth_validate_addr,
2910 	.ndo_do_ioctl		= cxgb4vf_do_ioctl,
2911 	.ndo_change_mtu		= cxgb4vf_change_mtu,
2912 	.ndo_fix_features	= cxgb4vf_fix_features,
2913 	.ndo_set_features	= cxgb4vf_set_features,
2914 #ifdef CONFIG_NET_POLL_CONTROLLER
2915 	.ndo_poll_controller	= cxgb4vf_poll_controller,
2916 #endif
2917 };
2918 
2919 /**
2920  *	cxgb4vf_get_port_mask - Get port mask for the VF based on mac
2921  *				address stored on the adapter
2922  *	@adapter: The adapter
2923  *
2924  *	Find the port mask for the VF based on the index of the MAC
2925  *	address stored in the adapter.  If no MAC address is stored on
2926  *	the adapter for the VF, use the port mask received from the
2927  *	firmware.
2928  */
2929 static unsigned int cxgb4vf_get_port_mask(struct adapter *adapter)
2930 {
2931 	unsigned int naddr = 1, pidx = 0;
2932 	unsigned int pmask, rmask = 0;
2933 	u8 mac[ETH_ALEN];
2934 	int err;
2935 
2936 	pmask = adapter->params.vfres.pmask;
2937 	while (pmask) {
2938 		if (pmask & 1) {
2939 			err = t4vf_get_vf_mac_acl(adapter, pidx, &naddr, mac);
2940 			if (!err && !is_zero_ether_addr(mac))
2941 				rmask |= (1 << pidx);
2942 		}
2943 		pmask >>= 1;
2944 		pidx++;
2945 	}
2946 	if (!rmask)
2947 		rmask = adapter->params.vfres.pmask;
2948 
2949 	return rmask;
2950 }
2951 
2952 /*
2953  * "Probe" a device: initialize a device and construct all kernel and driver
2954  * state needed to manage the device.  This routine is called "init_one" in
2955  * the PF Driver ...
2956  */
2957 static int cxgb4vf_pci_probe(struct pci_dev *pdev,
2958 			     const struct pci_device_id *ent)
2959 {
2960 	struct adapter *adapter;
2961 	struct net_device *netdev;
2962 	struct port_info *pi;
2963 	unsigned int pmask;
2964 	int pci_using_dac;
2965 	int err, pidx;
2966 
2967 	/*
2968 	 * Initialize generic PCI device state.
2969 	 */
2970 	err = pci_enable_device(pdev);
2971 	if (err) {
2972 		dev_err(&pdev->dev, "cannot enable PCI device\n");
2973 		return err;
2974 	}
2975 
2976 	/*
2977 	 * Reserve PCI resources for the device.  If we can't get them some
2978 	 * other driver may have already claimed the device ...
2979 	 */
2980 	err = pci_request_regions(pdev, KBUILD_MODNAME);
2981 	if (err) {
2982 		dev_err(&pdev->dev, "cannot obtain PCI resources\n");
2983 		goto err_disable_device;
2984 	}
2985 
2986 	/*
2987 	 * Set up our DMA mask: try for 64-bit address masking first and
2988 	 * fall back to 32-bit if we can't get 64 bits ...
2989 	 */
2990 	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
2991 	if (err == 0) {
2992 		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
2993 		if (err) {
2994 			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for"
2995 				" coherent allocations\n");
2996 			goto err_release_regions;
2997 		}
2998 		pci_using_dac = 1;
2999 	} else {
3000 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3001 		if (err != 0) {
3002 			dev_err(&pdev->dev, "no usable DMA configuration\n");
3003 			goto err_release_regions;
3004 		}
3005 		pci_using_dac = 0;
3006 	}
3007 
3008 	/*
3009 	 * Enable bus mastering for the device ...
3010 	 */
3011 	pci_set_master(pdev);
3012 
3013 	/*
3014 	 * Allocate our adapter data structure and attach it to the device.
3015 	 */
3016 	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
3017 	if (!adapter) {
3018 		err = -ENOMEM;
3019 		goto err_release_regions;
3020 	}
3021 	pci_set_drvdata(pdev, adapter);
3022 	adapter->pdev = pdev;
3023 	adapter->pdev_dev = &pdev->dev;
3024 
3025 	adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
3026 				    (sizeof(struct mbox_cmd) *
3027 				     T4VF_OS_LOG_MBOX_CMDS),
3028 				    GFP_KERNEL);
3029 	if (!adapter->mbox_log) {
3030 		err = -ENOMEM;
3031 		goto err_free_adapter;
3032 	}
3033 	adapter->mbox_log->size = T4VF_OS_LOG_MBOX_CMDS;
3034 
3035 	/*
3036 	 * Initialize SMP data synchronization resources.
3037 	 */
3038 	spin_lock_init(&adapter->stats_lock);
3039 	spin_lock_init(&adapter->mbox_lock);
3040 	INIT_LIST_HEAD(&adapter->mlist.list);
3041 
3042 	/*
3043 	 * Map our I/O registers in BAR0.
3044 	 */
3045 	adapter->regs = pci_ioremap_bar(pdev, 0);
3046 	if (!adapter->regs) {
3047 		dev_err(&pdev->dev, "cannot map device registers\n");
3048 		err = -ENOMEM;
3049 		goto err_free_adapter;
3050 	}
3051 
3052 	/* Wait for the device to become ready before proceeding ...
3053 	 */
3054 	err = t4vf_prep_adapter(adapter);
3055 	if (err) {
3056 		dev_err(adapter->pdev_dev, "device didn't become ready:"
3057 			" err=%d\n", err);
3058 		goto err_unmap_bar0;
3059 	}
3060 
3061 	/* For T5 and later we want to use the new BAR-based User Doorbells,
3062 	 * so we need to map BAR2 here ...
3063 	 */
3064 	if (!is_t4(adapter->params.chip)) {
3065 		adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
3066 					   pci_resource_len(pdev, 2));
3067 		if (!adapter->bar2) {
3068 			dev_err(adapter->pdev_dev, "cannot map BAR2 doorbells\n");
3069 			err = -ENOMEM;
3070 			goto err_unmap_bar0;
3071 		}
3072 	}
3073 	/*
3074 	 * Initialize adapter level features.
3075 	 */
3076 	adapter->name = pci_name(pdev);
3077 	adapter->msg_enable = DFLT_MSG_ENABLE;
3078 
3079 	/* If possible, we use PCIe Relaxed Ordering Attribute to deliver
3080 	 * Ingress Packet Data to Free List Buffers in order to allow for
3081 	 * chipset performance optimizations between the Root Complex and
3082 	 * Memory Controllers.  (Messages to the associated Ingress Queue
3083 	 * notifying new Packet Placement in the Free Lists Buffers will be
3084 	 * sent without the Relaxed Ordering Attribute, thus guaranteeing that
3085 	 * all preceding PCIe Transaction Layer Packets will be processed
3086 	 * first.)  But some Root Complexes have various issues with Upstream
3087 	 * Transaction Layer Packets with the Relaxed Ordering Attribute set.
3088 	 * PCIe devices under such Root Complexes will have the Relaxed
3089 	 * Ordering bit cleared in their configuration space, so we check our
3090 	 * PCIe configuration space to see if it's flagged with advice against
3091 	 * using Relaxed Ordering.
3092 	 */
3093 	if (!pcie_relaxed_ordering_enabled(pdev))
3094 		adapter->flags |= CXGB4VF_ROOT_NO_RELAXED_ORDERING;
3095 
3096 	err = adap_init0(adapter);
3097 	if (err)
3098 		dev_err(&pdev->dev,
3099 			"Adapter initialization failed, error %d. Continuing in debug mode\n",
3100 			err);
3101 
3102 	/* Initialize hash mac addr list */
3103 	INIT_LIST_HEAD(&adapter->mac_hlist);
3104 
3105 	/*
3106 	 * Allocate our "adapter ports" and stitch everything together.
3107 	 */
3108 	pmask = cxgb4vf_get_port_mask(adapter);
3109 	for_each_port(adapter, pidx) {
3110 		int port_id, viid;
3111 		u8 mac[ETH_ALEN];
3112 		unsigned int naddr = 1;
3113 
3114 		/*
3115 		 * We simplistically allocate our virtual interfaces
3116 		 * sequentially across the port numbers to which we have
3117 		 * access rights.  This should be configurable in some manner
3118 		 * ...
3119 		 */
3120 		if (pmask == 0)
3121 			break;
3122 		port_id = ffs(pmask) - 1;
3123 		pmask &= ~(1 << port_id);
3124 
3125 		/*
3126 		 * Allocate our network device and stitch things together.
3127 		 */
3128 		netdev = alloc_etherdev_mq(sizeof(struct port_info),
3129 					   MAX_PORT_QSETS);
3130 		if (netdev == NULL) {
3131 			err = -ENOMEM;
3132 			goto err_free_dev;
3133 		}
3134 		adapter->port[pidx] = netdev;
3135 		SET_NETDEV_DEV(netdev, &pdev->dev);
3136 		pi = netdev_priv(netdev);
3137 		pi->adapter = adapter;
3138 		pi->pidx = pidx;
3139 		pi->port_id = port_id;
3140 
3141 		/*
3142 		 * Initialize the starting state of our "port" and register
3143 		 * it.
3144 		 */
3145 		pi->xact_addr_filt = -1;
3146 		netdev->irq = pdev->irq;
3147 
3148 		netdev->hw_features = NETIF_F_SG | TSO_FLAGS | NETIF_F_GRO |
3149 			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3150 			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
3151 		netdev->features = netdev->hw_features;
3152 		if (pci_using_dac)
3153 			netdev->features |= NETIF_F_HIGHDMA;
3154 		netdev->vlan_features = netdev->features & VLAN_FEAT;
3155 
3156 		netdev->priv_flags |= IFF_UNICAST_FLT;
3157 		netdev->min_mtu = 81;
3158 		netdev->max_mtu = ETH_MAX_MTU;
3159 
3160 		netdev->netdev_ops = &cxgb4vf_netdev_ops;
3161 		netdev->ethtool_ops = &cxgb4vf_ethtool_ops;
3162 		netdev->dev_port = pi->port_id;
3163 
3164 		/*
3165 		 * If we haven't been able to contact the firmware, there's
3166 		 * nothing else we can do for this "port" ...
3167 		 */
3168 		if (!(adapter->flags & CXGB4VF_FW_OK))
3169 			continue;
3170 
3171 		viid = t4vf_alloc_vi(adapter, port_id);
3172 		if (viid < 0) {
3173 			dev_err(&pdev->dev,
3174 				"cannot allocate VI for port %d: err=%d\n",
3175 				port_id, viid);
3176 			err = viid;
3177 			goto err_free_dev;
3178 		}
3179 		pi->viid = viid;
3180 
3181 		/*
3182 		 * Initialize the hardware/software state for the port.
3183 		 */
3184 		err = t4vf_port_init(adapter, pidx);
3185 		if (err) {
3186 			dev_err(&pdev->dev, "cannot initialize port %d\n",
3187 				pidx);
3188 			goto err_free_dev;
3189 		}
3190 
3191 		err = t4vf_get_vf_mac_acl(adapter, port_id, &naddr, mac);
3192 		if (err) {
3193 			dev_err(&pdev->dev,
3194 				"unable to determine MAC ACL address, "
3195 				"continuing anyway.. (status %d)\n", err);
3196 		} else if (naddr && adapter->params.vfres.nvi == 1) {
3197 			struct sockaddr addr;
3198 
3199 			ether_addr_copy(addr.sa_data, mac);
3200 			err = cxgb4vf_set_mac_addr(netdev, &addr);
3201 			if (err) {
3202 				dev_err(&pdev->dev,
3203 					"unable to set MAC address %pM\n",
3204 					mac);
3205 				goto err_free_dev;
3206 			}
3207 			dev_info(&pdev->dev,
3208 				 "Using assigned MAC ACL: %pM\n", mac);
3209 		}
3210 	}
3211 
3212 	/* See what interrupts we'll be using.  If we've been configured to
3213 	 * use MSI-X interrupts, try to enable them but fall back to using
3214 	 * MSI interrupts if we can't enable MSI-X interrupts.  If we can't
3215 	 * get MSI interrupts we bail with the error.
3216 	 */
3217 	if (msi == MSI_MSIX && enable_msix(adapter) == 0)
3218 		adapter->flags |= CXGB4VF_USING_MSIX;
3219 	else {
3220 		if (msi == MSI_MSIX) {
3221 			dev_info(adapter->pdev_dev,
3222 				 "Unable to use MSI-X Interrupts; falling "
3223 				 "back to MSI Interrupts\n");
3224 
3225 			/* We're going to need a Forwarded Interrupt Queue so
3226 			 * that may cut into how many Queue Sets we can
3227 			 * support.
3228 			 */
3229 			msi = MSI_MSI;
3230 			size_nports_qsets(adapter);
3231 		}
3232 		err = pci_enable_msi(pdev);
3233 		if (err) {
3234 			dev_err(&pdev->dev, "Unable to allocate MSI Interrupts;"
3235 				" err=%d\n", err);
3236 			goto err_free_dev;
3237 		}
3238 		adapter->flags |= CXGB4VF_USING_MSI;
3239 	}
3240 
3241 	/* Now that we know how many "ports" we have and what interrupt
3242 	 * mechanism we're going to use, we can configure our queue resources.
3243 	 */
3244 	cfg_queues(adapter);
3245 
3246 	/*
3247 	 * The "card" is now ready to go.  If any errors occur during device
3248 	 * registration we do not fail the whole "card" but rather proceed
3249 	 * only with the ports we manage to register successfully.  However we
3250 	 * must register at least one net device.
3251 	 */
3252 	for_each_port(adapter, pidx) {
3253 		struct port_info *pi = netdev_priv(adapter->port[pidx]);
3254 		netdev = adapter->port[pidx];
3255 		if (netdev == NULL)
3256 			continue;
3257 
3258 		netif_set_real_num_tx_queues(netdev, pi->nqsets);
3259 		netif_set_real_num_rx_queues(netdev, pi->nqsets);
3260 
3261 		err = register_netdev(netdev);
3262 		if (err) {
3263 			dev_warn(&pdev->dev, "cannot register net device %s,"
3264 				 " skipping\n", netdev->name);
3265 			continue;
3266 		}
3267 
3268 		netif_carrier_off(netdev);
3269 		set_bit(pidx, &adapter->registered_device_map);
3270 	}
3271 	if (adapter->registered_device_map == 0) {
3272 		dev_err(&pdev->dev, "could not register any net devices\n");
3273 		goto err_disable_interrupts;
3274 	}
3275 
3276 	/*
3277 	 * Set up our debugfs entries.
3278 	 */
3279 	if (!IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) {
3280 		adapter->debugfs_root =
3281 			debugfs_create_dir(pci_name(pdev),
3282 					   cxgb4vf_debugfs_root);
3283 		setup_debugfs(adapter);
3284 	}
3285 
3286 	/*
3287 	 * Print a short notice on the existence and configuration of the new
3288 	 * VF network device ...
3289 	 */
3290 	for_each_port(adapter, pidx) {
3291 		dev_info(adapter->pdev_dev, "%s: Chelsio VF NIC PCIe %s\n",
3292 			 adapter->port[pidx]->name,
3293 			 (adapter->flags & CXGB4VF_USING_MSIX) ? "MSI-X" :
3294 			 (adapter->flags & CXGB4VF_USING_MSI)  ? "MSI" : "");
3295 	}
3296 
3297 	/*
3298 	 * Return success!
3299 	 */
3300 	return 0;
3301 
3302 	/*
3303 	 * Error recovery and exit code.  Unwind state that's been created
3304 	 * so far and return the error.
3305 	 */
3306 err_disable_interrupts:
3307 	if (adapter->flags & CXGB4VF_USING_MSIX) {
3308 		pci_disable_msix(adapter->pdev);
3309 		adapter->flags &= ~CXGB4VF_USING_MSIX;
3310 	} else if (adapter->flags & CXGB4VF_USING_MSI) {
3311 		pci_disable_msi(adapter->pdev);
3312 		adapter->flags &= ~CXGB4VF_USING_MSI;
3313 	}
3314 
3315 err_free_dev:
3316 	for_each_port(adapter, pidx) {
3317 		netdev = adapter->port[pidx];
3318 		if (netdev == NULL)
3319 			continue;
3320 		pi = netdev_priv(netdev);
3321 		if (pi->viid)
3322 			t4vf_free_vi(adapter, pi->viid);
3323 		if (test_bit(pidx, &adapter->registered_device_map))
3324 			unregister_netdev(netdev);
3325 		free_netdev(netdev);
3326 	}
3327 
3328 	if (!is_t4(adapter->params.chip))
3329 		iounmap(adapter->bar2);
3330 
3331 err_unmap_bar0:
3332 	iounmap(adapter->regs);
3333 
3334 err_free_adapter:
3335 	kfree(adapter->mbox_log);
3336 	kfree(adapter);
3337 
3338 err_release_regions:
3339 	pci_release_regions(pdev);
3340 	pci_clear_master(pdev);
3341 
3342 err_disable_device:
3343 	pci_disable_device(pdev);
3344 
3345 	return err;
3346 }
3347 
3348 /*
3349  * "Remove" a device: tear down all kernel and driver state created in the
3350  * "probe" routine and quiesce the device (disable interrupts, etc.).  (Note
3351  * that this is called "remove_one" in the PF Driver.)
3352  */
3353 static void cxgb4vf_pci_remove(struct pci_dev *pdev)
3354 {
3355 	struct adapter *adapter = pci_get_drvdata(pdev);
3356 	struct hash_mac_addr *entry, *tmp;
3357 
3358 	/*
3359 	 * Tear down driver state associated with device.
3360 	 */
3361 	if (adapter) {
3362 		int pidx;
3363 
3364 		/*
3365 		 * Stop all of our activity.  Unregister network port,
3366 		 * disable interrupts, etc.
3367 		 */
3368 		for_each_port(adapter, pidx)
3369 			if (test_bit(pidx, &adapter->registered_device_map))
3370 				unregister_netdev(adapter->port[pidx]);
3371 		t4vf_sge_stop(adapter);
3372 		if (adapter->flags & CXGB4VF_USING_MSIX) {
3373 			pci_disable_msix(adapter->pdev);
3374 			adapter->flags &= ~CXGB4VF_USING_MSIX;
3375 		} else if (adapter->flags & CXGB4VF_USING_MSI) {
3376 			pci_disable_msi(adapter->pdev);
3377 			adapter->flags &= ~CXGB4VF_USING_MSI;
3378 		}
3379 
3380 		/*
3381 		 * Tear down our debugfs entries.
3382 		 */
3383 		if (!IS_ERR_OR_NULL(adapter->debugfs_root)) {
3384 			cleanup_debugfs(adapter);
3385 			debugfs_remove_recursive(adapter->debugfs_root);
3386 		}
3387 
3388 		/*
3389 		 * Free all of the various resources which we've acquired ...
3390 		 */
3391 		t4vf_free_sge_resources(adapter);
3392 		for_each_port(adapter, pidx) {
3393 			struct net_device *netdev = adapter->port[pidx];
3394 			struct port_info *pi;
3395 
3396 			if (netdev == NULL)
3397 				continue;
3398 
3399 			pi = netdev_priv(netdev);
3400 			if (pi->viid)
3401 				t4vf_free_vi(adapter, pi->viid);
3402 			free_netdev(netdev);
3403 		}
3404 		iounmap(adapter->regs);
3405 		if (!is_t4(adapter->params.chip))
3406 			iounmap(adapter->bar2);
3407 		kfree(adapter->mbox_log);
3408 		list_for_each_entry_safe(entry, tmp, &adapter->mac_hlist,
3409 					 list) {
3410 			list_del(&entry->list);
3411 			kfree(entry);
3412 		}
3413 		kfree(adapter);
3414 	}
3415 
3416 	/*
3417 	 * Disable the device and release its PCI resources.
3418 	 */
3419 	pci_disable_device(pdev);
3420 	pci_clear_master(pdev);
3421 	pci_release_regions(pdev);
3422 }
3423 
3424 /*
3425  * "Shutdown" quiesces the device, stopping Ingress Packet and Interrupt
3426  * delivery.
3427  */
3428 static void cxgb4vf_pci_shutdown(struct pci_dev *pdev)
3429 {
3430 	struct adapter *adapter;
3431 	int pidx;
3432 
3433 	adapter = pci_get_drvdata(pdev);
3434 	if (!adapter)
3435 		return;
3436 
3437 	/* Disable all Virtual Interfaces.  This will shut down the
3438 	 * delivery of all ingress packets into the chip for these
3439 	 * Virtual Interfaces.
3440 	 */
3441 	for_each_port(adapter, pidx)
3442 		if (test_bit(pidx, &adapter->registered_device_map))
3443 			unregister_netdev(adapter->port[pidx]);
3444 
3445 	/* Stop all SGE Queue processing and disable our interrupts so that
3446 	 * no further DMA or Interrupt delivery can occur.
3447 	 */
3448 	t4vf_sge_stop(adapter);
3449 	if (adapter->flags & CXGB4VF_USING_MSIX) {
3450 		pci_disable_msix(adapter->pdev);
3451 		adapter->flags &= ~CXGB4VF_USING_MSIX;
3452 	} else if (adapter->flags & CXGB4VF_USING_MSI) {
3453 		pci_disable_msi(adapter->pdev);
3454 		adapter->flags &= ~CXGB4VF_USING_MSI;
3455 	}
3456 
3457 	/*
3458 	 * Free up all Queues which will prevent further DMA and
3459 	 * Interrupts allowing various internal pathways to drain.
3460 	 */
3461 	t4vf_free_sge_resources(adapter);
3462 	pci_set_drvdata(pdev, NULL);
3463 }
3464 
3465 /* Macros needed to support the PCI Device ID Table ...
3466  */
3467 #define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
3468 	static const struct pci_device_id cxgb4vf_pci_tbl[] = {
3469 #define CH_PCI_DEVICE_ID_FUNCTION	0x8
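/* CH_PCI_DEVICE_ID_FUNCTION tells the shared ../cxgb4/t4_pci_id_tbl.h which
 * PCI Function's Device IDs to emit; 0x8 selects the SR-IOV Virtual Function
 * variants of each adapter.
 */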
3470 
3471 #define CH_PCI_ID_TABLE_ENTRY(devid) \
3472 		{ PCI_VDEVICE(CHELSIO, (devid)), 0 }
3473 
3474 #define CH_PCI_DEVICE_ID_TABLE_DEFINE_END { 0, } }
3475 
3476 #include "../cxgb4/t4_pci_id_tbl.h"
3477 
3478 MODULE_DESCRIPTION(DRV_DESC);
3479 MODULE_AUTHOR("Chelsio Communications");
3480 MODULE_LICENSE("Dual BSD/GPL");
3481 MODULE_DEVICE_TABLE(pci, cxgb4vf_pci_tbl);
3482 
3483 static struct pci_driver cxgb4vf_driver = {
3484 	.name		= KBUILD_MODNAME,
3485 	.id_table	= cxgb4vf_pci_tbl,
3486 	.probe		= cxgb4vf_pci_probe,
3487 	.remove		= cxgb4vf_pci_remove,
3488 	.shutdown	= cxgb4vf_pci_shutdown,
3489 };
3490 
3491 /*
3492  * Initialize global driver state.
3493  */
3494 static int __init cxgb4vf_module_init(void)
3495 {
3496 	int ret;
3497 
3498 	/*
3499 	 * Vet our module parameters.
3500 	 */
3501 	if (msi != MSI_MSIX && msi != MSI_MSI) {
3502 		pr_warn("bad module parameter msi=%d; must be %d (MSI-X or MSI) or %d (MSI)\n",
3503 			msi, MSI_MSIX, MSI_MSI);
3504 		return -EINVAL;
3505 	}
3506 
3507 	/* Debugfs support is optional, debugfs will warn if this fails */
3508 	cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
3509 
3510 	ret = pci_register_driver(&cxgb4vf_driver);
3511 	if (ret < 0)
3512 		debugfs_remove(cxgb4vf_debugfs_root);
3513 	return ret;
3514 }
3515 
3516 /*
3517  * Tear down global driver state.
3518  */
3519 static void __exit cxgb4vf_module_exit(void)
3520 {
3521 	pci_unregister_driver(&cxgb4vf_driver);
3522 	debugfs_remove(cxgb4vf_debugfs_root);
3523 }
3524 
3525 module_init(cxgb4vf_module_init);
3526 module_exit(cxgb4vf_module_exit);
3527