xref: /openbmc/linux/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c (revision 9dae47aba0a055f761176d9297371d5bb24289ec)
1 /*
2  * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
3  * driver for Linux.
4  *
5  * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
6  *
7  * This software is available to you under a choice of one of two
8  * licenses.  You may choose to be licensed under the terms of the GNU
9  * General Public License (GPL) Version 2, available from the file
10  * COPYING in the main directory of this source tree, or the
11  * OpenIB.org BSD license below:
12  *
13  *     Redistribution and use in source and binary forms, with or
14  *     without modification, are permitted provided that the following
15  *     conditions are met:
16  *
17  *      - Redistributions of source code must retain the above
18  *        copyright notice, this list of conditions and the following
19  *        disclaimer.
20  *
21  *      - Redistributions in binary form must reproduce the above
22  *        copyright notice, this list of conditions and the following
23  *        disclaimer in the documentation and/or other materials
24  *        provided with the distribution.
25  *
26  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33  * SOFTWARE.
34  */
35 
36 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
37 
38 #include <linux/module.h>
39 #include <linux/moduleparam.h>
40 #include <linux/init.h>
41 #include <linux/pci.h>
42 #include <linux/dma-mapping.h>
43 #include <linux/netdevice.h>
44 #include <linux/etherdevice.h>
45 #include <linux/debugfs.h>
46 #include <linux/ethtool.h>
47 #include <linux/mdio.h>
48 
49 #include "t4vf_common.h"
50 #include "t4vf_defs.h"
51 
52 #include "../cxgb4/t4_regs.h"
53 #include "../cxgb4/t4_msg.h"
54 
55 /*
56  * Generic information about the driver.
57  */
58 #define DRV_VERSION "2.0.0-ko"
59 #define DRV_DESC "Chelsio T4/T5/T6 Virtual Function (VF) Network Driver"
60 
61 /*
62  * Module Parameters.
63  * ==================
64  */
65 
66 /*
67  * Default ethtool "message level" for adapters.
68  */
69 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
70 			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
71 			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
72 
73 /*
74  * The driver uses the best interrupt scheme available on a platform in the
75  * order MSI-X then MSI.  This parameter determines which of these schemes the
76  * driver may consider as follows:
77  *
78  *     msi = 2: choose from among MSI-X and MSI
79  *     msi = 1: only consider MSI interrupts
80  *
81  * Note that unlike the Physical Function driver, this Virtual Function driver
82  * does _not_ support legacy INTx interrupts (this limitation is mandated by
83  * the PCI-E SR-IOV standard).
84  */
85 #define MSI_MSIX	2
86 #define MSI_MSI		1
87 #define MSI_DEFAULT	MSI_MSIX
88 
89 static int msi = MSI_DEFAULT;
90 
91 module_param(msi, int, 0644);
92 MODULE_PARM_DESC(msi, "whether to use MSI-X or MSI");
93 
94 /*
95  * Fundamental constants.
96  * ======================
97  */
98 
99 enum {
100 	MAX_TXQ_ENTRIES		= 16384,
101 	MAX_RSPQ_ENTRIES	= 16384,
102 	MAX_RX_BUFFERS		= 16384,
103 
104 	MIN_TXQ_ENTRIES		= 32,
105 	MIN_RSPQ_ENTRIES	= 128,
106 	MIN_FL_ENTRIES		= 16,
107 
108 	/*
109 	 * For purposes of manipulating the Free List size we need to
110 	 * recognize that Free Lists are actually Egress Queues (the host
111 	 * produces free buffers which the hardware consumes), Egress Queues
112 	 * indices are all in units of Egress Context Units bytes, and free
113 	 * list entries are 64-bit PCI DMA addresses.  And since the state of
114 	 * the Producer Index == the Consumer Index implies an EMPTY list, we
115 	 * always have at least one Egress Unit's worth of Free List entries
116 	 * unused.  See sge.c for more details ...
117 	 */
118 	EQ_UNIT = SGE_EQ_IDXSIZE,
119 	FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
120 	MIN_FL_RESID = FL_PER_EQ_UNIT,
121 };
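
/*
 * A quick worked example of the arithmetic above, assuming the usual
 * SGE_EQ_IDXSIZE of 64 bytes for these chips:
 *
 *	EQ_UNIT        = 64 bytes
 *	FL_PER_EQ_UNIT = 64 / sizeof(__be64) = 8 Free List entries
 *	MIN_FL_RESID   = 8 entries held in reserve so that a completely
 *			 full Free List never wraps the Producer Index
 *			 onto the Consumer Index (which would read back
 *			 as an EMPTY list).
 */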
122 
123 /*
124  * Global driver state.
125  * ====================
126  */
127 
128 static struct dentry *cxgb4vf_debugfs_root;
129 
130 /*
131  * OS "Callback" functions.
132  * ========================
133  */
134 
135 /*
136  * The link status has changed on the indicated "port" (Virtual Interface).
137  */
138 void t4vf_os_link_changed(struct adapter *adapter, int pidx, int link_ok)
139 {
140 	struct net_device *dev = adapter->port[pidx];
141 
142 	/*
143 	 * If the port is disabled or the current recorded "link up"
144 	 * status matches the new status, just return.
145 	 */
146 	if (!netif_running(dev) || link_ok == netif_carrier_ok(dev))
147 		return;
148 
149 	/*
150 	 * Tell the OS that the link status has changed and print a short
151 	 * informative message on the console about the event.
152 	 */
153 	if (link_ok) {
154 		const char *s;
155 		const char *fc;
156 		const struct port_info *pi = netdev_priv(dev);
157 
158 		netif_carrier_on(dev);
159 
160 		switch (pi->link_cfg.speed) {
161 		case 100:
162 			s = "100Mbps";
163 			break;
164 		case 1000:
165 			s = "1Gbps";
166 			break;
167 		case 10000:
168 			s = "10Gbps";
169 			break;
170 		case 25000:
171 			s = "25Gbps";
172 			break;
173 		case 40000:
174 			s = "40Gbps";
175 			break;
176 		case 100000:
177 			s = "100Gbps";
178 			break;
179 
180 		default:
181 			s = "unknown";
182 			break;
183 		}
184 
185 		switch ((int)pi->link_cfg.fc) {
186 		case PAUSE_RX:
187 			fc = "RX";
188 			break;
189 
190 		case PAUSE_TX:
191 			fc = "TX";
192 			break;
193 
194 		case PAUSE_RX | PAUSE_TX:
195 			fc = "RX/TX";
196 			break;
197 
198 		default:
199 			fc = "no";
200 			break;
201 		}
202 
203 		netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s, fc);
204 	} else {
205 		netif_carrier_off(dev);
206 		netdev_info(dev, "link down\n");
207 	}
208 }
209 
210 /*
211  * The port module type has changed on the indicated "port" (Virtual
212  * Interface).
213  */
214 void t4vf_os_portmod_changed(struct adapter *adapter, int pidx)
215 {
216 	static const char * const mod_str[] = {
217 		NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
218 	};
219 	const struct net_device *dev = adapter->port[pidx];
220 	const struct port_info *pi = netdev_priv(dev);
221 
222 	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
223 		dev_info(adapter->pdev_dev, "%s: port module unplugged\n",
224 			 dev->name);
225 	else if (pi->mod_type < ARRAY_SIZE(mod_str))
226 		dev_info(adapter->pdev_dev, "%s: %s port module inserted\n",
227 			 dev->name, mod_str[pi->mod_type]);
228 	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
229 		dev_info(adapter->pdev_dev, "%s: unsupported optical port "
230 			 "module inserted\n", dev->name);
231 	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
232 		dev_info(adapter->pdev_dev, "%s: unknown port module inserted,"
233 			 "forcing TWINAX\n", dev->name);
234 	else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR)
235 		dev_info(adapter->pdev_dev, "%s: transceiver module error\n",
236 			 dev->name);
237 	else
238 		dev_info(adapter->pdev_dev, "%s: unknown module type %d "
239 			 "inserted\n", dev->name, pi->mod_type);
240 }
241 
242 /*
243  * Net device operations.
244  * ======================
245  */
246 
247 
248 
249 
250 /*
251  * Perform the MAC and PHY actions needed to enable a "port" (Virtual
252  * Interface).
253  */
254 static int link_start(struct net_device *dev)
255 {
256 	int ret;
257 	struct port_info *pi = netdev_priv(dev);
258 
259 	/*
260 	 * We do not set address filters and promiscuity here, the stack does
261 	 * that step explicitly. Enable vlan accel.
262 	 */
263 	ret = t4vf_set_rxmode(pi->adapter, pi->viid, dev->mtu, -1, -1, -1, 1,
264 			      true);
265 	if (ret == 0) {
266 		ret = t4vf_change_mac(pi->adapter, pi->viid,
267 				      pi->xact_addr_filt, dev->dev_addr, true);
268 		if (ret >= 0) {
269 			pi->xact_addr_filt = ret;
270 			ret = 0;
271 		}
272 	}
273 
274 	/*
275 	 * We don't need to actually "start the link" itself since the
276 	 * firmware will do that for us when the first Virtual Interface
277 	 * is enabled on a port.
278 	 */
279 	if (ret == 0)
280 		ret = t4vf_enable_vi(pi->adapter, pi->viid, true, true);
281 	return ret;
282 }
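
/*
 * Note on link_start() above: on success t4vf_change_mac() returns the
 * index of the exact-match MAC filter it programmed, which is why a
 * non-negative return value is cached in pi->xact_addr_filt (so later
 * address changes can rewrite the same filter) and then squashed to 0
 * before being handed back to the caller.
 */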
283 
284 /*
285  * Name the MSI-X interrupts.
286  */
287 static void name_msix_vecs(struct adapter *adapter)
288 {
289 	int namelen = sizeof(adapter->msix_info[0].desc) - 1;
290 	int pidx;
291 
292 	/*
293 	 * Firmware events.
294 	 */
295 	snprintf(adapter->msix_info[MSIX_FW].desc, namelen,
296 		 "%s-FWeventq", adapter->name);
297 	adapter->msix_info[MSIX_FW].desc[namelen] = 0;
298 
299 	/*
300 	 * Ethernet queues.
301 	 */
302 	for_each_port(adapter, pidx) {
303 		struct net_device *dev = adapter->port[pidx];
304 		const struct port_info *pi = netdev_priv(dev);
305 		int qs, msi;
306 
307 		for (qs = 0, msi = MSIX_IQFLINT; qs < pi->nqsets; qs++, msi++) {
308 			snprintf(adapter->msix_info[msi].desc, namelen,
309 				 "%s-%d", dev->name, qs);
310 			adapter->msix_info[msi].desc[namelen] = 0;
311 		}
312 	}
313 }
314 
315 /*
316  * Request all of our MSI-X resources.
317  */
318 static int request_msix_queue_irqs(struct adapter *adapter)
319 {
320 	struct sge *s = &adapter->sge;
321 	int rxq, msi, err;
322 
323 	/*
324 	 * Firmware events.
325 	 */
326 	err = request_irq(adapter->msix_info[MSIX_FW].vec, t4vf_sge_intr_msix,
327 			  0, adapter->msix_info[MSIX_FW].desc, &s->fw_evtq);
328 	if (err)
329 		return err;
330 
331 	/*
332 	 * Ethernet queues.
333 	 */
334 	msi = MSIX_IQFLINT;
335 	for_each_ethrxq(s, rxq) {
336 		err = request_irq(adapter->msix_info[msi].vec,
337 				  t4vf_sge_intr_msix, 0,
338 				  adapter->msix_info[msi].desc,
339 				  &s->ethrxq[rxq].rspq);
340 		if (err)
341 			goto err_free_irqs;
342 		msi++;
343 	}
344 	return 0;
345 
346 err_free_irqs:
347 	while (--rxq >= 0)
348 		free_irq(adapter->msix_info[--msi].vec, &s->ethrxq[rxq].rspq);
349 	free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq);
350 	return err;
351 }
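
/*
 * Note on the unwind above: when request_irq() fails for Ethernet
 * queue "rxq", vectors [MSIX_IQFLINT, msi) have already been acquired,
 * so the error path walks rxq and msi back down in lock step
 * (--rxq/--msi) and finally releases the firmware event queue vector
 * which was requested first.
 */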
352 
353 /*
354  * Free our MSI-X resources.
355  */
356 static void free_msix_queue_irqs(struct adapter *adapter)
357 {
358 	struct sge *s = &adapter->sge;
359 	int rxq, msi;
360 
361 	free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq);
362 	msi = MSIX_IQFLINT;
363 	for_each_ethrxq(s, rxq)
364 		free_irq(adapter->msix_info[msi++].vec,
365 			 &s->ethrxq[rxq].rspq);
366 }
367 
368 /*
369  * Turn on NAPI and start up interrupts on a response queue.
370  */
371 static void qenable(struct sge_rspq *rspq)
372 {
373 	napi_enable(&rspq->napi);
374 
375 	/*
376 	 * 0-increment the Going To Sleep register to start the timer and
377 	 * enable interrupts.
378 	 */
379 	t4_write_reg(rspq->adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
380 		     CIDXINC_V(0) |
381 		     SEINTARM_V(rspq->intr_params) |
382 		     INGRESSQID_V(rspq->cntxt_id));
383 }
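
/*
 * A minimal sketch of what the "0-increment" write above encodes,
 * assuming the usual SGE_VF_GTS doorbell layout:
 *
 *	CIDXINC_V(0)                    - acknowledge nothing,
 *	SEINTARM_V(rspq->intr_params)   - (re)arm the holdoff timer/count,
 *	INGRESSQID_V(rspq->cntxt_id)    - ... for this ingress queue.
 *
 * So the write starts interrupt generation without consuming any
 * response queue entries.
 */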
384 
385 /*
386  * Enable NAPI scheduling and interrupt generation for all Receive Queues.
387  */
388 static void enable_rx(struct adapter *adapter)
389 {
390 	int rxq;
391 	struct sge *s = &adapter->sge;
392 
393 	for_each_ethrxq(s, rxq)
394 		qenable(&s->ethrxq[rxq].rspq);
395 	qenable(&s->fw_evtq);
396 
397 	/*
398 	 * The interrupt queue doesn't use NAPI so we do the 0-increment of
399 	 * its Going To Sleep register here to get it started.
400 	 */
401 	if (adapter->flags & USING_MSI)
402 		t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
403 			     CIDXINC_V(0) |
404 			     SEINTARM_V(s->intrq.intr_params) |
405 			     INGRESSQID_V(s->intrq.cntxt_id));
406 
407 }
408 
409 /*
410  * Wait until all NAPI handlers are descheduled.
411  */
412 static void quiesce_rx(struct adapter *adapter)
413 {
414 	struct sge *s = &adapter->sge;
415 	int rxq;
416 
417 	for_each_ethrxq(s, rxq)
418 		napi_disable(&s->ethrxq[rxq].rspq.napi);
419 	napi_disable(&s->fw_evtq.napi);
420 }
421 
422 /*
423  * Response queue handler for the firmware event queue.
424  */
425 static int fwevtq_handler(struct sge_rspq *rspq, const __be64 *rsp,
426 			  const struct pkt_gl *gl)
427 {
428 	/*
429 	 * Extract response opcode and get pointer to CPL message body.
430 	 */
431 	struct adapter *adapter = rspq->adapter;
432 	u8 opcode = ((const struct rss_header *)rsp)->opcode;
433 	void *cpl = (void *)(rsp + 1);
434 
435 	switch (opcode) {
436 	case CPL_FW6_MSG: {
437 		/*
438 		 * We've received an asynchronous message from the firmware.
439 		 */
440 		const struct cpl_fw6_msg *fw_msg = cpl;
441 		if (fw_msg->type == FW6_TYPE_CMD_RPL)
442 			t4vf_handle_fw_rpl(adapter, fw_msg->data);
443 		break;
444 	}
445 
446 	case CPL_FW4_MSG: {
447 		/* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
448 		 */
449 		const struct cpl_sge_egr_update *p = (void *)(rsp + 3);
450 		opcode = CPL_OPCODE_G(ntohl(p->opcode_qid));
451 		if (opcode != CPL_SGE_EGR_UPDATE) {
452 			dev_err(adapter->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n"
453 				, opcode);
454 			break;
455 		}
456 		cpl = (void *)p;
457 		/*FALLTHROUGH*/
458 	}
459 
460 	case CPL_SGE_EGR_UPDATE: {
461 		/*
462 		 * We've received an Egress Queue Status Update message.  We
463 		 * get these, if the SGE is configured to send these when the
464 		 * firmware passes certain points in processing our TX
465 		 * Ethernet Queue or if we make an explicit request for one.
466 		 * We use these updates to determine when we may need to
467 		 * restart a TX Ethernet Queue which was stopped for lack of
468 		 * free TX Queue Descriptors ...
469 		 */
470 		const struct cpl_sge_egr_update *p = cpl;
471 		unsigned int qid = EGR_QID_G(be32_to_cpu(p->opcode_qid));
472 		struct sge *s = &adapter->sge;
473 		struct sge_txq *tq;
474 		struct sge_eth_txq *txq;
475 		unsigned int eq_idx;
476 
477 		/*
478 		 * Perform sanity checking on the Queue ID to make sure it
479 		 * really refers to one of our TX Ethernet Egress Queues which
480 		 * is active and matches the queue's ID.  None of these error
481 		 * conditions should ever happen so we may want to either make
482 		 * them fatal and/or conditionalized under DEBUG.
483 		 */
484 		eq_idx = EQ_IDX(s, qid);
485 		if (unlikely(eq_idx >= MAX_EGRQ)) {
486 			dev_err(adapter->pdev_dev,
487 				"Egress Update QID %d out of range\n", qid);
488 			break;
489 		}
490 		tq = s->egr_map[eq_idx];
491 		if (unlikely(tq == NULL)) {
492 			dev_err(adapter->pdev_dev,
493 				"Egress Update QID %d TXQ=NULL\n", qid);
494 			break;
495 		}
496 		txq = container_of(tq, struct sge_eth_txq, q);
497 		if (unlikely(tq->abs_id != qid)) {
498 			dev_err(adapter->pdev_dev,
499 				"Egress Update QID %d refers to TXQ %d\n",
500 				qid, tq->abs_id);
501 			break;
502 		}
503 
504 		/*
505 		 * Restart a stopped TX Queue which has less than half of its
506 		 * TX ring in use ...
507 		 */
508 		txq->q.restarts++;
509 		netif_tx_wake_queue(txq->txq);
510 		break;
511 	}
512 
513 	default:
514 		dev_err(adapter->pdev_dev,
515 			"unexpected CPL %#x on FW event queue\n", opcode);
516 	}
517 
518 	return 0;
519 }
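
/*
 * Layout note for the CPL_FW4_MSG case above, assuming the inner
 * message carries its own RSS header: rsp + 0 is the outer RSS header,
 * rsp + 1 the CPL_FW4_MSG header, rsp + 2 the encapsulated RSS header,
 * and rsp + 3 the encapsulated CPL_SGE_EGR_UPDATE; which is why "p" is
 * computed from (rsp + 3) rather than from "cpl".
 */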
520 
521 /*
522  * Allocate SGE TX/RX response queues.  Determine how many sets of SGE queues
523  * to use and initializes them.  We support multiple "Queue Sets" per port if
524  * we have MSI-X, otherwise just one queue set per port.
525  */
526 static int setup_sge_queues(struct adapter *adapter)
527 {
528 	struct sge *s = &adapter->sge;
529 	int err, pidx, msix;
530 
531 	/*
532 	 * Clear "Queue Set" Free List Starving and TX Queue Mapping Error
533 	 * state.
534 	 */
535 	bitmap_zero(s->starving_fl, MAX_EGRQ);
536 
537 	/*
538 	 * If we're using MSI interrupt mode we need to set up a "forwarded
539 	 * interrupt" queue which we'll set up with our MSI vector.  The rest
540 	 * of the ingress queues will be set up to forward their interrupts to
541 	 * this queue ...  This must be first since t4vf_sge_alloc_rxq() uses
542 	 * the intrq's queue ID as the interrupt forwarding queue for the
543 	 * subsequent calls ...
544 	 */
545 	if (adapter->flags & USING_MSI) {
546 		err = t4vf_sge_alloc_rxq(adapter, &s->intrq, false,
547 					 adapter->port[0], 0, NULL, NULL);
548 		if (err)
549 			goto err_free_queues;
550 	}
551 
552 	/*
553 	 * Allocate our ingress queue for asynchronous firmware messages.
554 	 */
555 	err = t4vf_sge_alloc_rxq(adapter, &s->fw_evtq, true, adapter->port[0],
556 				 MSIX_FW, NULL, fwevtq_handler);
557 	if (err)
558 		goto err_free_queues;
559 
560 	/*
561 	 * Allocate each "port"'s initial Queue Sets.  These can be changed
562 	 * later on ... up to the point where any interface on the adapter is
563 	 * brought up at which point lots of things get nailed down
564 	 * permanently ...
565 	 */
566 	msix = MSIX_IQFLINT;
567 	for_each_port(adapter, pidx) {
568 		struct net_device *dev = adapter->port[pidx];
569 		struct port_info *pi = netdev_priv(dev);
570 		struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset];
571 		struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset];
572 		int qs;
573 
574 		for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
575 			err = t4vf_sge_alloc_rxq(adapter, &rxq->rspq, false,
576 						 dev, msix++,
577 						 &rxq->fl, t4vf_ethrx_handler);
578 			if (err)
579 				goto err_free_queues;
580 
581 			err = t4vf_sge_alloc_eth_txq(adapter, txq, dev,
582 					     netdev_get_tx_queue(dev, qs),
583 					     s->fw_evtq.cntxt_id);
584 			if (err)
585 				goto err_free_queues;
586 
587 			rxq->rspq.idx = qs;
588 			memset(&rxq->stats, 0, sizeof(rxq->stats));
589 		}
590 	}
591 
592 	/*
593 	 * Create the reverse mappings for the queues.
594 	 */
595 	s->egr_base = s->ethtxq[0].q.abs_id - s->ethtxq[0].q.cntxt_id;
596 	s->ingr_base = s->ethrxq[0].rspq.abs_id - s->ethrxq[0].rspq.cntxt_id;
597 	IQ_MAP(s, s->fw_evtq.abs_id) = &s->fw_evtq;
598 	for_each_port(adapter, pidx) {
599 		struct net_device *dev = adapter->port[pidx];
600 		struct port_info *pi = netdev_priv(dev);
601 		struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset];
602 		struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset];
603 		int qs;
604 
605 		for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
606 			IQ_MAP(s, rxq->rspq.abs_id) = &rxq->rspq;
607 			EQ_MAP(s, txq->q.abs_id) = &txq->q;
608 
609 			/*
610 			 * The FW_IQ_CMD doesn't return the Absolute Queue IDs
611 			 * for Free Lists but since all of the Egress Queues
612 			 * (including Free Lists) have Relative Queue IDs
613 			 * which are computed as Absolute - Base Queue ID, we
614 			 * can synthesize the Absolute Queue IDs for the Free
615 			 * Lists.  This is useful for debugging purposes when
616 			 * we want to dump Queue Contexts via the PF Driver.
617 			 */
618 			rxq->fl.abs_id = rxq->fl.cntxt_id + s->egr_base;
619 			EQ_MAP(s, rxq->fl.abs_id) = &rxq->fl;
620 		}
621 	}
622 	return 0;
623 
624 err_free_queues:
625 	t4vf_free_sge_resources(adapter);
626 	return err;
627 }
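
/*
 * Worked example of the reverse-mapping setup above (queue IDs here
 * are illustrative only): if the first TX queue has abs_id 72 and
 * cntxt_id 8, then egr_base is 64, and a Free List with cntxt_id 11
 * gets the synthesized abs_id 64 + 11 = 75.  EQ_MAP() can then index
 * TX Ethernet Queues and Free Lists uniformly by Absolute Queue ID.
 */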
628 
629 /*
630  * Set up Receive Side Scaling (RSS) to distribute packets to multiple receive
631  * queues.  We configure the RSS CPU lookup table to distribute to the number
632  * of HW receive queues, and the response queue lookup table to narrow that
633  * down to the response queues actually configured for each "port" (Virtual
634  * Interface).  We always configure the RSS mapping for all ports since the
635  * mapping table has plenty of entries.
636  */
637 static int setup_rss(struct adapter *adapter)
638 {
639 	int pidx;
640 
641 	for_each_port(adapter, pidx) {
642 		struct port_info *pi = adap2pinfo(adapter, pidx);
643 		struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[pi->first_qset];
644 		u16 rss[MAX_PORT_QSETS];
645 		int qs, err;
646 
647 		for (qs = 0; qs < pi->nqsets; qs++)
648 			rss[qs] = rxq[qs].rspq.abs_id;
649 
650 		err = t4vf_config_rss_range(adapter, pi->viid,
651 					    0, pi->rss_size, rss, pi->nqsets);
652 		if (err)
653 			return err;
654 
655 		/*
656 		 * Perform Global RSS Mode-specific initialization.
657 		 */
658 		switch (adapter->params.rss.mode) {
659 		case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL:
660 			/*
661 			 * If Tunnel All Lookup isn't specified in the global
662 			 * RSS Configuration, then we need to specify a
663 			 * default Ingress Queue for any ingress packets which
664 			 * aren't hashed.  We'll use our first ingress queue
665 			 * ...
666 			 */
667 			if (!adapter->params.rss.u.basicvirtual.tnlalllookup) {
668 				union rss_vi_config config;
669 				err = t4vf_read_rss_vi_config(adapter,
670 							      pi->viid,
671 							      &config);
672 				if (err)
673 					return err;
674 				config.basicvirtual.defaultq =
675 					rxq[0].rspq.abs_id;
676 				err = t4vf_write_rss_vi_config(adapter,
677 							       pi->viid,
678 							       &config);
679 				if (err)
680 					return err;
681 			}
682 			break;
683 		}
684 	}
685 
686 	return 0;
687 }
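
/*
 * For example, a "port" with nqsets == 4 whose Queue Set response
 * queues have Absolute Queue IDs 16..19 hands t4vf_config_rss_range()
 * the array {16, 17, 18, 19}, which is used to populate the port's
 * rss_size slice of the RSS Indirection Table (the IDs here are
 * purely illustrative).
 */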
688 
689 /*
690  * Bring the adapter up.  Called whenever we go from no "ports" open to having
691  * one open.  This function performs the actions necessary to make an adapter
692  * operational, such as completing the initialization of HW modules, and
693  * enabling interrupts.  Must be called with the rtnl lock held.  (Note that
694  * this is called "cxgb_up" in the PF Driver.)
695  */
696 static int adapter_up(struct adapter *adapter)
697 {
698 	int err;
699 
700 	/*
701 	 * If this is the first time we've been called, perform basic
702 	 * adapter setup.  Once we've done this, many of our adapter
703 	 * parameters can no longer be changed ...
704 	 */
705 	if ((adapter->flags & FULL_INIT_DONE) == 0) {
706 		err = setup_sge_queues(adapter);
707 		if (err)
708 			return err;
709 		err = setup_rss(adapter);
710 		if (err) {
711 			t4vf_free_sge_resources(adapter);
712 			return err;
713 		}
714 
715 		if (adapter->flags & USING_MSIX)
716 			name_msix_vecs(adapter);
717 		adapter->flags |= FULL_INIT_DONE;
718 	}
719 
720 	/*
721 	 * Acquire our interrupt resources.  We only support MSI-X and MSI.
722 	 */
723 	BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0);
724 	if (adapter->flags & USING_MSIX)
725 		err = request_msix_queue_irqs(adapter);
726 	else
727 		err = request_irq(adapter->pdev->irq,
728 				  t4vf_intr_handler(adapter), 0,
729 				  adapter->name, adapter);
730 	if (err) {
731 		dev_err(adapter->pdev_dev, "request_irq failed, err %d\n",
732 			err);
733 		return err;
734 	}
735 
736 	/*
737 	 * Enable NAPI ingress processing and return success.
738 	 */
739 	enable_rx(adapter);
740 	t4vf_sge_start(adapter);
741 
742 	/* Initialize hash mac addr list*/
743 	INIT_LIST_HEAD(&adapter->mac_hlist);
744 	return 0;
745 }
746 
747 /*
748  * Bring the adapter down.  Called whenever the last "port" (Virtual
749  * Interface) closed.  (Note that this routine is called "cxgb_down" in the PF
750  * Driver.)
751  */
752 static void adapter_down(struct adapter *adapter)
753 {
754 	/*
755 	 * Free interrupt resources.
756 	 */
757 	if (adapter->flags & USING_MSIX)
758 		free_msix_queue_irqs(adapter);
759 	else
760 		free_irq(adapter->pdev->irq, adapter);
761 
762 	/*
763 	 * Wait for NAPI handlers to finish.
764 	 */
765 	quiesce_rx(adapter);
766 }
767 
768 /*
769  * Start up a net device.
770  */
771 static int cxgb4vf_open(struct net_device *dev)
772 {
773 	int err;
774 	struct port_info *pi = netdev_priv(dev);
775 	struct adapter *adapter = pi->adapter;
776 
777 	/*
778 	 * If this is the first interface that we're opening on the "adapter",
779 	 * bring the "adapter" up now.
780 	 */
781 	if (adapter->open_device_map == 0) {
782 		err = adapter_up(adapter);
783 		if (err)
784 			return err;
785 	}
786 
787 	/*
788 	 * Note that this interface is up and start everything up ...
789 	 */
790 	err = link_start(dev);
791 	if (err)
792 		goto err_unwind;
793 
794 	netif_tx_start_all_queues(dev);
795 	set_bit(pi->port_id, &adapter->open_device_map);
796 	return 0;
797 
798 err_unwind:
799 	if (adapter->open_device_map == 0)
800 		adapter_down(adapter);
801 	return err;
802 }
803 
804 /*
805  * Shut down a net device.  This routine is called "cxgb_close" in the PF
806  * Driver ...
807  */
808 static int cxgb4vf_stop(struct net_device *dev)
809 {
810 	struct port_info *pi = netdev_priv(dev);
811 	struct adapter *adapter = pi->adapter;
812 
813 	netif_tx_stop_all_queues(dev);
814 	netif_carrier_off(dev);
815 	t4vf_enable_vi(adapter, pi->viid, false, false);
816 	pi->link_cfg.link_ok = 0;
817 
818 	clear_bit(pi->port_id, &adapter->open_device_map);
819 	if (adapter->open_device_map == 0)
820 		adapter_down(adapter);
821 	return 0;
822 }
823 
824 /*
825  * Translate our basic statistics into the standard "ifconfig" statistics.
826  */
827 static struct net_device_stats *cxgb4vf_get_stats(struct net_device *dev)
828 {
829 	struct t4vf_port_stats stats;
830 	struct port_info *pi = netdev2pinfo(dev);
831 	struct adapter *adapter = pi->adapter;
832 	struct net_device_stats *ns = &dev->stats;
833 	int err;
834 
835 	spin_lock(&adapter->stats_lock);
836 	err = t4vf_get_port_stats(adapter, pi->pidx, &stats);
837 	spin_unlock(&adapter->stats_lock);
838 
839 	memset(ns, 0, sizeof(*ns));
840 	if (err)
841 		return ns;
842 
843 	ns->tx_bytes = (stats.tx_bcast_bytes + stats.tx_mcast_bytes +
844 			stats.tx_ucast_bytes + stats.tx_offload_bytes);
845 	ns->tx_packets = (stats.tx_bcast_frames + stats.tx_mcast_frames +
846 			  stats.tx_ucast_frames + stats.tx_offload_frames);
847 	ns->rx_bytes = (stats.rx_bcast_bytes + stats.rx_mcast_bytes +
848 			stats.rx_ucast_bytes);
849 	ns->rx_packets = (stats.rx_bcast_frames + stats.rx_mcast_frames +
850 			  stats.rx_ucast_frames);
851 	ns->multicast = stats.rx_mcast_frames;
852 	ns->tx_errors = stats.tx_drop_frames;
853 	ns->rx_errors = stats.rx_err_frames;
854 
855 	return ns;
856 }
857 
858 static inline int cxgb4vf_set_addr_hash(struct port_info *pi)
859 {
860 	struct adapter *adapter = pi->adapter;
861 	u64 vec = 0;
862 	bool ucast = false;
863 	struct hash_mac_addr *entry;
864 
865 	/* Calculate the hash vector for the updated list and program it */
866 	list_for_each_entry(entry, &adapter->mac_hlist, list) {
867 		ucast |= is_unicast_ether_addr(entry->addr);
868 		vec |= (1ULL << hash_mac_addr(entry->addr));
869 	}
870 	return t4vf_set_addr_hash(adapter, pi->viid, ucast, vec, false);
871 }
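
/*
 * Example of the hash vector computed above: hash_mac_addr() folds a
 * MAC address down to a 6-bit value, so each address sets exactly one
 * bit of the 64-bit "vec".  Two addresses hashing to 3 and 40 yield
 * vec == (1ULL << 3) | (1ULL << 40), and "ucast" records whether any
 * of the hashed addresses was a unicast address.
 */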
872 
873 static int cxgb4vf_mac_sync(struct net_device *netdev, const u8 *mac_addr)
874 {
875 	struct port_info *pi = netdev_priv(netdev);
876 	struct adapter *adapter = pi->adapter;
877 	int ret;
878 	u64 mhash = 0;
879 	u64 uhash = 0;
880 	bool free = false;
881 	bool ucast = is_unicast_ether_addr(mac_addr);
882 	const u8 *maclist[1] = {mac_addr};
883 	struct hash_mac_addr *new_entry;
884 
885 	ret = t4vf_alloc_mac_filt(adapter, pi->viid, free, 1, maclist,
886 				  NULL, ucast ? &uhash : &mhash, false);
887 	if (ret < 0)
888 		goto out;
889 	/* if hash != 0, then add the addr to hash addr list
890 	 * so on the end we will calculate the hash for the
891 	 * list and program it
892 	 */
893 	if (uhash || mhash) {
894 		new_entry = kzalloc(sizeof(*new_entry), GFP_ATOMIC);
895 		if (!new_entry)
896 			return -ENOMEM;
897 		ether_addr_copy(new_entry->addr, mac_addr);
898 		list_add_tail(&new_entry->list, &adapter->mac_hlist);
899 		ret = cxgb4vf_set_addr_hash(pi);
900 	}
901 out:
902 	return ret < 0 ? ret : 0;
903 }
904 
905 static int cxgb4vf_mac_unsync(struct net_device *netdev, const u8 *mac_addr)
906 {
907 	struct port_info *pi = netdev_priv(netdev);
908 	struct adapter *adapter = pi->adapter;
909 	int ret;
910 	const u8 *maclist[1] = {mac_addr};
911 	struct hash_mac_addr *entry, *tmp;
912 
913 	/* If the MAC address to be removed is in the hash addr
914 	 * list, delete it from the list and update hash vector
915 	 */
916 	list_for_each_entry_safe(entry, tmp, &adapter->mac_hlist, list) {
917 		if (ether_addr_equal(entry->addr, mac_addr)) {
918 			list_del(&entry->list);
919 			kfree(entry);
920 			return cxgb4vf_set_addr_hash(pi);
921 		}
922 	}
923 
924 	ret = t4vf_free_mac_filt(adapter, pi->viid, 1, maclist, false);
925 	return ret < 0 ? -EINVAL : 0;
926 }
927 
928 /*
929  * Set RX properties of a port, such as promiscuity, address filters, and MTU.
930  * If @mtu is -1 it is left unchanged.
931  */
932 static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
933 {
934 	struct port_info *pi = netdev_priv(dev);
935 
936 	__dev_uc_sync(dev, cxgb4vf_mac_sync, cxgb4vf_mac_unsync);
937 	__dev_mc_sync(dev, cxgb4vf_mac_sync, cxgb4vf_mac_unsync);
938 	return t4vf_set_rxmode(pi->adapter, pi->viid, -1,
939 			       (dev->flags & IFF_PROMISC) != 0,
940 			       (dev->flags & IFF_ALLMULTI) != 0,
941 			       1, -1, sleep_ok);
942 }
943 
944 /*
945  * Set the current receive modes on the device.
946  */
947 static void cxgb4vf_set_rxmode(struct net_device *dev)
948 {
949 	/* unfortunately we can't return errors to the stack */
950 	set_rxmode(dev, -1, false);
951 }
952 
953 /*
954  * Find the entry in the interrupt holdoff timer value array which comes
955  * closest to the specified interrupt holdoff value.
956  */
957 static int closest_timer(const struct sge *s, int us)
958 {
959 	int i, timer_idx = 0, min_delta = INT_MAX;
960 
961 	for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
962 		int delta = us - s->timer_val[i];
963 		if (delta < 0)
964 			delta = -delta;
965 		if (delta < min_delta) {
966 			min_delta = delta;
967 			timer_idx = i;
968 		}
969 	}
970 	return timer_idx;
971 }
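
/*
 * Worked example for closest_timer(): with a holdoff timer set of
 * {5, 10, 20, 50, 100, 200} microseconds (the actual values are
 * adapter configuration; these are illustrative), a request for 35us
 * gives deltas {30, 25, 15, 15, 65, 165}; the strict "<" comparison
 * keeps the first minimum, so index 2 (20us) is returned.
 */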
972 
973 static int closest_thres(const struct sge *s, int thres)
974 {
975 	int i, delta, pktcnt_idx = 0, min_delta = INT_MAX;
976 
977 	for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
978 		delta = thres - s->counter_val[i];
979 		if (delta < 0)
980 			delta = -delta;
981 		if (delta < min_delta) {
982 			min_delta = delta;
983 			pktcnt_idx = i;
984 		}
985 	}
986 	return pktcnt_idx;
987 }
988 
989 /*
990  * Return a queue's interrupt hold-off time in us.  0 means no timer.
991  */
992 static unsigned int qtimer_val(const struct adapter *adapter,
993 			       const struct sge_rspq *rspq)
994 {
995 	unsigned int timer_idx = QINTR_TIMER_IDX_G(rspq->intr_params);
996 
997 	return timer_idx < SGE_NTIMERS
998 		? adapter->sge.timer_val[timer_idx]
999 		: 0;
1000 }
1001 
1002 /**
1003  *	set_rxq_intr_params - set a queue's interrupt holdoff parameters
1004  *	@adapter: the adapter
1005  *	@rspq: the RX response queue
1006  *	@us: the hold-off time in us, or 0 to disable timer
1007  *	@cnt: the hold-off packet count, or 0 to disable counter
1008  *
1009  *	Sets an RX response queue's interrupt hold-off time and packet count.
1010  *	At least one of the two needs to be enabled for the queue to generate
1011  *	interrupts.
1012  */
1013 static int set_rxq_intr_params(struct adapter *adapter, struct sge_rspq *rspq,
1014 			       unsigned int us, unsigned int cnt)
1015 {
1016 	unsigned int timer_idx;
1017 
1018 	/*
1019 	 * If both the interrupt holdoff timer and count are specified as
1020 	 * zero, default to a holdoff count of 1 ...
1021 	 */
1022 	if ((us | cnt) == 0)
1023 		cnt = 1;
1024 
1025 	/*
1026 	 * If an interrupt holdoff count has been specified, then find the
1027 	 * closest configured holdoff count and use that.  If the response
1028 	 * queue has already been created, then update its queue context
1029 	 * parameters ...
1030 	 */
1031 	if (cnt) {
1032 		int err;
1033 		u32 v, pktcnt_idx;
1034 
1035 		pktcnt_idx = closest_thres(&adapter->sge, cnt);
1036 		if (rspq->desc && rspq->pktcnt_idx != pktcnt_idx) {
1037 			v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
1038 			    FW_PARAMS_PARAM_X_V(
1039 					FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
1040 			    FW_PARAMS_PARAM_YZ_V(rspq->cntxt_id);
1041 			err = t4vf_set_params(adapter, 1, &v, &pktcnt_idx);
1042 			if (err)
1043 				return err;
1044 		}
1045 		rspq->pktcnt_idx = pktcnt_idx;
1046 	}
1047 
1048 	/*
1049 	 * Compute the closest holdoff timer index from the supplied holdoff
1050 	 * timer value.
1051 	 */
1052 	timer_idx = (us == 0
1053 		     ? SGE_TIMER_RSTRT_CNTR
1054 		     : closest_timer(&adapter->sge, us));
1055 
1056 	/*
1057 	 * Update the response queue's interrupt coalescing parameters and
1058 	 * return success.
1059 	 */
1060 	rspq->intr_params = (QINTR_TIMER_IDX_V(timer_idx) |
1061 			     QINTR_CNT_EN_V(cnt > 0));
1062 	return 0;
1063 }
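
/*
 * Typical usage (this is what cxgb4vf_set_coalesce() below ends up
 * doing): set_rxq_intr_params(adapter, rspq, 50, 8) asks for an
 * interrupt after roughly 50us of holdoff or 8 accumulated packets,
 * rounded to the closest configured timer and threshold values.
 */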
1064 
1065 /*
1066  * Return a version number to identify the type of adapter.  The scheme is:
1067  * - bits 0..9: chip version
1068  * - bits 10..15: chip revision
1069  */
1070 static inline unsigned int mk_adap_vers(const struct adapter *adapter)
1071 {
1072 	/*
1073 	 * Chip version 4, revision 0x3f (cxgb4vf).
1074 	 */
1075 	return CHELSIO_CHIP_VERSION(adapter->params.chip) | (0x3f << 10);
1076 }
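
/*
 * E.g. for a T4 part (chip version 4) the value above works out to
 * 4 | (0x3f << 10) == 0xfc04: the chip version in bits 0..9 and the
 * constant 0x3f "revision" in bits 10..15.
 */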
1077 
1078 /*
1079  * Execute the specified ioctl command.
1080  */
1081 static int cxgb4vf_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1082 {
1083 	int ret = 0;
1084 
1085 	switch (cmd) {
1086 	    /*
1087 	     * The VF Driver doesn't have access to any of the other
1088 	     * common Ethernet device ioctl()'s (like reading/writing
1089 	     * PHY registers, etc.).
1090 	     */
1091 
1092 	default:
1093 		ret = -EOPNOTSUPP;
1094 		break;
1095 	}
1096 	return ret;
1097 }
1098 
1099 /*
1100  * Change the device's MTU.
1101  */
1102 static int cxgb4vf_change_mtu(struct net_device *dev, int new_mtu)
1103 {
1104 	int ret;
1105 	struct port_info *pi = netdev_priv(dev);
1106 
1107 	ret = t4vf_set_rxmode(pi->adapter, pi->viid, new_mtu,
1108 			      -1, -1, -1, -1, true);
1109 	if (!ret)
1110 		dev->mtu = new_mtu;
1111 	return ret;
1112 }
1113 
1114 static netdev_features_t cxgb4vf_fix_features(struct net_device *dev,
1115 	netdev_features_t features)
1116 {
1117 	/*
1118 	 * Since there is no support for separate rx/tx vlan accel
1119 	 * enable/disable make sure tx flag is always in same state as rx.
1120 	 */
1121 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
1122 		features |= NETIF_F_HW_VLAN_CTAG_TX;
1123 	else
1124 		features &= ~NETIF_F_HW_VLAN_CTAG_TX;
1125 
1126 	return features;
1127 }
1128 
1129 static int cxgb4vf_set_features(struct net_device *dev,
1130 	netdev_features_t features)
1131 {
1132 	struct port_info *pi = netdev_priv(dev);
1133 	netdev_features_t changed = dev->features ^ features;
1134 
1135 	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
1136 		t4vf_set_rxmode(pi->adapter, pi->viid, -1, -1, -1, -1,
1137 				features & NETIF_F_HW_VLAN_CTAG_TX, 0);
1138 
1139 	return 0;
1140 }
1141 
1142 /*
1143  * Change the devices MAC address.
1144  */
1145 static int cxgb4vf_set_mac_addr(struct net_device *dev, void *_addr)
1146 {
1147 	int ret;
1148 	struct sockaddr *addr = _addr;
1149 	struct port_info *pi = netdev_priv(dev);
1150 
1151 	if (!is_valid_ether_addr(addr->sa_data))
1152 		return -EADDRNOTAVAIL;
1153 
1154 	ret = t4vf_change_mac(pi->adapter, pi->viid, pi->xact_addr_filt,
1155 			      addr->sa_data, true);
1156 	if (ret < 0)
1157 		return ret;
1158 
1159 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1160 	pi->xact_addr_filt = ret;
1161 	return 0;
1162 }
1163 
1164 #ifdef CONFIG_NET_POLL_CONTROLLER
1165 /*
1166  * Poll all of our receive queues.  This is called outside of normal interrupt
1167  * context.
1168  */
1169 static void cxgb4vf_poll_controller(struct net_device *dev)
1170 {
1171 	struct port_info *pi = netdev_priv(dev);
1172 	struct adapter *adapter = pi->adapter;
1173 
1174 	if (adapter->flags & USING_MSIX) {
1175 		struct sge_eth_rxq *rxq;
1176 		int nqsets;
1177 
1178 		rxq = &adapter->sge.ethrxq[pi->first_qset];
1179 		for (nqsets = pi->nqsets; nqsets; nqsets--) {
1180 			t4vf_sge_intr_msix(0, &rxq->rspq);
1181 			rxq++;
1182 		}
1183 	} else
1184 		t4vf_intr_handler(adapter)(0, adapter);
1185 }
1186 #endif
1187 
1188 /*
1189  * Ethtool operations.
1190  * ===================
1191  *
1192  * Note that we don't support any ethtool operations which change the physical
1193  * state of the port to which we're linked.
1194  */
1195 
1196 /**
1197  *	from_fw_port_mod_type - translate Firmware Port/Module type to Ethtool
1198  *	@port_type: Firmware Port Type
1199  *	@mod_type: Firmware Module Type
1200  *
1201  *	Translate Firmware Port/Module type to Ethtool Port Type.
1202  */
1203 static int from_fw_port_mod_type(enum fw_port_type port_type,
1204 				 enum fw_port_module_type mod_type)
1205 {
1206 	if (port_type == FW_PORT_TYPE_BT_SGMII ||
1207 	    port_type == FW_PORT_TYPE_BT_XFI ||
1208 	    port_type == FW_PORT_TYPE_BT_XAUI) {
1209 		return PORT_TP;
1210 	} else if (port_type == FW_PORT_TYPE_FIBER_XFI ||
1211 		   port_type == FW_PORT_TYPE_FIBER_XAUI) {
1212 		return PORT_FIBRE;
1213 	} else if (port_type == FW_PORT_TYPE_SFP ||
1214 		   port_type == FW_PORT_TYPE_QSFP_10G ||
1215 		   port_type == FW_PORT_TYPE_QSA ||
1216 		   port_type == FW_PORT_TYPE_QSFP ||
1217 		   port_type == FW_PORT_TYPE_CR4_QSFP ||
1218 		   port_type == FW_PORT_TYPE_CR_QSFP ||
1219 		   port_type == FW_PORT_TYPE_CR2_QSFP ||
1220 		   port_type == FW_PORT_TYPE_SFP28) {
1221 		if (mod_type == FW_PORT_MOD_TYPE_LR ||
1222 		    mod_type == FW_PORT_MOD_TYPE_SR ||
1223 		    mod_type == FW_PORT_MOD_TYPE_ER ||
1224 		    mod_type == FW_PORT_MOD_TYPE_LRM)
1225 			return PORT_FIBRE;
1226 		else if (mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
1227 			 mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
1228 			return PORT_DA;
1229 		else
1230 			return PORT_OTHER;
1231 	} else if (port_type == FW_PORT_TYPE_KR4_100G ||
1232 		   port_type == FW_PORT_TYPE_KR_SFP28 ||
1233 		   port_type == FW_PORT_TYPE_KR_XLAUI) {
1234 		return PORT_NONE;
1235 	}
1236 
1237 	return PORT_OTHER;
1238 }
1239 
1240 /**
1241  *	fw_caps_to_lmm - translate Firmware to ethtool Link Mode Mask
1242  *	@port_type: Firmware Port Type
1243  *	@fw_caps: Firmware Port Capabilities
1244  *	@link_mode_mask: ethtool Link Mode Mask
1245  *
1246  *	Translate a Firmware Port Capabilities specification to an ethtool
1247  *	Link Mode Mask.
1248  */
1249 static void fw_caps_to_lmm(enum fw_port_type port_type,
1250 			   unsigned int fw_caps,
1251 			   unsigned long *link_mode_mask)
1252 {
1253 	#define SET_LMM(__lmm_name) \
1254 		__set_bit(ETHTOOL_LINK_MODE_ ## __lmm_name ## _BIT, \
1255 			  link_mode_mask)
1256 
1257 	#define FW_CAPS_TO_LMM(__fw_name, __lmm_name) \
1258 		do { \
1259 			if (fw_caps & FW_PORT_CAP32_ ## __fw_name) \
1260 				SET_LMM(__lmm_name); \
1261 		} while (0)
1262 
1263 	switch (port_type) {
1264 	case FW_PORT_TYPE_BT_SGMII:
1265 	case FW_PORT_TYPE_BT_XFI:
1266 	case FW_PORT_TYPE_BT_XAUI:
1267 		SET_LMM(TP);
1268 		FW_CAPS_TO_LMM(SPEED_100M, 100baseT_Full);
1269 		FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
1270 		FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
1271 		break;
1272 
1273 	case FW_PORT_TYPE_KX4:
1274 	case FW_PORT_TYPE_KX:
1275 		SET_LMM(Backplane);
1276 		FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
1277 		FW_CAPS_TO_LMM(SPEED_10G, 10000baseKX4_Full);
1278 		break;
1279 
1280 	case FW_PORT_TYPE_KR:
1281 		SET_LMM(Backplane);
1282 		SET_LMM(10000baseKR_Full);
1283 		break;
1284 
1285 	case FW_PORT_TYPE_BP_AP:
1286 		SET_LMM(Backplane);
1287 		SET_LMM(10000baseR_FEC);
1288 		SET_LMM(10000baseKR_Full);
1289 		SET_LMM(1000baseKX_Full);
1290 		break;
1291 
1292 	case FW_PORT_TYPE_BP4_AP:
1293 		SET_LMM(Backplane);
1294 		SET_LMM(10000baseR_FEC);
1295 		SET_LMM(10000baseKR_Full);
1296 		SET_LMM(1000baseKX_Full);
1297 		SET_LMM(10000baseKX4_Full);
1298 		break;
1299 
1300 	case FW_PORT_TYPE_FIBER_XFI:
1301 	case FW_PORT_TYPE_FIBER_XAUI:
1302 	case FW_PORT_TYPE_SFP:
1303 	case FW_PORT_TYPE_QSFP_10G:
1304 	case FW_PORT_TYPE_QSA:
1305 		SET_LMM(FIBRE);
1306 		FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
1307 		FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
1308 		break;
1309 
1310 	case FW_PORT_TYPE_BP40_BA:
1311 	case FW_PORT_TYPE_QSFP:
1312 		SET_LMM(FIBRE);
1313 		SET_LMM(40000baseSR4_Full);
1314 		break;
1315 
1316 	case FW_PORT_TYPE_CR_QSFP:
1317 	case FW_PORT_TYPE_SFP28:
1318 		SET_LMM(FIBRE);
1319 		SET_LMM(25000baseCR_Full);
1320 		break;
1321 
1322 	case FW_PORT_TYPE_KR_SFP28:
1323 		SET_LMM(Backplane);
1324 		SET_LMM(25000baseKR_Full);
1325 		break;
1326 
1327 	case FW_PORT_TYPE_KR_XLAUI:
1328 		SET_LMM(Backplane);
1329 		FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
1330 		FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
1331 		FW_CAPS_TO_LMM(SPEED_40G, 40000baseKR4_Full);
1332 		break;
1333 
1334 	case FW_PORT_TYPE_CR2_QSFP:
1335 		SET_LMM(FIBRE);
1336 		SET_LMM(50000baseSR2_Full);
1337 		break;
1338 
1339 	case FW_PORT_TYPE_KR4_100G:
1340 	case FW_PORT_TYPE_CR4_QSFP:
1341 		SET_LMM(FIBRE);
1342 		SET_LMM(100000baseCR4_Full);
1343 		break;
1344 
1345 	default:
1346 		break;
1347 	}
1348 
1349 	FW_CAPS_TO_LMM(ANEG, Autoneg);
1350 	FW_CAPS_TO_LMM(802_3_PAUSE, Pause);
1351 	FW_CAPS_TO_LMM(802_3_ASM_DIR, Asym_Pause);
1352 
1353 	#undef FW_CAPS_TO_LMM
1354 	#undef SET_LMM
1355 }
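
/*
 * For reference, FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full) above
 * expands, per the two helper macros, to:
 *
 *	if (fw_caps & FW_PORT_CAP32_SPEED_10G)
 *		__set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
 *			  link_mode_mask);
 *
 * i.e. each Firmware capability bit maps onto exactly one ethtool
 * Link Mode bit.
 */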
1356 
1357 static int cxgb4vf_get_link_ksettings(struct net_device *dev,
1358 				  struct ethtool_link_ksettings *link_ksettings)
1359 {
1360 	struct port_info *pi = netdev_priv(dev);
1361 	struct ethtool_link_settings *base = &link_ksettings->base;
1362 
1363 	/* For the nonce, the Firmware doesn't send up Port State changes
1364 	 * when the Virtual Interface attached to the Port is down.  So
1365 	 * if it's down, let's grab any changes.
1366 	 */
1367 	if (!netif_running(dev))
1368 		(void)t4vf_update_port_info(pi);
1369 
1370 	ethtool_link_ksettings_zero_link_mode(link_ksettings, supported);
1371 	ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
1372 	ethtool_link_ksettings_zero_link_mode(link_ksettings, lp_advertising);
1373 
1374 	base->port = from_fw_port_mod_type(pi->port_type, pi->mod_type);
1375 
1376 	if (pi->mdio_addr >= 0) {
1377 		base->phy_address = pi->mdio_addr;
1378 		base->mdio_support = (pi->port_type == FW_PORT_TYPE_BT_SGMII
1379 				      ? ETH_MDIO_SUPPORTS_C22
1380 				      : ETH_MDIO_SUPPORTS_C45);
1381 	} else {
1382 		base->phy_address = 255;
1383 		base->mdio_support = 0;
1384 	}
1385 
1386 	fw_caps_to_lmm(pi->port_type, pi->link_cfg.pcaps,
1387 		       link_ksettings->link_modes.supported);
1388 	fw_caps_to_lmm(pi->port_type, pi->link_cfg.acaps,
1389 		       link_ksettings->link_modes.advertising);
1390 	fw_caps_to_lmm(pi->port_type, pi->link_cfg.lpacaps,
1391 		       link_ksettings->link_modes.lp_advertising);
1392 
1393 	if (netif_carrier_ok(dev)) {
1394 		base->speed = pi->link_cfg.speed;
1395 		base->duplex = DUPLEX_FULL;
1396 	} else {
1397 		base->speed = SPEED_UNKNOWN;
1398 		base->duplex = DUPLEX_UNKNOWN;
1399 	}
1400 
1401 	base->autoneg = pi->link_cfg.autoneg;
1402 	if (pi->link_cfg.pcaps & FW_PORT_CAP32_ANEG)
1403 		ethtool_link_ksettings_add_link_mode(link_ksettings,
1404 						     supported, Autoneg);
1405 	if (pi->link_cfg.autoneg)
1406 		ethtool_link_ksettings_add_link_mode(link_ksettings,
1407 						     advertising, Autoneg);
1408 
1409 	return 0;
1410 }
1411 
1412 /* Translate the Firmware FEC value into the ethtool value. */
1413 static inline unsigned int fwcap_to_eth_fec(unsigned int fw_fec)
1414 {
1415 	unsigned int eth_fec = 0;
1416 
1417 	if (fw_fec & FW_PORT_CAP32_FEC_RS)
1418 		eth_fec |= ETHTOOL_FEC_RS;
1419 	if (fw_fec & FW_PORT_CAP32_FEC_BASER_RS)
1420 		eth_fec |= ETHTOOL_FEC_BASER;
1421 
1422 	/* if nothing is set, then FEC is off */
1423 	if (!eth_fec)
1424 		eth_fec = ETHTOOL_FEC_OFF;
1425 
1426 	return eth_fec;
1427 }
1428 
1429 /* Translate Common Code FEC value into ethtool value. */
1430 static inline unsigned int cc_to_eth_fec(unsigned int cc_fec)
1431 {
1432 	unsigned int eth_fec = 0;
1433 
1434 	if (cc_fec & FEC_AUTO)
1435 		eth_fec |= ETHTOOL_FEC_AUTO;
1436 	if (cc_fec & FEC_RS)
1437 		eth_fec |= ETHTOOL_FEC_RS;
1438 	if (cc_fec & FEC_BASER_RS)
1439 		eth_fec |= ETHTOOL_FEC_BASER;
1440 
1441 	/* if nothing is set, then FEC is off */
1442 	if (!eth_fec)
1443 		eth_fec = ETHTOOL_FEC_OFF;
1444 
1445 	return eth_fec;
1446 }
1447 
1448 static int cxgb4vf_get_fecparam(struct net_device *dev,
1449 				struct ethtool_fecparam *fec)
1450 {
1451 	const struct port_info *pi = netdev_priv(dev);
1452 	const struct link_config *lc = &pi->link_cfg;
1453 
1454 	/* Translate the Firmware FEC Support into the ethtool value.  We
1455 	 * always support IEEE 802.3 "automatic" selection of Link FEC type if
1456 	 * any FEC is supported.
1457 	 */
1458 	fec->fec = fwcap_to_eth_fec(lc->pcaps);
1459 	if (fec->fec != ETHTOOL_FEC_OFF)
1460 		fec->fec |= ETHTOOL_FEC_AUTO;
1461 
1462 	/* Translate the current internal FEC parameters into the
1463 	 * ethtool values.
1464 	 */
1465 	fec->active_fec = cc_to_eth_fec(lc->fec);
1466 	return 0;
1467 }
1468 
1469 /*
1470  * Return our driver information.
1471  */
1472 static void cxgb4vf_get_drvinfo(struct net_device *dev,
1473 				struct ethtool_drvinfo *drvinfo)
1474 {
1475 	struct adapter *adapter = netdev2adap(dev);
1476 
1477 	strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
1478 	strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
1479 	strlcpy(drvinfo->bus_info, pci_name(to_pci_dev(dev->dev.parent)),
1480 		sizeof(drvinfo->bus_info));
1481 	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
1482 		 "%u.%u.%u.%u, TP %u.%u.%u.%u",
1483 		 FW_HDR_FW_VER_MAJOR_G(adapter->params.dev.fwrev),
1484 		 FW_HDR_FW_VER_MINOR_G(adapter->params.dev.fwrev),
1485 		 FW_HDR_FW_VER_MICRO_G(adapter->params.dev.fwrev),
1486 		 FW_HDR_FW_VER_BUILD_G(adapter->params.dev.fwrev),
1487 		 FW_HDR_FW_VER_MAJOR_G(adapter->params.dev.tprev),
1488 		 FW_HDR_FW_VER_MINOR_G(adapter->params.dev.tprev),
1489 		 FW_HDR_FW_VER_MICRO_G(adapter->params.dev.tprev),
1490 		 FW_HDR_FW_VER_BUILD_G(adapter->params.dev.tprev));
1491 }
1492 
1493 /*
1494  * Return current adapter message level.
1495  */
1496 static u32 cxgb4vf_get_msglevel(struct net_device *dev)
1497 {
1498 	return netdev2adap(dev)->msg_enable;
1499 }
1500 
1501 /*
1502  * Set current adapter message level.
1503  */
1504 static void cxgb4vf_set_msglevel(struct net_device *dev, u32 msglevel)
1505 {
1506 	netdev2adap(dev)->msg_enable = msglevel;
1507 }
1508 
1509 /*
1510  * Return the device's current Queue Set ring size parameters along with the
1511  * allowed maximum values.  Since ethtool doesn't understand the concept of
1512  * multi-queue devices, we just return the current values associated with the
1513  * first Queue Set.
1514  */
1515 static void cxgb4vf_get_ringparam(struct net_device *dev,
1516 				  struct ethtool_ringparam *rp)
1517 {
1518 	const struct port_info *pi = netdev_priv(dev);
1519 	const struct sge *s = &pi->adapter->sge;
1520 
1521 	rp->rx_max_pending = MAX_RX_BUFFERS;
1522 	rp->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
1523 	rp->rx_jumbo_max_pending = 0;
1524 	rp->tx_max_pending = MAX_TXQ_ENTRIES;
1525 
1526 	rp->rx_pending = s->ethrxq[pi->first_qset].fl.size - MIN_FL_RESID;
1527 	rp->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
1528 	rp->rx_jumbo_pending = 0;
1529 	rp->tx_pending = s->ethtxq[pi->first_qset].q.size;
1530 }
1531 
1532 /*
1533  * Set the Queue Set ring size parameters for the device.  Again, since
1534  * ethtool doesn't allow for the concept of multiple queues per device, we'll
1535  * apply these new values across all of the Queue Sets associated with the
1536  * device -- after vetting them of course!
1537  */
1538 static int cxgb4vf_set_ringparam(struct net_device *dev,
1539 				 struct ethtool_ringparam *rp)
1540 {
1541 	const struct port_info *pi = netdev_priv(dev);
1542 	struct adapter *adapter = pi->adapter;
1543 	struct sge *s = &adapter->sge;
1544 	int qs;
1545 
1546 	if (rp->rx_pending > MAX_RX_BUFFERS ||
1547 	    rp->rx_jumbo_pending ||
1548 	    rp->tx_pending > MAX_TXQ_ENTRIES ||
1549 	    rp->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1550 	    rp->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1551 	    rp->rx_pending < MIN_FL_ENTRIES ||
1552 	    rp->tx_pending < MIN_TXQ_ENTRIES)
1553 		return -EINVAL;
1554 
1555 	if (adapter->flags & FULL_INIT_DONE)
1556 		return -EBUSY;
1557 
1558 	for (qs = pi->first_qset; qs < pi->first_qset + pi->nqsets; qs++) {
1559 		s->ethrxq[qs].fl.size = rp->rx_pending + MIN_FL_RESID;
1560 		s->ethrxq[qs].rspq.size = rp->rx_mini_pending;
1561 		s->ethtxq[qs].q.size = rp->tx_pending;
1562 	}
1563 	return 0;
1564 }
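
/*
 * Note the symmetry with cxgb4vf_get_ringparam() above: the reported
 * rx_pending is fl.size - MIN_FL_RESID, so when applying a new value
 * we add MIN_FL_RESID back in, keeping the hardware Free List large
 * enough to always leave one Egress Queue Unit's worth of entries
 * unused (see the MIN_FL_RESID comment near the top of this file).
 */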
1565 
1566 /*
1567  * Return the interrupt holdoff timer and count for the first Queue Set on the
1568  * device.  Our extension ioctl() (the cxgbtool interface) allows the
1569  * interrupt holdoff timer to be read on all of the device's Queue Sets.
1570  */
1571 static int cxgb4vf_get_coalesce(struct net_device *dev,
1572 				struct ethtool_coalesce *coalesce)
1573 {
1574 	const struct port_info *pi = netdev_priv(dev);
1575 	const struct adapter *adapter = pi->adapter;
1576 	const struct sge_rspq *rspq = &adapter->sge.ethrxq[pi->first_qset].rspq;
1577 
1578 	coalesce->rx_coalesce_usecs = qtimer_val(adapter, rspq);
1579 	coalesce->rx_max_coalesced_frames =
1580 		((rspq->intr_params & QINTR_CNT_EN_F)
1581 		 ? adapter->sge.counter_val[rspq->pktcnt_idx]
1582 		 : 0);
1583 	return 0;
1584 }
1585 
1586 /*
1587  * Set the RX interrupt holdoff timer and count for the first Queue Set on the
1588  * interface.  Our extension ioctl() (the cxgbtool interface) allows us to set
1589  * the interrupt holdoff timer on any of the device's Queue Sets.
1590  */
1591 static int cxgb4vf_set_coalesce(struct net_device *dev,
1592 				struct ethtool_coalesce *coalesce)
1593 {
1594 	const struct port_info *pi = netdev_priv(dev);
1595 	struct adapter *adapter = pi->adapter;
1596 
1597 	return set_rxq_intr_params(adapter,
1598 				   &adapter->sge.ethrxq[pi->first_qset].rspq,
1599 				   coalesce->rx_coalesce_usecs,
1600 				   coalesce->rx_max_coalesced_frames);
1601 }
1602 
1603 /*
1604  * Report current port link pause parameter settings.
1605  */
1606 static void cxgb4vf_get_pauseparam(struct net_device *dev,
1607 				   struct ethtool_pauseparam *pauseparam)
1608 {
1609 	struct port_info *pi = netdev_priv(dev);
1610 
1611 	pauseparam->autoneg = (pi->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
1612 	pauseparam->rx_pause = (pi->link_cfg.fc & PAUSE_RX) != 0;
1613 	pauseparam->tx_pause = (pi->link_cfg.fc & PAUSE_TX) != 0;
1614 }
1615 
1616 /*
1617  * Identify the port by blinking the port's LED.
1618  */
1619 static int cxgb4vf_phys_id(struct net_device *dev,
1620 			   enum ethtool_phys_id_state state)
1621 {
1622 	unsigned int val;
1623 	struct port_info *pi = netdev_priv(dev);
1624 
1625 	if (state == ETHTOOL_ID_ACTIVE)
1626 		val = 0xffff;
1627 	else if (state == ETHTOOL_ID_INACTIVE)
1628 		val = 0;
1629 	else
1630 		return -EINVAL;
1631 
1632 	return t4vf_identify_port(pi->adapter, pi->viid, val);
1633 }
1634 
1635 /*
1636  * Port stats maintained per queue of the port.
1637  */
1638 struct queue_port_stats {
1639 	u64 tso;
1640 	u64 tx_csum;
1641 	u64 rx_csum;
1642 	u64 vlan_ex;
1643 	u64 vlan_ins;
1644 	u64 lro_pkts;
1645 	u64 lro_merged;
1646 };
1647 
1648 /*
1649  * Strings for the ETH_SS_STATS statistics set ("ethtool -S").  Note that
1650  * these need to match the order of statistics returned by
1651  * t4vf_get_port_stats().
1652  */
1653 static const char stats_strings[][ETH_GSTRING_LEN] = {
1654 	/*
1655 	 * These must match the layout of the t4vf_port_stats structure.
1656 	 */
1657 	"TxBroadcastBytes  ",
1658 	"TxBroadcastFrames ",
1659 	"TxMulticastBytes  ",
1660 	"TxMulticastFrames ",
1661 	"TxUnicastBytes    ",
1662 	"TxUnicastFrames   ",
1663 	"TxDroppedFrames   ",
1664 	"TxOffloadBytes    ",
1665 	"TxOffloadFrames   ",
1666 	"RxBroadcastBytes  ",
1667 	"RxBroadcastFrames ",
1668 	"RxMulticastBytes  ",
1669 	"RxMulticastFrames ",
1670 	"RxUnicastBytes    ",
1671 	"RxUnicastFrames   ",
1672 	"RxErrorFrames     ",
1673 
1674 	/*
1675 	 * These are accumulated per-queue statistics and must match the
1676 	 * order of the fields in the queue_port_stats structure.
1677 	 */
1678 	"TSO               ",
1679 	"TxCsumOffload     ",
1680 	"RxCsumGood        ",
1681 	"VLANextractions   ",
1682 	"VLANinsertions    ",
1683 	"GROPackets        ",
1684 	"GROMerged         ",
1685 };
1686 
1687 /*
1688  * Return the number of statistics in the specified statistics set.
1689  */
1690 static int cxgb4vf_get_sset_count(struct net_device *dev, int sset)
1691 {
1692 	switch (sset) {
1693 	case ETH_SS_STATS:
1694 		return ARRAY_SIZE(stats_strings);
1695 	default:
1696 		return -EOPNOTSUPP;
1697 	}
1698 	/*NOTREACHED*/
1699 }
1700 
1701 /*
1702  * Return the strings for the specified statistics set.
1703  */
1704 static void cxgb4vf_get_strings(struct net_device *dev,
1705 				u32 sset,
1706 				u8 *data)
1707 {
1708 	switch (sset) {
1709 	case ETH_SS_STATS:
1710 		memcpy(data, stats_strings, sizeof(stats_strings));
1711 		break;
1712 	}
1713 }
1714 
1715 /*
1716  * Small utility routine to accumulate queue statistics across the queues of
1717  * a "port".
1718  */
1719 static void collect_sge_port_stats(const struct adapter *adapter,
1720 				   const struct port_info *pi,
1721 				   struct queue_port_stats *stats)
1722 {
1723 	const struct sge_eth_txq *txq = &adapter->sge.ethtxq[pi->first_qset];
1724 	const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[pi->first_qset];
1725 	int qs;
1726 
1727 	memset(stats, 0, sizeof(*stats));
1728 	for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
1729 		stats->tso += txq->tso;
1730 		stats->tx_csum += txq->tx_cso;
1731 		stats->rx_csum += rxq->stats.rx_cso;
1732 		stats->vlan_ex += rxq->stats.vlan_ex;
1733 		stats->vlan_ins += txq->vlan_ins;
1734 		stats->lro_pkts += rxq->stats.lro_pkts;
1735 		stats->lro_merged += rxq->stats.lro_merged;
1736 	}
1737 }
1738 
1739 /*
1740  * Return the ETH_SS_STATS statistics set.
1741  */
1742 static void cxgb4vf_get_ethtool_stats(struct net_device *dev,
1743 				      struct ethtool_stats *stats,
1744 				      u64 *data)
1745 {
1746 	struct port_info *pi = netdev2pinfo(dev);
1747 	struct adapter *adapter = pi->adapter;
1748 	int err = t4vf_get_port_stats(adapter, pi->pidx,
1749 				      (struct t4vf_port_stats *)data);
1750 	if (err)
1751 		memset(data, 0, sizeof(struct t4vf_port_stats));
1752 
1753 	data += sizeof(struct t4vf_port_stats) / sizeof(u64);
1754 	collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
1755 }
1756 
1757 /*
1758  * Return the size of our register map.
1759  */
1760 static int cxgb4vf_get_regs_len(struct net_device *dev)
1761 {
1762 	return T4VF_REGMAP_SIZE;
1763 }
1764 
1765 /*
1766  * Dump a block of registers, start to end inclusive, into a buffer.
1767  */
1768 static void reg_block_dump(struct adapter *adapter, void *regbuf,
1769 			   unsigned int start, unsigned int end)
1770 {
1771 	u32 *bp = regbuf + start - T4VF_REGMAP_START;
1772 
1773 	for ( ; start <= end; start += sizeof(u32)) {
1774 		/*
1775 		 * Avoid reading the Mailbox Control register since that
1776 		 * can trigger a Mailbox Ownership Arbitration cycle and
1777 		 * interfere with communication with the firmware.
1778 		 */
1779 		if (start == T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL)
1780 			*bp++ = 0xffff;
1781 		else
1782 			*bp++ = t4_read_reg(adapter, start);
1783 	}
1784 }
1785 
1786 /*
1787  * Copy our entire register map into the provided buffer.
1788  */
1789 static void cxgb4vf_get_regs(struct net_device *dev,
1790 			     struct ethtool_regs *regs,
1791 			     void *regbuf)
1792 {
1793 	struct adapter *adapter = netdev2adap(dev);
1794 
1795 	regs->version = mk_adap_vers(adapter);
1796 
1797 	/*
1798 	 * Fill in register buffer with our register map.
1799 	 */
1800 	memset(regbuf, 0, T4VF_REGMAP_SIZE);
1801 
1802 	reg_block_dump(adapter, regbuf,
1803 		       T4VF_SGE_BASE_ADDR + T4VF_MOD_MAP_SGE_FIRST,
1804 		       T4VF_SGE_BASE_ADDR + T4VF_MOD_MAP_SGE_LAST);
1805 	reg_block_dump(adapter, regbuf,
1806 		       T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_FIRST,
1807 		       T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_LAST);
1808 
1809 	/* T5 adds new registers in the PL Register map.
1810 	 */
1811 	reg_block_dump(adapter, regbuf,
1812 		       T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_FIRST,
1813 		       T4VF_PL_BASE_ADDR + (is_t4(adapter->params.chip)
1814 		       ? PL_VF_WHOAMI_A : PL_VF_REVISION_A));
1815 	reg_block_dump(adapter, regbuf,
1816 		       T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_FIRST,
1817 		       T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_LAST);
1818 
1819 	reg_block_dump(adapter, regbuf,
1820 		       T4VF_MBDATA_BASE_ADDR + T4VF_MBDATA_FIRST,
1821 		       T4VF_MBDATA_BASE_ADDR + T4VF_MBDATA_LAST);
1822 }
1823 
1824 /*
1825  * Report current Wake On LAN settings.
1826  */
1827 static void cxgb4vf_get_wol(struct net_device *dev,
1828 			    struct ethtool_wolinfo *wol)
1829 {
1830 	wol->supported = 0;
1831 	wol->wolopts = 0;
1832 	memset(&wol->sopass, 0, sizeof(wol->sopass));
1833 }
1834 
1835 /*
1836  * TCP Segmentation Offload flags which we support.
1837  */
1838 #define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
1839 
1840 static const struct ethtool_ops cxgb4vf_ethtool_ops = {
1841 	.get_link_ksettings	= cxgb4vf_get_link_ksettings,
1842 	.get_fecparam		= cxgb4vf_get_fecparam,
1843 	.get_drvinfo		= cxgb4vf_get_drvinfo,
1844 	.get_msglevel		= cxgb4vf_get_msglevel,
1845 	.set_msglevel		= cxgb4vf_set_msglevel,
1846 	.get_ringparam		= cxgb4vf_get_ringparam,
1847 	.set_ringparam		= cxgb4vf_set_ringparam,
1848 	.get_coalesce		= cxgb4vf_get_coalesce,
1849 	.set_coalesce		= cxgb4vf_set_coalesce,
1850 	.get_pauseparam		= cxgb4vf_get_pauseparam,
1851 	.get_link		= ethtool_op_get_link,
1852 	.get_strings		= cxgb4vf_get_strings,
1853 	.set_phys_id		= cxgb4vf_phys_id,
1854 	.get_sset_count		= cxgb4vf_get_sset_count,
1855 	.get_ethtool_stats	= cxgb4vf_get_ethtool_stats,
1856 	.get_regs_len		= cxgb4vf_get_regs_len,
1857 	.get_regs		= cxgb4vf_get_regs,
1858 	.get_wol		= cxgb4vf_get_wol,
1859 };
1860 
1861 /*
1862  * /sys/kernel/debug/cxgb4vf support code and data.
1863  * ================================================
1864  */
1865 
1866 /*
1867  * Show Firmware Mailbox Command/Reply Log
1868  *
1869  * Note that we don't do any locking when dumping the Firmware Mailbox Log so
1870  * it's possible that we can catch things during a log update and therefore
1871  * see partially corrupted log entries.  But i9t's probably Good Enough(tm).
1872  * If we ever decide that we want to make sure that we're dumping a coherent
1873  * log, we'd need to perform locking in the mailbox logging and in
1874  * mboxlog_open() where we'd need to grab the entire mailbox log in one go
1875  * like we do for the Firmware Device Log.  But as stated above, meh ...
1876  */
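/*
 * A worked example of the position-to-entry mapping implemented below:
 * seq_file position 0 yields SEQ_START_TOKEN (the header line); for
 * position N >= 1, mboxlog_get_idx() returns v == N + 1 and
 * mboxlog_show() computes
 *
 *	entry_idx = log->cursor + (N - 1)
 *
 * modulo log->size.  So with cursor == 5 and size == 8 (made-up
 * numbers), positions 1, 2, 3, 4, ... visit entries 5, 6, 7, 0, ...
 */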
1877 static int mboxlog_show(struct seq_file *seq, void *v)
1878 {
1879 	struct adapter *adapter = seq->private;
1880 	struct mbox_cmd_log *log = adapter->mbox_log;
1881 	struct mbox_cmd *entry;
1882 	int entry_idx, i;
1883 
1884 	if (v == SEQ_START_TOKEN) {
1885 		seq_printf(seq,
1886 			   "%10s  %15s  %5s  %5s  %s\n",
1887 			   "Seq#", "Tstamp", "Atime", "Etime",
1888 			   "Command/Reply");
1889 		return 0;
1890 	}
1891 
1892 	entry_idx = log->cursor + ((uintptr_t)v - 2);
1893 	if (entry_idx >= log->size)
1894 		entry_idx -= log->size;
1895 	entry = mbox_cmd_log_entry(log, entry_idx);
1896 
1897 	/* skip over unused entries */
1898 	if (entry->timestamp == 0)
1899 		return 0;
1900 
1901 	seq_printf(seq, "%10u  %15llu  %5d  %5d",
1902 		   entry->seqno, entry->timestamp,
1903 		   entry->access, entry->execute);
1904 	for (i = 0; i < MBOX_LEN / 8; i++) {
1905 		u64 flit = entry->cmd[i];
1906 		u32 hi = (u32)(flit >> 32);
1907 		u32 lo = (u32)flit;
1908 
1909 		seq_printf(seq, "  %08x %08x", hi, lo);
1910 	}
1911 	seq_puts(seq, "\n");
1912 	return 0;
1913 }
1914 
1915 static inline void *mboxlog_get_idx(struct seq_file *seq, loff_t pos)
1916 {
1917 	struct adapter *adapter = seq->private;
1918 	struct mbox_cmd_log *log = adapter->mbox_log;
1919 
1920 	return ((pos <= log->size) ? (void *)(uintptr_t)(pos + 1) : NULL);
1921 }
1922 
1923 static void *mboxlog_start(struct seq_file *seq, loff_t *pos)
1924 {
1925 	return *pos ? mboxlog_get_idx(seq, *pos) : SEQ_START_TOKEN;
1926 }
1927 
1928 static void *mboxlog_next(struct seq_file *seq, void *v, loff_t *pos)
1929 {
1930 	++*pos;
1931 	return mboxlog_get_idx(seq, *pos);
1932 }
1933 
1934 static void mboxlog_stop(struct seq_file *seq, void *v)
1935 {
1936 }
1937 
1938 static const struct seq_operations mboxlog_seq_ops = {
1939 	.start = mboxlog_start,
1940 	.next  = mboxlog_next,
1941 	.stop  = mboxlog_stop,
1942 	.show  = mboxlog_show
1943 };
1944 
1945 static int mboxlog_open(struct inode *inode, struct file *file)
1946 {
1947 	int res = seq_open(file, &mboxlog_seq_ops);
1948 
1949 	if (!res) {
1950 		struct seq_file *seq = file->private_data;
1951 
1952 		seq->private = inode->i_private;
1953 	}
1954 	return res;
1955 }
1956 
1957 static const struct file_operations mboxlog_fops = {
1958 	.owner   = THIS_MODULE,
1959 	.open    = mboxlog_open,
1960 	.read    = seq_read,
1961 	.llseek  = seq_lseek,
1962 	.release = seq_release,
1963 };
1964 
1965 /*
1966  * Show SGE Queue Set information.  We display QPL Queue Sets per line.
1967  */
1968 #define QPL	4
1969 
1970 static int sge_qinfo_show(struct seq_file *seq, void *v)
1971 {
1972 	struct adapter *adapter = seq->private;
1973 	int eth_entries = DIV_ROUND_UP(adapter->sge.ethqsets, QPL);
1974 	int qs, r = (uintptr_t)v - 1;
1975 
1976 	if (r)
1977 		seq_putc(seq, '\n');
1978 
1979 	#define S3(fmt_spec, s, v) \
1980 		do {\
1981 			seq_printf(seq, "%-12s", s); \
1982 			for (qs = 0; qs < n; ++qs) \
1983 				seq_printf(seq, " %16" fmt_spec, v); \
1984 			seq_putc(seq, '\n'); \
1985 		} while (0)
1986 	#define S(s, v)		S3("s", s, v)
1987 	#define T(s, v)		S3("u", s, txq[qs].v)
1988 	#define R(s, v)		S3("u", s, rxq[qs].v)
1989 
1990 	if (r < eth_entries) {
1991 		const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[r * QPL];
1992 		const struct sge_eth_txq *txq = &adapter->sge.ethtxq[r * QPL];
1993 		int n = min(QPL, adapter->sge.ethqsets - QPL * r);
1994 
1995 		S("QType:", "Ethernet");
1996 		S("Interface:",
1997 		  (rxq[qs].rspq.netdev
1998 		   ? rxq[qs].rspq.netdev->name
1999 		   : "N/A"));
2000 		S3("d", "Port:",
2001 		   (rxq[qs].rspq.netdev
2002 		    ? ((struct port_info *)
2003 		       netdev_priv(rxq[qs].rspq.netdev))->port_id
2004 		    : -1));
2005 		T("TxQ ID:", q.abs_id);
2006 		T("TxQ size:", q.size);
2007 		T("TxQ inuse:", q.in_use);
2008 		T("TxQ PIdx:", q.pidx);
2009 		T("TxQ CIdx:", q.cidx);
2010 		R("RspQ ID:", rspq.abs_id);
2011 		R("RspQ size:", rspq.size);
2012 		R("RspQE size:", rspq.iqe_len);
2013 		S3("u", "Intr delay:", qtimer_val(adapter, &rxq[qs].rspq));
2014 		S3("u", "Intr pktcnt:",
2015 		   adapter->sge.counter_val[rxq[qs].rspq.pktcnt_idx]);
2016 		R("RspQ CIdx:", rspq.cidx);
2017 		R("RspQ Gen:", rspq.gen);
2018 		R("FL ID:", fl.abs_id);
2019 		R("FL size:", fl.size - MIN_FL_RESID);
2020 		R("FL avail:", fl.avail);
2021 		R("FL PIdx:", fl.pidx);
2022 		R("FL CIdx:", fl.cidx);
2023 		return 0;
2024 	}
2025 
2026 	r -= eth_entries;
2027 	if (r == 0) {
2028 		const struct sge_rspq *evtq = &adapter->sge.fw_evtq;
2029 
2030 		seq_printf(seq, "%-12s %16s\n", "QType:", "FW event queue");
2031 		seq_printf(seq, "%-12s %16u\n", "RspQ ID:", evtq->abs_id);
2032 		seq_printf(seq, "%-12s %16u\n", "Intr delay:",
2033 			   qtimer_val(adapter, evtq));
2034 		seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:",
2035 			   adapter->sge.counter_val[evtq->pktcnt_idx]);
2036 		seq_printf(seq, "%-12s %16u\n", "RspQ Cidx:", evtq->cidx);
2037 		seq_printf(seq, "%-12s %16u\n", "RspQ Gen:", evtq->gen);
2038 	} else if (r == 1) {
2039 		const struct sge_rspq *intrq = &adapter->sge.intrq;
2040 
2041 		seq_printf(seq, "%-12s %16s\n", "QType:", "Interrupt Queue");
2042 		seq_printf(seq, "%-12s %16u\n", "RspQ ID:", intrq->abs_id);
2043 		seq_printf(seq, "%-12s %16u\n", "Intr delay:",
2044 			   qtimer_val(adapter, intrq));
2045 		seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:",
2046 			   adapter->sge.counter_val[intrq->pktcnt_idx]);
2047 		seq_printf(seq, "%-12s %16u\n", "RspQ Cidx:", intrq->cidx);
2048 		seq_printf(seq, "%-12s %16u\n", "RspQ Gen:", intrq->gen);
2049 	}
2050 
2051 	#undef R
2052 	#undef T
2053 	#undef S
2054 	#undef S3
2055 
2056 	return 0;
2057 }
2058 
2059 /*
2060  * Return the number of "entries" in our "file".  We group the multi-Queue
2061  * sections with QPL Queue Sets per "entry".  The sections of the output are:
2062  *
2063  *     Ethernet RX/TX Queue Sets
2064  *     Firmware Event Queue
2065  *     Forwarded Interrupt Queue (if in MSI mode)
2066  */
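/*
 * Worked example with made-up numbers: 10 Ethernet Queue Sets and
 * QPL == 4 contribute DIV_ROUND_UP(10, 4) == 3 entries; one more
 * entry for the Firmware Event Queue, plus another if USING_MSI is
 * set, gives 4 or 5 entries in total.
 */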
2067 static int sge_queue_entries(const struct adapter *adapter)
2068 {
2069 	return DIV_ROUND_UP(adapter->sge.ethqsets, QPL) + 1 +
2070 		((adapter->flags & USING_MSI) != 0);
2071 }
2072 
2073 static void *sge_queue_start(struct seq_file *seq, loff_t *pos)
2074 {
2075 	int entries = sge_queue_entries(seq->private);
2076 
2077 	return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
2078 }
2079 
2080 static void sge_queue_stop(struct seq_file *seq, void *v)
2081 {
2082 }
2083 
2084 static void *sge_queue_next(struct seq_file *seq, void *v, loff_t *pos)
2085 {
2086 	int entries = sge_queue_entries(seq->private);
2087 
2088 	++*pos;
2089 	return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
2090 }
2091 
2092 static const struct seq_operations sge_qinfo_seq_ops = {
2093 	.start = sge_queue_start,
2094 	.next  = sge_queue_next,
2095 	.stop  = sge_queue_stop,
2096 	.show  = sge_qinfo_show
2097 };
2098 
2099 static int sge_qinfo_open(struct inode *inode, struct file *file)
2100 {
2101 	int res = seq_open(file, &sge_qinfo_seq_ops);
2102 
2103 	if (!res) {
2104 		struct seq_file *seq = file->private_data;
2105 		seq->private = inode->i_private;
2106 	}
2107 	return res;
2108 }
2109 
2110 static const struct file_operations sge_qinfo_debugfs_fops = {
2111 	.owner   = THIS_MODULE,
2112 	.open    = sge_qinfo_open,
2113 	.read    = seq_read,
2114 	.llseek  = seq_lseek,
2115 	.release = seq_release,
2116 };
2117 
2118 /*
2119  * Show SGE Queue Set statistics.  We display QPL Queue Sets per line.
2120  */
2121 #define QPL	4
2122 
2123 static int sge_qstats_show(struct seq_file *seq, void *v)
2124 {
2125 	struct adapter *adapter = seq->private;
2126 	int eth_entries = DIV_ROUND_UP(adapter->sge.ethqsets, QPL);
2127 	int qs, r = (uintptr_t)v - 1;
2128 
2129 	if (r)
2130 		seq_putc(seq, '\n');
2131 
2132 	#define S3(fmt, s, v) \
2133 		do { \
2134 			seq_printf(seq, "%-16s", s); \
2135 			for (qs = 0; qs < n; ++qs) \
2136 				seq_printf(seq, " %8" fmt, v); \
2137 			seq_putc(seq, '\n'); \
2138 		} while (0)
2139 	#define S(s, v)		S3("s", s, v)
2140 
2141 	#define T3(fmt, s, v)	S3(fmt, s, txq[qs].v)
2142 	#define T(s, v)		T3("lu", s, v)
2143 
2144 	#define R3(fmt, s, v)	S3(fmt, s, rxq[qs].v)
2145 	#define R(s, v)		R3("lu", s, v)
2146 
2147 	if (r < eth_entries) {
2148 		const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[r * QPL];
2149 		const struct sge_eth_txq *txq = &adapter->sge.ethtxq[r * QPL];
2150 		int n = min(QPL, adapter->sge.ethqsets - QPL * r);
2151 
2152 		S("QType:", "Ethernet");
2153 		S("Interface:",
2154 		  (rxq[qs].rspq.netdev
2155 		   ? rxq[qs].rspq.netdev->name
2156 		   : "N/A"));
2157 		R3("u", "RspQNullInts:", rspq.unhandled_irqs);
2158 		R("RxPackets:", stats.pkts);
2159 		R("RxCSO:", stats.rx_cso);
2160 		R("VLANxtract:", stats.vlan_ex);
2161 		R("LROmerged:", stats.lro_merged);
2162 		R("LROpackets:", stats.lro_pkts);
2163 		R("RxDrops:", stats.rx_drops);
2164 		T("TSO:", tso);
2165 		T("TxCSO:", tx_cso);
2166 		T("VLANins:", vlan_ins);
2167 		T("TxQFull:", q.stops);
2168 		T("TxQRestarts:", q.restarts);
2169 		T("TxMapErr:", mapping_err);
2170 		R("FLAllocErr:", fl.alloc_failed);
2171 		R("FLLrgAlcErr:", fl.large_alloc_failed);
2172 		R("FLStarving:", fl.starving);
2173 		return 0;
2174 	}
2175 
2176 	r -= eth_entries;
2177 	if (r == 0) {
2178 		const struct sge_rspq *evtq = &adapter->sge.fw_evtq;
2179 
2180 		seq_printf(seq, "%-8s %16s\n", "QType:", "FW event queue");
2181 		seq_printf(seq, "%-16s %8u\n", "RspQNullInts:",
2182 			   evtq->unhandled_irqs);
2183 		seq_printf(seq, "%-16s %8u\n", "RspQ CIdx:", evtq->cidx);
2184 		seq_printf(seq, "%-16s %8u\n", "RspQ Gen:", evtq->gen);
2185 	} else if (r == 1) {
2186 		const struct sge_rspq *intrq = &adapter->sge.intrq;
2187 
2188 		seq_printf(seq, "%-8s %16s\n", "QType:", "Interrupt Queue");
2189 		seq_printf(seq, "%-16s %8u\n", "RspQNullInts:",
2190 			   intrq->unhandled_irqs);
2191 		seq_printf(seq, "%-16s %8u\n", "RspQ CIdx:", intrq->cidx);
2192 		seq_printf(seq, "%-16s %8u\n", "RspQ Gen:", intrq->gen);
2193 	}
2194 
2195 	#undef R
2196 	#undef T
2197 	#undef S
2198 	#undef R3
2199 	#undef T3
2200 	#undef S3
2201 
2202 	return 0;
2203 }
2204 
2205 /*
2206  * Return the number of "entries" in our "file".  We group the multi-Queue
2207  * sections with QPL Queue Sets per "entry".  The sections of the output are:
2208  *
2209  *     Ethernet RX/TX Queue Sets
2210  *     Firmware Event Queue
2211  *     Forwarded Interrupt Queue (if in MSI mode)
2212  */
2213 static int sge_qstats_entries(const struct adapter *adapter)
2214 {
2215 	return DIV_ROUND_UP(adapter->sge.ethqsets, QPL) + 1 +
2216 		((adapter->flags & USING_MSI) != 0);
2217 }
2218 
2219 static void *sge_qstats_start(struct seq_file *seq, loff_t *pos)
2220 {
2221 	int entries = sge_qstats_entries(seq->private);
2222 
2223 	return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
2224 }
2225 
2226 static void sge_qstats_stop(struct seq_file *seq, void *v)
2227 {
2228 }
2229 
2230 static void *sge_qstats_next(struct seq_file *seq, void *v, loff_t *pos)
2231 {
2232 	int entries = sge_qstats_entries(seq->private);
2233 
2234 	(*pos)++;
2235 	return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
2236 }
2237 
2238 static const struct seq_operations sge_qstats_seq_ops = {
2239 	.start = sge_qstats_start,
2240 	.next  = sge_qstats_next,
2241 	.stop  = sge_qstats_stop,
2242 	.show  = sge_qstats_show
2243 };
2244 
2245 static int sge_qstats_open(struct inode *inode, struct file *file)
2246 {
2247 	int res = seq_open(file, &sge_qstats_seq_ops);
2248 
2249 	if (res == 0) {
2250 		struct seq_file *seq = file->private_data;
2251 		seq->private = inode->i_private;
2252 	}
2253 	return res;
2254 }
2255 
2256 static const struct file_operations sge_qstats_proc_fops = {
2257 	.owner   = THIS_MODULE,
2258 	.open    = sge_qstats_open,
2259 	.read    = seq_read,
2260 	.llseek  = seq_lseek,
2261 	.release = seq_release,
2262 };
2263 
2264 /*
2265  * Show PCI-E SR-IOV Virtual Function Resource Limits.
2266  */
2267 static int resources_show(struct seq_file *seq, void *v)
2268 {
2269 	struct adapter *adapter = seq->private;
2270 	struct vf_resources *vfres = &adapter->params.vfres;
2271 
2272 	#define S(desc, fmt, var) \
2273 		seq_printf(seq, "%-60s " fmt "\n", \
2274 			   desc " (" #var "):", vfres->var)
2275 
2276 	S("Virtual Interfaces", "%d", nvi);
2277 	S("Egress Queues", "%d", neq);
2278 	S("Ethernet Control", "%d", nethctrl);
2279 	S("Ingress Queues/w Free Lists/Interrupts", "%d", niqflint);
2280 	S("Ingress Queues", "%d", niq);
2281 	S("Traffic Class", "%d", tc);
2282 	S("Port Access Rights Mask", "%#x", pmask);
2283 	S("MAC Address Filters", "%d", nexactf);
2284 	S("Firmware Command Read Capabilities", "%#x", r_caps);
2285 	S("Firmware Command Write/Execute Capabilities", "%#x", wx_caps);
2286 
2287 	#undef S
2288 
2289 	return 0;
2290 }
2291 
2292 static int resources_open(struct inode *inode, struct file *file)
2293 {
2294 	return single_open(file, resources_show, inode->i_private);
2295 }
2296 
2297 static const struct file_operations resources_proc_fops = {
2298 	.owner   = THIS_MODULE,
2299 	.open    = resources_open,
2300 	.read    = seq_read,
2301 	.llseek  = seq_lseek,
2302 	.release = single_release,
2303 };
2304 
2305 /*
2306  * Show Virtual Interfaces.
2307  */
2308 static int interfaces_show(struct seq_file *seq, void *v)
2309 {
2310 	if (v == SEQ_START_TOKEN) {
2311 		seq_puts(seq, "Interface  Port   VIID\n");
2312 	} else {
2313 		struct adapter *adapter = seq->private;
2314 		int pidx = (uintptr_t)v - 2;
2315 		struct net_device *dev = adapter->port[pidx];
2316 		struct port_info *pi = netdev_priv(dev);
2317 
2318 		seq_printf(seq, "%9s  %4d  %#5x\n",
2319 			   dev->name, pi->port_id, pi->viid);
2320 	}
2321 	return 0;
2322 }
2323 
2324 static inline void *interfaces_get_idx(struct adapter *adapter, loff_t pos)
2325 {
2326 	return pos <= adapter->params.nports
2327 		? (void *)(uintptr_t)(pos + 1)
2328 		: NULL;
2329 }
2330 
2331 static void *interfaces_start(struct seq_file *seq, loff_t *pos)
2332 {
2333 	return *pos
2334 		? interfaces_get_idx(seq->private, *pos)
2335 		: SEQ_START_TOKEN;
2336 }
2337 
2338 static void *interfaces_next(struct seq_file *seq, void *v, loff_t *pos)
2339 {
2340 	(*pos)++;
2341 	return interfaces_get_idx(seq->private, *pos);
2342 }
2343 
2344 static void interfaces_stop(struct seq_file *seq, void *v)
2345 {
2346 }
2347 
2348 static const struct seq_operations interfaces_seq_ops = {
2349 	.start = interfaces_start,
2350 	.next  = interfaces_next,
2351 	.stop  = interfaces_stop,
2352 	.show  = interfaces_show
2353 };
2354 
2355 static int interfaces_open(struct inode *inode, struct file *file)
2356 {
2357 	int res = seq_open(file, &interfaces_seq_ops);
2358 
2359 	if (res == 0) {
2360 		struct seq_file *seq = file->private_data;
2361 		seq->private = inode->i_private;
2362 	}
2363 	return res;
2364 }
2365 
2366 static const struct file_operations interfaces_proc_fops = {
2367 	.owner   = THIS_MODULE,
2368 	.open    = interfaces_open,
2369 	.read    = seq_read,
2370 	.llseek  = seq_lseek,
2371 	.release = seq_release,
2372 };
2373 
2374 /*
2375  * /sys/kernel/debug/cxgb4vf/ files list.
2376  */
2377 struct cxgb4vf_debugfs_entry {
2378 	const char *name;		/* name of debugfs node */
2379 	umode_t mode;			/* file system mode */
2380 	const struct file_operations *fops;
2381 };
2382 
2383 static struct cxgb4vf_debugfs_entry debugfs_files[] = {
2384 	{ "mboxlog",    S_IRUGO, &mboxlog_fops },
2385 	{ "sge_qinfo",  S_IRUGO, &sge_qinfo_debugfs_fops },
2386 	{ "sge_qstats", S_IRUGO, &sge_qstats_proc_fops },
2387 	{ "resources",  S_IRUGO, &resources_proc_fops },
2388 	{ "interfaces", S_IRUGO, &interfaces_proc_fops },
2389 };
2390 
2391 /*
2392  * Module and device initialization and cleanup code.
2393  * ==================================================
2394  */
2395 
2396 /*
2397  * Set up our /sys/kernel/debug/cxgb4vf sub-nodes.  We assume that the
2398  * directory (debugfs_root) has already been set up.
2399  */
2400 static int setup_debugfs(struct adapter *adapter)
2401 {
2402 	int i;
2403 
2404 	BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
2405 
2406 	/*
2407 	 * Debugfs support is best effort.
2408 	 */
2409 	for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
2410 		(void)debugfs_create_file(debugfs_files[i].name,
2411 				  debugfs_files[i].mode,
2412 				  adapter->debugfs_root,
2413 				  (void *)adapter,
2414 				  debugfs_files[i].fops);
2415 
2416 	return 0;
2417 }
2418 
2419 /*
2420  * Tear down the /sys/kernel/debug/cxgb4vf sub-nodes created above.  We leave
2421  * it to our caller to tear down the directory (debugfs_root).
2422  */
2423 static void cleanup_debugfs(struct adapter *adapter)
2424 {
2425 	BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
2426 
2427 	/*
2428 	 * Unlike our sister routine cleanup_proc(), we don't need to remove
2429 	 * individual entries because a call will be made to
2430 	 * debugfs_remove_recursive().  We just need to clean up any ancillary
2431 	 * persistent state.
2432 	 */
2433 	/* nothing to do */
2434 }
2435 
2436 /* Figure out how many Ports and Queue Sets we can support.  This depends on
2437  * knowing our Virtual Function Resources and may be called a second time if
2438  * we fall back from MSI-X to MSI Interrupt Mode.
2439  */
2440 static void size_nports_qsets(struct adapter *adapter)
2441 {
2442 	struct vf_resources *vfres = &adapter->params.vfres;
2443 	unsigned int ethqsets, pmask_nports;
2444 
2445 	/* The number of "ports" which we support is equal to the number of
2446 	 * Virtual Interfaces with which we've been provisioned.
2447 	 */
2448 	adapter->params.nports = vfres->nvi;
2449 	if (adapter->params.nports > MAX_NPORTS) {
2450 		dev_warn(adapter->pdev_dev, "only using %d of %d maximum"
2451 			 " allowed virtual interfaces\n", MAX_NPORTS,
2452 			 adapter->params.nports);
2453 		adapter->params.nports = MAX_NPORTS;
2454 	}
2455 
2456 	/* We may have been provisioned with more VIs than the number of
2457 	 * ports we're allowed to access (our Port Access Rights Mask).
2458 	 * This is obviously a configuration conflict but we don't want to
2459 	 * crash the kernel or anything silly just because of that.
2460 	 */
2461 	pmask_nports = hweight32(adapter->params.vfres.pmask);
2462 	if (pmask_nports < adapter->params.nports) {
2463 		dev_warn(adapter->pdev_dev, "only using %d of %d provisioned"
2464 			 " virtual interfaces; limited by Port Access Rights"
2465 			 " mask %#x\n", pmask_nports, adapter->params.nports,
2466 			 adapter->params.vfres.pmask);
2467 		adapter->params.nports = pmask_nports;
2468 	}
2469 
2470 	/* We need to reserve an Ingress Queue for the Asynchronous Firmware
2471 	 * Event Queue.  And if we're using MSI Interrupts, we'll also need to
2472 	 * reserve an Ingress Queue for Forwarded Interrupts.
2473 	 *
2474 	 * The rest of the FL/Intr-capable ingress queues will be matched up
2475 	 * one-for-one with Ethernet/Control egress queues in order to form
2476 	 * "Queue Sets" which will be apportioned between the "ports".  For
2477 	 * each Queue Set, we'll need the ability to allocate two Egress
2478 	 * Contexts -- one for the Ingress Queue Free List and one for the TX
2479 	 * Ethernet Queue.
2480 	 *
2481 	 * Note that even if we're currently configured to use MSI-X
2482 	 * Interrupts (module variable msi == MSI_MSIX) we may get downgraded
2483 	 * to MSI Interrupts if we can't get enough MSI-X Interrupts.  If that
2484 	 * happens we'll need to adjust things later.
2485 	 */
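	/* A hypothetical walk through the clamps below: with
	 * niqflint == 10 in MSI Interrupt Mode we'd start from
	 * ethqsets == 10 - 1 - 1 == 8; nethctrl == 6 would lower that
	 * to 6; and neq == 10 would lower it again to 10/2 == 5 since
	 * each Queue Set consumes two Egress Contexts.
	 */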
2486 	ethqsets = vfres->niqflint - 1 - (msi == MSI_MSI);
2487 	if (vfres->nethctrl != ethqsets)
2488 		ethqsets = min(vfres->nethctrl, ethqsets);
2489 	if (vfres->neq < ethqsets*2)
2490 		ethqsets = vfres->neq/2;
2491 	if (ethqsets > MAX_ETH_QSETS)
2492 		ethqsets = MAX_ETH_QSETS;
2493 	adapter->sge.max_ethqsets = ethqsets;
2494 
2495 	if (adapter->sge.max_ethqsets < adapter->params.nports) {
2496 		dev_warn(adapter->pdev_dev, "only using %d of %d available"
2497 			 " virtual interfaces (too few Queue Sets)\n",
2498 			 adapter->sge.max_ethqsets, adapter->params.nports);
2499 		adapter->params.nports = adapter->sge.max_ethqsets;
2500 	}
2501 }
2502 
2503 /*
2504  * Perform early "adapter" initialization.  This is where we discover what
2505  * adapter parameters we're going to be using and initialize basic adapter
2506  * hardware support.
2507  */
2508 static int adap_init0(struct adapter *adapter)
2509 {
2510 	struct sge_params *sge_params = &adapter->params.sge;
2511 	struct sge *s = &adapter->sge;
2512 	int err;
2513 	u32 param, val = 0;
2514 
2515 	/*
2516 	 * Some environments do not properly handle PCIE FLRs -- e.g. in Linux
2517 	 * 2.6.31 and later we can't call pci_reset_function() in order to
2518 	 * issue an FLR because of a self-deadlock on the device semaphore.
2519 	 * Meanwhile, the OS infrastructure doesn't issue FLRs in all the
2520 	 * cases where they're needed -- for instance, some versions of KVM
2521 	 * fail to reset "Assigned Devices" when the VM reboots.  Therefore we
2522 	 * use the firmware based reset in order to reset any per function
2523 	 * state.
2524 	 */
2525 	err = t4vf_fw_reset(adapter);
2526 	if (err < 0) {
2527 		dev_err(adapter->pdev_dev, "FW reset failed: err=%d\n", err);
2528 		return err;
2529 	}
2530 
2531 	/*
2532 	 * Grab basic operational parameters.  These will predominantly have
2533 	 * been set up by the Physical Function Driver or will be hard coded
2534 	 * into the adapter.  We just have to live with them ...  Note that
2535 	 * we _must_ get our VPD parameters before our SGE parameters because
2536 	 * we need to know the adapter's core clock from the VPD in order to
2537 	 * properly decode the SGE Timer Values.
2538 	 */
2539 	err = t4vf_get_dev_params(adapter);
2540 	if (err) {
2541 		dev_err(adapter->pdev_dev, "unable to retrieve adapter"
2542 			" device parameters: err=%d\n", err);
2543 		return err;
2544 	}
2545 	err = t4vf_get_vpd_params(adapter);
2546 	if (err) {
2547 		dev_err(adapter->pdev_dev, "unable to retrieve adapter"
2548 			" VPD parameters: err=%d\n", err);
2549 		return err;
2550 	}
2551 	err = t4vf_get_sge_params(adapter);
2552 	if (err) {
2553 		dev_err(adapter->pdev_dev, "unable to retrieve adapter"
2554 			" SGE parameters: err=%d\n", err);
2555 		return err;
2556 	}
2557 	err = t4vf_get_rss_glb_config(adapter);
2558 	if (err) {
2559 		dev_err(adapter->pdev_dev, "unable to retrieve adapter"
2560 			" RSS parameters: err=%d\n", err);
2561 		return err;
2562 	}
2563 	if (adapter->params.rss.mode !=
2564 	    FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
2565 		dev_err(adapter->pdev_dev, "unable to operate with global RSS"
2566 			" mode %d\n", adapter->params.rss.mode);
2567 		return -EINVAL;
2568 	}
2569 	err = t4vf_sge_init(adapter);
2570 	if (err) {
2571 		dev_err(adapter->pdev_dev, "unable to use adapter parameters:"
2572 			" err=%d\n", err);
2573 		return err;
2574 	}
2575 
2576 	/* If we're running on newer firmware, let it know that we're
2577 	 * prepared to deal with encapsulated CPL messages.  Older
2578 	 * firmware won't understand this and we'll just get
2579 	 * unencapsulated messages ...
2580 	 */
2581 	param = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
2582 		FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP);
2583 	val = 1;
2584 	(void) t4vf_set_params(adapter, 1, &param, &val);
2585 
2586 	/*
2587 	 * Retrieve our RX interrupt holdoff timer values and counter
2588 	 * threshold values from the SGE parameters.
2589 	 */
2590 	s->timer_val[0] = core_ticks_to_us(adapter,
2591 		TIMERVALUE0_G(sge_params->sge_timer_value_0_and_1));
2592 	s->timer_val[1] = core_ticks_to_us(adapter,
2593 		TIMERVALUE1_G(sge_params->sge_timer_value_0_and_1));
2594 	s->timer_val[2] = core_ticks_to_us(adapter,
2595 		TIMERVALUE0_G(sge_params->sge_timer_value_2_and_3));
2596 	s->timer_val[3] = core_ticks_to_us(adapter,
2597 		TIMERVALUE1_G(sge_params->sge_timer_value_2_and_3));
2598 	s->timer_val[4] = core_ticks_to_us(adapter,
2599 		TIMERVALUE0_G(sge_params->sge_timer_value_4_and_5));
2600 	s->timer_val[5] = core_ticks_to_us(adapter,
2601 		TIMERVALUE1_G(sge_params->sge_timer_value_4_and_5));
2602 
2603 	s->counter_val[0] = THRESHOLD_0_G(sge_params->sge_ingress_rx_threshold);
2604 	s->counter_val[1] = THRESHOLD_1_G(sge_params->sge_ingress_rx_threshold);
2605 	s->counter_val[2] = THRESHOLD_2_G(sge_params->sge_ingress_rx_threshold);
2606 	s->counter_val[3] = THRESHOLD_3_G(sge_params->sge_ingress_rx_threshold);
2607 
2608 	/*
2609 	 * Grab our Virtual Interface resource allocation, extract the
2610 	 * features that we're interested in and do a bit of sanity testing on
2611 	 * what we discover.
2612 	 */
2613 	err = t4vf_get_vfres(adapter);
2614 	if (err) {
2615 		dev_err(adapter->pdev_dev, "unable to get virtual interface"
2616 			" resources: err=%d\n", err);
2617 		return err;
2618 	}
2619 
2620 	/* Check for various parameter sanity issues */
2621 	if (adapter->params.vfres.pmask == 0) {
2622 		dev_err(adapter->pdev_dev, "no port access configured/"
2623 			"usable!\n");
2624 		return -EINVAL;
2625 	}
2626 	if (adapter->params.vfres.nvi == 0) {
2627 		dev_err(adapter->pdev_dev, "no virtual interfaces configured/"
2628 			"usable!\n");
2629 		return -EINVAL;
2630 	}
2631 
2632 	/* Initialize nports and max_ethqsets now that we have our Virtual
2633 	 * Function Resources.
2634 	 */
2635 	size_nports_qsets(adapter);
2636 
2637 	return 0;
2638 }
2639 
2640 static inline void init_rspq(struct sge_rspq *rspq, u8 timer_idx,
2641 			     u8 pkt_cnt_idx, unsigned int size,
2642 			     unsigned int iqe_size)
2643 {
2644 	rspq->intr_params = (QINTR_TIMER_IDX_V(timer_idx) |
2645 			     (pkt_cnt_idx < SGE_NCOUNTERS ?
2646 			      QINTR_CNT_EN_F : 0));
2647 	rspq->pktcnt_idx = (pkt_cnt_idx < SGE_NCOUNTERS
2648 			    ? pkt_cnt_idx
2649 			    : 0);
2650 	rspq->iqe_len = iqe_size;
2651 	rspq->size = size;
2652 }
2653 
2654 /*
2655  * Perform default configuration of DMA queues depending on the number and
2656  * type of ports we found and the number of available CPUs.  Most settings can
2657  * be modified by the admin via ethtool and cxgbtool prior to the adapter
2658  * being brought up for the first time.
2659  */
2660 static void cfg_queues(struct adapter *adapter)
2661 {
2662 	struct sge *s = &adapter->sge;
2663 	int q10g, n10g, qidx, pidx, qs;
2664 	size_t iqe_size;
2665 
2666 	/*
2667 	 * We should not be called till we know how many Queue Sets we can
2668 	 * support.  In particular, this means that we need to know what kind
2669 	 * of interrupts we'll be using ...
2670 	 */
2671 	BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0);
2672 
2673 	/*
2674 	 * Count the number of 10GbE Virtual Interfaces that we have.
2675 	 */
2676 	n10g = 0;
2677 	for_each_port(adapter, pidx)
2678 		n10g += is_x_10g_port(&adap2pinfo(adapter, pidx)->link_cfg);
2679 
2680 	/*
2681 	 * We default to 1 Queue Set per non-10G port and up to as many
2682 	 * Queue Sets as there are CPU cores per 10G port.
2683 	 */
2684 	if (n10g == 0)
2685 		q10g = 0;
2686 	else {
2687 		int n1g = (adapter->params.nports - n10g);
2688 		q10g = (adapter->sge.max_ethqsets - n1g) / n10g;
2689 		if (q10g > num_online_cpus())
2690 			q10g = num_online_cpus();
2691 	}
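	/*
	 * Worked example (hypothetical numbers): with max_ethqsets == 16
	 * and four ports of which n10g == 2 are 10G, we'd get n1g == 2
	 * and q10g == (16 - 2) / 2 == 7 Queue Sets per 10G port, further
	 * capped at num_online_cpus().
	 */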
2692 
2693 	/*
2694 	 * Allocate the "Queue Sets" to the various Virtual Interfaces.
2695 	 * The layout will be established in setup_sge_queues() when the
2696 	 * adapter is brought up for the first time.
2697 	 */
2698 	qidx = 0;
2699 	for_each_port(adapter, pidx) {
2700 		struct port_info *pi = adap2pinfo(adapter, pidx);
2701 
2702 		pi->first_qset = qidx;
2703 		pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
2704 		qidx += pi->nqsets;
2705 	}
2706 	s->ethqsets = qidx;
2707 
2708 	/*
2709 	 * The Ingress Queue Entry Size for our various Response Queues needs
2710 	 * to be big enough to accommodate the largest message we can receive
2711 	 * from the chip/firmware; which is 64 bytes ...
2712 	 */
2713 	iqe_size = 64;
2714 
2715 	/*
2716 	 * Set up default Queue Set parameters ...  Start off with the
2717 	 * shortest interrupt holdoff timer.
2718 	 */
2719 	for (qs = 0; qs < s->max_ethqsets; qs++) {
2720 		struct sge_eth_rxq *rxq = &s->ethrxq[qs];
2721 		struct sge_eth_txq *txq = &s->ethtxq[qs];
2722 
2723 		init_rspq(&rxq->rspq, 0, 0, 1024, iqe_size);
2724 		rxq->fl.size = 72;
2725 		txq->q.size = 1024;
2726 	}
2727 
2728 	/*
2729 	 * The firmware event queue is used for link state changes and
2730 	 * notifications of TX DMA completions.
2731 	 */
2732 	init_rspq(&s->fw_evtq, SGE_TIMER_RSTRT_CNTR, 0, 512, iqe_size);
2733 
2734 	/*
2735 	 * The forwarded interrupt queue is used when we're in MSI interrupt
2736 	 * mode.  In this mode all interrupts associated with RX queues will
2737 	 * be forwarded to a single queue which we'll associate with our MSI
2738 	 * interrupt vector.  The messages dropped in the forwarded interrupt
2739 	 * queue will indicate which ingress queue needs servicing ...  This
2740 	 * queue needs to be large enough to accommodate all of the ingress
2741 	 * queues which are forwarding their interrupt (+1 to prevent the PIDX
2742 	 * from equalling the CIDX if every ingress queue has an outstanding
2743 	 * interrupt).  The queue doesn't need to be any larger because no
2744 	 * ingress queue will ever have more than one outstanding interrupt at
2745 	 * any time ...
2746 	 */
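	/*
	 * Illustrative sizing check: even if every one of the (up to)
	 * MSIX_ENTRIES forwarded ingress queues posted a message at once,
	 * the queue would hold MSIX_ENTRIES entries and the +1 below
	 * would still keep the PIDX from catching up with the CIDX.
	 */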
2747 	init_rspq(&s->intrq, SGE_TIMER_RSTRT_CNTR, 0, MSIX_ENTRIES + 1,
2748 		  iqe_size);
2749 }
2750 
2751 /*
2752  * Reduce the number of Ethernet queues across all ports to at most n.
2753  * n provides at least one queue per port.
2754  */
2755 static void reduce_ethqs(struct adapter *adapter, int n)
2756 {
2757 	int i;
2758 	struct port_info *pi;
2759 
2760 	/*
2761 	 * While we have too many active Ethernet Queue Sets, iterate across the
2762 	 * "ports" and reduce their individual Queue Set allocations.
2763 	 */
2764 	BUG_ON(n < adapter->params.nports);
2765 	while (n < adapter->sge.ethqsets)
2766 		for_each_port(adapter, i) {
2767 			pi = adap2pinfo(adapter, i);
2768 			if (pi->nqsets > 1) {
2769 				pi->nqsets--;
2770 				adapter->sge.ethqsets--;
2771 				if (adapter->sge.ethqsets <= n)
2772 					break;
2773 			}
2774 		}
2775 
2776 	/*
2777 	 * Reassign the starting Queue Sets for each of the "ports" ...
2778 	 */
2779 	n = 0;
2780 	for_each_port(adapter, i) {
2781 		pi = adap2pinfo(adapter, i);
2782 		pi->first_qset = n;
2783 		n += pi->nqsets;
2784 	}
2785 }
2786 
2787 /*
2788  * We need to grab enough MSI-X vectors to cover our interrupt needs.  Ideally
2789  * we get a separate MSI-X vector for every "Queue Set" plus any extras we
2790  * need.  Minimally we need one for every Virtual Interface plus those needed
2791  * for our "extras".  Note that this process may lower the maximum number of
2792  * allowed Queue Sets ...
2793  */
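/*
 * Illustrative outcome (all numbers hypothetical): with
 * max_ethqsets == 16, nports == 2 and MSIX_EXTRAS == 1 we'd ask
 * pci_enable_msix_range() for between need == 3 and want == 17
 * vectors; were it to grant only 9, we'd end up with nqsets == 8 and
 * call reduce_ethqs() to trim the per-port Queue Set allocations to
 * fit.
 */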
2794 static int enable_msix(struct adapter *adapter)
2795 {
2796 	int i, want, need, nqsets;
2797 	struct msix_entry entries[MSIX_ENTRIES];
2798 	struct sge *s = &adapter->sge;
2799 
2800 	for (i = 0; i < MSIX_ENTRIES; ++i)
2801 		entries[i].entry = i;
2802 
2803 	/*
2804 	 * We _want_ enough MSI-X interrupts to cover all of our "Queue Sets"
2805 	 * plus those needed for our "extras" (for example, the firmware
2806 	 * message queue).  We _need_ at least one "Queue Set" per Virtual
2807 	 * Interface plus those needed for our "extras".  So now we get to see
2808 	 * if the song is right ...
2809 	 */
2810 	want = s->max_ethqsets + MSIX_EXTRAS;
2811 	need = adapter->params.nports + MSIX_EXTRAS;
2812 
2813 	want = pci_enable_msix_range(adapter->pdev, entries, need, want);
2814 	if (want < 0)
2815 		return want;
2816 
2817 	nqsets = want - MSIX_EXTRAS;
2818 	if (nqsets < s->max_ethqsets) {
2819 		dev_warn(adapter->pdev_dev, "only enough MSI-X vectors"
2820 			 " for %d Queue Sets\n", nqsets);
2821 		s->max_ethqsets = nqsets;
2822 		if (nqsets < s->ethqsets)
2823 			reduce_ethqs(adapter, nqsets);
2824 	}
2825 	for (i = 0; i < want; ++i)
2826 		adapter->msix_info[i].vec = entries[i].vector;
2827 
2828 	return 0;
2829 }
2830 
2831 static const struct net_device_ops cxgb4vf_netdev_ops	= {
2832 	.ndo_open		= cxgb4vf_open,
2833 	.ndo_stop		= cxgb4vf_stop,
2834 	.ndo_start_xmit		= t4vf_eth_xmit,
2835 	.ndo_get_stats		= cxgb4vf_get_stats,
2836 	.ndo_set_rx_mode	= cxgb4vf_set_rxmode,
2837 	.ndo_set_mac_address	= cxgb4vf_set_mac_addr,
2838 	.ndo_validate_addr	= eth_validate_addr,
2839 	.ndo_do_ioctl		= cxgb4vf_do_ioctl,
2840 	.ndo_change_mtu		= cxgb4vf_change_mtu,
2841 	.ndo_fix_features	= cxgb4vf_fix_features,
2842 	.ndo_set_features	= cxgb4vf_set_features,
2843 #ifdef CONFIG_NET_POLL_CONTROLLER
2844 	.ndo_poll_controller	= cxgb4vf_poll_controller,
2845 #endif
2846 };
2847 
2848 /*
2849  * "Probe" a device: initialize a device and construct all kernel and driver
2850  * state needed to manage the device.  This routine is called "init_one" in
2851  * the PF Driver ...
2852  */
2853 static int cxgb4vf_pci_probe(struct pci_dev *pdev,
2854 			     const struct pci_device_id *ent)
2855 {
2856 	int pci_using_dac;
2857 	int err, pidx;
2858 	unsigned int pmask;
2859 	struct adapter *adapter;
2860 	struct port_info *pi;
2861 	struct net_device *netdev;
2862 	unsigned int pf;
2863 
2864 	/*
2865 	 * Print our driver banner the first time we're called to initialize a
2866 	 * device.
2867 	 */
2868 	pr_info_once("%s - version %s\n", DRV_DESC, DRV_VERSION);
2869 
2870 	/*
2871 	 * Initialize generic PCI device state.
2872 	 */
2873 	err = pci_enable_device(pdev);
2874 	if (err) {
2875 		dev_err(&pdev->dev, "cannot enable PCI device\n");
2876 		return err;
2877 	}
2878 
2879 	/*
2880 	 * Reserve PCI resources for the device.  If we can't get them some
2881 	 * other driver may have already claimed the device ...
2882 	 */
2883 	err = pci_request_regions(pdev, KBUILD_MODNAME);
2884 	if (err) {
2885 		dev_err(&pdev->dev, "cannot obtain PCI resources\n");
2886 		goto err_disable_device;
2887 	}
2888 
2889 	/*
2890 	 * Set up our DMA mask: try for 64-bit address masking first and
2891 	 * fall back to 32-bit if we can't get 64 bits ...
2892 	 */
2893 	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
2894 	if (err == 0) {
2895 		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
2896 		if (err) {
2897 			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for"
2898 				" coherent allocations\n");
2899 			goto err_release_regions;
2900 		}
2901 		pci_using_dac = 1;
2902 	} else {
2903 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2904 		if (err != 0) {
2905 			dev_err(&pdev->dev, "no usable DMA configuration\n");
2906 			goto err_release_regions;
2907 		}
2908 		pci_using_dac = 0;
2909 	}
2910 
2911 	/*
2912 	 * Enable bus mastering for the device ...
2913 	 */
2914 	pci_set_master(pdev);
2915 
2916 	/*
2917 	 * Allocate our adapter data structure and attach it to the device.
2918 	 */
2919 	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
2920 	if (!adapter) {
2921 		err = -ENOMEM;
2922 		goto err_release_regions;
2923 	}
2924 	pci_set_drvdata(pdev, adapter);
2925 	adapter->pdev = pdev;
2926 	adapter->pdev_dev = &pdev->dev;
2927 
2928 	adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
2929 				    (sizeof(struct mbox_cmd) *
2930 				     T4VF_OS_LOG_MBOX_CMDS),
2931 				    GFP_KERNEL);
2932 	if (!adapter->mbox_log) {
2933 		err = -ENOMEM;
2934 		goto err_free_adapter;
2935 	}
2936 	adapter->mbox_log->size = T4VF_OS_LOG_MBOX_CMDS;
2937 
2938 	/*
2939 	 * Initialize SMP data synchronization resources.
2940 	 */
2941 	spin_lock_init(&adapter->stats_lock);
2942 	spin_lock_init(&adapter->mbox_lock);
2943 	INIT_LIST_HEAD(&adapter->mlist.list);
2944 
2945 	/*
2946 	 * Map our I/O registers in BAR0.
2947 	 */
2948 	adapter->regs = pci_ioremap_bar(pdev, 0);
2949 	if (!adapter->regs) {
2950 		dev_err(&pdev->dev, "cannot map device registers\n");
2951 		err = -ENOMEM;
2952 		goto err_free_adapter;
2953 	}
2954 
2955 	/* Wait for the device to become ready before proceeding ...
2956 	 */
2957 	err = t4vf_prep_adapter(adapter);
2958 	if (err) {
2959 		dev_err(adapter->pdev_dev, "device didn't become ready:"
2960 			" err=%d\n", err);
2961 		goto err_unmap_bar0;
2962 	}
2963 
2964 	/* For T5 and later we want to use the new BAR-based User Doorbells,
2965 	 * so we need to map BAR2 here ...
2966 	 */
2967 	if (!is_t4(adapter->params.chip)) {
2968 		adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
2969 					   pci_resource_len(pdev, 2));
2970 		if (!adapter->bar2) {
2971 			dev_err(adapter->pdev_dev, "cannot map BAR2 doorbells\n");
2972 			err = -ENOMEM;
2973 			goto err_unmap_bar0;
2974 		}
2975 	}
2976 	/*
2977 	 * Initialize adapter level features.
2978 	 */
2979 	adapter->name = pci_name(pdev);
2980 	adapter->msg_enable = DFLT_MSG_ENABLE;
2981 
2982 	/* If possible, we use PCIe Relaxed Ordering Attribute to deliver
2983 	 * Ingress Packet Data to Free List Buffers in order to allow for
2984 	 * chipset performance optimizations between the Root Complex and
2985 	 * Memory Controllers.  (Messages to the associated Ingress Queue
2986 	 * notifying new Packet Placement in the Free List Buffers will be
2987 	 * sent without the Relaxed Ordering Attribute, thus guaranteeing that
2988 	 * all preceding PCIe Transaction Layer Packets will be processed
2989 	 * first.)  But some Root Complexes have various issues with Upstream
2990 	 * Transaction Layer Packets with the Relaxed Ordering Attribute set.
2991 	 * PCIe devices sitting under such Root Complexes will have had the
2992 	 * Relaxed Ordering bit cleared in their configuration space, so we
2993 	 * check our own PCIe configuration space to see if it's flagged with
2994 	 * advice against using Relaxed Ordering.
2995 	 */
2996 	if (!pcie_relaxed_ordering_enabled(pdev))
2997 		adapter->flags |= ROOT_NO_RELAXED_ORDERING;
2998 
2999 	err = adap_init0(adapter);
3000 	if (err)
3001 		goto err_unmap_bar;
3002 
3003 	/*
3004 	 * Allocate our "adapter ports" and stitch everything together.
3005 	 */
3006 	pmask = adapter->params.vfres.pmask;
3007 	pf = t4vf_get_pf_from_vf(adapter);
3008 	for_each_port(adapter, pidx) {
3009 		int port_id, viid;
3010 		u8 mac[ETH_ALEN];
3011 		unsigned int naddr = 1;
3012 
3013 		/*
3014 		 * We simplistically allocate our virtual interfaces
3015 		 * sequentially across the port numbers to which we have
3016 		 * access rights.  This should be configurable in some manner
3017 		 * ...
3018 		 */
3019 		if (pmask == 0)
3020 			break;
3021 		port_id = ffs(pmask) - 1;
3022 		pmask &= ~(1 << port_id);
3023 		viid = t4vf_alloc_vi(adapter, port_id);
3024 		if (viid < 0) {
3025 			dev_err(&pdev->dev, "cannot allocate VI for port %d:"
3026 				" err=%d\n", port_id, viid);
3027 			err = viid;
3028 			goto err_free_dev;
3029 		}
3030 
3031 		/*
3032 		 * Allocate our network device and stitch things together.
3033 		 */
3034 		netdev = alloc_etherdev_mq(sizeof(struct port_info),
3035 					   MAX_PORT_QSETS);
3036 		if (netdev == NULL) {
3037 			t4vf_free_vi(adapter, viid);
3038 			err = -ENOMEM;
3039 			goto err_free_dev;
3040 		}
3041 		adapter->port[pidx] = netdev;
3042 		SET_NETDEV_DEV(netdev, &pdev->dev);
3043 		pi = netdev_priv(netdev);
3044 		pi->adapter = adapter;
3045 		pi->pidx = pidx;
3046 		pi->port_id = port_id;
3047 		pi->viid = viid;
3048 
3049 		/*
3050 		 * Initialize the starting state of our "port" and register
3051 		 * it.
3052 		 */
3053 		pi->xact_addr_filt = -1;
3054 		netif_carrier_off(netdev);
3055 		netdev->irq = pdev->irq;
3056 
3057 		netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
3058 			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3059 			NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_RXCSUM;
3060 		netdev->vlan_features = NETIF_F_SG | TSO_FLAGS |
3061 			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3062 			NETIF_F_HIGHDMA;
3063 		netdev->features = netdev->hw_features |
3064 				   NETIF_F_HW_VLAN_CTAG_TX;
3065 		if (pci_using_dac)
3066 			netdev->features |= NETIF_F_HIGHDMA;
3067 
3068 		netdev->priv_flags |= IFF_UNICAST_FLT;
3069 		netdev->min_mtu = 81;
3070 		netdev->max_mtu = ETH_MAX_MTU;
3071 
3072 		netdev->netdev_ops = &cxgb4vf_netdev_ops;
3073 		netdev->ethtool_ops = &cxgb4vf_ethtool_ops;
3074 		netdev->dev_port = pi->port_id;
3075 
3076 		/*
3077 		 * Initialize the hardware/software state for the port.
3078 		 */
3079 		err = t4vf_port_init(adapter, pidx);
3080 		if (err) {
3081 			dev_err(&pdev->dev, "cannot initialize port %d\n",
3082 				pidx);
3083 			goto err_free_dev;
3084 		}
3085 
3086 		err = t4vf_get_vf_mac_acl(adapter, pf, &naddr, mac);
3087 		if (err) {
3088 			dev_err(&pdev->dev,
3089 				"unable to determine MAC ACL address, "
3090 				"continuing anyway.. (status %d)\n", err);
3091 		} else if (naddr && adapter->params.vfres.nvi == 1) {
3092 			struct sockaddr addr;
3093 
3094 			ether_addr_copy(addr.sa_data, mac);
3095 			err = cxgb4vf_set_mac_addr(netdev, &addr);
3096 			if (err) {
3097 				dev_err(&pdev->dev,
3098 					"unable to set MAC address %pM\n",
3099 					mac);
3100 				goto err_free_dev;
3101 			}
3102 			dev_info(&pdev->dev,
3103 				 "Using assigned MAC ACL: %pM\n", mac);
3104 		}
3105 	}
3106 
3107 	/* See what interrupts we'll be using.  If we've been configured to
3108 	 * use MSI-X interrupts, try to enable them but fall back to using
3109 	 * MSI interrupts if we can't enable MSI-X interrupts.  If we can't
3110 	 * get MSI interrupts we bail with the error.
3111 	 */
3112 	if (msi == MSI_MSIX && enable_msix(adapter) == 0)
3113 		adapter->flags |= USING_MSIX;
3114 	else {
3115 		if (msi == MSI_MSIX) {
3116 			dev_info(adapter->pdev_dev,
3117 				 "Unable to use MSI-X Interrupts; falling "
3118 				 "back to MSI Interrupts\n");
3119 
3120 			/* We're going to need a Forwarded Interrupt Queue so
3121 			 * that may cut into how many Queue Sets we can
3122 			 * support.
3123 			 */
3124 			msi = MSI_MSI;
3125 			size_nports_qsets(adapter);
3126 		}
3127 		err = pci_enable_msi(pdev);
3128 		if (err) {
3129 			dev_err(&pdev->dev, "Unable to allocate MSI Interrupts;"
3130 				" err=%d\n", err);
3131 			goto err_free_dev;
3132 		}
3133 		adapter->flags |= USING_MSI;
3134 	}
3135 
3136 	/* Now that we know how many "ports" we have and what interrupt
3137 	 * mechanism we're going to use, we can configure our queue resources.
3138 	 */
3139 	cfg_queues(adapter);
3140 
3141 	/*
3142 	 * The "card" is now ready to go.  If any errors occur during device
3143 	 * registration we do not fail the whole "card" but rather proceed
3144 	 * only with the ports we manage to register successfully.  However we
3145 	 * must register at least one net device.
3146 	 */
3147 	for_each_port(adapter, pidx) {
3148 		struct port_info *pi = netdev_priv(adapter->port[pidx]);
3149 		netdev = adapter->port[pidx];
3150 		if (netdev == NULL)
3151 			continue;
3152 
3153 		netif_set_real_num_tx_queues(netdev, pi->nqsets);
3154 		netif_set_real_num_rx_queues(netdev, pi->nqsets);
3155 
3156 		err = register_netdev(netdev);
3157 		if (err) {
3158 			dev_warn(&pdev->dev, "cannot register net device %s,"
3159 				 " skipping\n", netdev->name);
3160 			continue;
3161 		}
3162 
3163 		set_bit(pidx, &adapter->registered_device_map);
3164 	}
3165 	if (adapter->registered_device_map == 0) {
3166 		dev_err(&pdev->dev, "could not register any net devices\n");
3167 		goto err_disable_interrupts;
3168 	}
3169 
3170 	/*
3171 	 * Set up our debugfs entries.
3172 	 */
3173 	if (!IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) {
3174 		adapter->debugfs_root =
3175 			debugfs_create_dir(pci_name(pdev),
3176 					   cxgb4vf_debugfs_root);
3177 		if (IS_ERR_OR_NULL(adapter->debugfs_root))
3178 			dev_warn(&pdev->dev, "could not create debugfs"
3179 				 " directory");
3180 		else
3181 			setup_debugfs(adapter);
3182 	}
3183 
3184 	/*
3185 	 * Print a short notice on the existence and configuration of the new
3186 	 * VF network device ...
3187 	 */
3188 	for_each_port(adapter, pidx) {
3189 		dev_info(adapter->pdev_dev, "%s: Chelsio VF NIC PCIe %s\n",
3190 			 adapter->port[pidx]->name,
3191 			 (adapter->flags & USING_MSIX) ? "MSI-X" :
3192 			 (adapter->flags & USING_MSI)  ? "MSI" : "");
3193 	}
3194 
3195 	/*
3196 	 * Return success!
3197 	 */
3198 	return 0;
3199 
3200 	/*
3201 	 * Error recovery and exit code.  Unwind state that's been created
3202 	 * so far and return the error.
3203 	 */
3204 err_disable_interrupts:
3205 	if (adapter->flags & USING_MSIX) {
3206 		pci_disable_msix(adapter->pdev);
3207 		adapter->flags &= ~USING_MSIX;
3208 	} else if (adapter->flags & USING_MSI) {
3209 		pci_disable_msi(adapter->pdev);
3210 		adapter->flags &= ~USING_MSI;
3211 	}
3212 
3213 err_free_dev:
3214 	for_each_port(adapter, pidx) {
3215 		netdev = adapter->port[pidx];
3216 		if (netdev == NULL)
3217 			continue;
3218 		pi = netdev_priv(netdev);
3219 		t4vf_free_vi(adapter, pi->viid);
3220 		if (test_bit(pidx, &adapter->registered_device_map))
3221 			unregister_netdev(netdev);
3222 		free_netdev(netdev);
3223 	}
3224 
3225 err_unmap_bar:
3226 	if (!is_t4(adapter->params.chip))
3227 		iounmap(adapter->bar2);
3228 
3229 err_unmap_bar0:
3230 	iounmap(adapter->regs);
3231 
3232 err_free_adapter:
3233 	kfree(adapter->mbox_log);
3234 	kfree(adapter);
3235 
3236 err_release_regions:
3237 	pci_release_regions(pdev);
3238 	pci_clear_master(pdev);
3239 
3240 err_disable_device:
3241 	pci_disable_device(pdev);
3242 
3243 	return err;
3244 }
3245 
3246 /*
3247  * "Remove" a device: tear down all kernel and driver state created in the
3248  * "probe" routine and quiesce the device (disable interrupts, etc.).  (Note
3249  * that this is called "remove_one" in the PF Driver.)
3250  */
3251 static void cxgb4vf_pci_remove(struct pci_dev *pdev)
3252 {
3253 	struct adapter *adapter = pci_get_drvdata(pdev);
3254 
3255 	/*
3256 	 * Tear down driver state associated with device.
3257 	 */
3258 	if (adapter) {
3259 		int pidx;
3260 
3261 		/*
3262 		 * Stop all of our activity.  Unregister network ports,
3263 		 * disable interrupts, etc.
3264 		 */
3265 		for_each_port(adapter, pidx)
3266 			if (test_bit(pidx, &adapter->registered_device_map))
3267 				unregister_netdev(adapter->port[pidx]);
3268 		t4vf_sge_stop(adapter);
3269 		if (adapter->flags & USING_MSIX) {
3270 			pci_disable_msix(adapter->pdev);
3271 			adapter->flags &= ~USING_MSIX;
3272 		} else if (adapter->flags & USING_MSI) {
3273 			pci_disable_msi(adapter->pdev);
3274 			adapter->flags &= ~USING_MSI;
3275 		}
3276 
3277 		/*
3278 		 * Tear down our debugfs entries.
3279 		 */
3280 		if (!IS_ERR_OR_NULL(adapter->debugfs_root)) {
3281 			cleanup_debugfs(adapter);
3282 			debugfs_remove_recursive(adapter->debugfs_root);
3283 		}
3284 
3285 		/*
3286 		 * Free all of the various resources which we've acquired ...
3287 		 */
3288 		t4vf_free_sge_resources(adapter);
3289 		for_each_port(adapter, pidx) {
3290 			struct net_device *netdev = adapter->port[pidx];
3291 			struct port_info *pi;
3292 
3293 			if (netdev == NULL)
3294 				continue;
3295 
3296 			pi = netdev_priv(netdev);
3297 			t4vf_free_vi(adapter, pi->viid);
3298 			free_netdev(netdev);
3299 		}
3300 		iounmap(adapter->regs);
3301 		if (!is_t4(adapter->params.chip))
3302 			iounmap(adapter->bar2);
3303 		kfree(adapter->mbox_log);
3304 		kfree(adapter);
3305 	}
3306 
3307 	/*
3308 	 * Disable the device and release its PCI resources.
3309 	 */
3310 	pci_disable_device(pdev);
3311 	pci_clear_master(pdev);
3312 	pci_release_regions(pdev);
3313 }
3314 
3315 /*
3316  * "Shutdown" the device: quiesce it, stopping Ingress Packet and Interrupt
3317  * delivery.
3318  */
3319 static void cxgb4vf_pci_shutdown(struct pci_dev *pdev)
3320 {
3321 	struct adapter *adapter;
3322 	int pidx;
3323 
3324 	adapter = pci_get_drvdata(pdev);
3325 	if (!adapter)
3326 		return;
3327 
3328 	/* Disable all Virtual Interfaces.  This will shut down the
3329 	 * delivery of all ingress packets into the chip for these
3330 	 * Virtual Interfaces.
3331 	 */
3332 	for_each_port(adapter, pidx)
3333 		if (test_bit(pidx, &adapter->registered_device_map))
3334 			unregister_netdev(adapter->port[pidx]);
3335 
3336 	/* Stop all Queues, which will prevent further DMA and
3337 	 * Interrupts, allowing various internal pathways to drain.
3338 	 */
3339 	t4vf_sge_stop(adapter);
3340 	if (adapter->flags & USING_MSIX) {
3341 		pci_disable_msix(adapter->pdev);
3342 		adapter->flags &= ~USING_MSIX;
3343 	} else if (adapter->flags & USING_MSI) {
3344 		pci_disable_msi(adapter->pdev);
3345 		adapter->flags &= ~USING_MSI;
3346 	}
3347 
3348 	/*
3349 	 * Free up all Queues now that further DMA and Interrupts have been
3350 	 * stopped and the various internal pathways have drained.
3351 	 */
3352 	t4vf_free_sge_resources(adapter);
3353 	pci_set_drvdata(pdev, NULL);
3354 }
3355 
3356 /* Macros needed to support the PCI Device ID Table ...
3357  */
3358 #define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
3359 	static const struct pci_device_id cxgb4vf_pci_tbl[] = {
3360 #define CH_PCI_DEVICE_ID_FUNCTION	0x8
3361 
3362 #define CH_PCI_ID_TABLE_ENTRY(devid) \
3363 		{ PCI_VDEVICE(CHELSIO, (devid)), 0 }
3364 
3365 #define CH_PCI_DEVICE_ID_TABLE_DEFINE_END { 0, } }
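/*
 * Sketch of how the macros above expand (0xABCD is a placeholder, not
 * a real device ID):
 *
 *	static const struct pci_device_id cxgb4vf_pci_tbl[] = {
 *		{ PCI_VDEVICE(CHELSIO, (0xABCD)), 0 },
 *		...
 *		{ 0, }
 *	};
 *
 * with t4_pci_id_tbl.h supplying one CH_PCI_ID_TABLE_ENTRY() per
 * supported device.
 */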
3366 
3367 #include "../cxgb4/t4_pci_id_tbl.h"
3368 
3369 MODULE_DESCRIPTION(DRV_DESC);
3370 MODULE_AUTHOR("Chelsio Communications");
3371 MODULE_LICENSE("Dual BSD/GPL");
3372 MODULE_VERSION(DRV_VERSION);
3373 MODULE_DEVICE_TABLE(pci, cxgb4vf_pci_tbl);
3374 
3375 static struct pci_driver cxgb4vf_driver = {
3376 	.name		= KBUILD_MODNAME,
3377 	.id_table	= cxgb4vf_pci_tbl,
3378 	.probe		= cxgb4vf_pci_probe,
3379 	.remove		= cxgb4vf_pci_remove,
3380 	.shutdown	= cxgb4vf_pci_shutdown,
3381 };
3382 
3383 /*
3384  * Initialize global driver state.
3385  */
3386 static int __init cxgb4vf_module_init(void)
3387 {
3388 	int ret;
3389 
3390 	/*
3391 	 * Vet our module parameters.
3392 	 */
3393 	if (msi != MSI_MSIX && msi != MSI_MSI) {
3394 		pr_warn("bad module parameter msi=%d; must be %d (MSI-X or MSI) or %d (MSI)\n",
3395 			msi, MSI_MSIX, MSI_MSI);
3396 		return -EINVAL;
3397 	}
3398 
3399 	/* Debugfs support is optional, just warn if this fails */
3400 	cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
3401 	if (IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
3402 		pr_warn("could not create debugfs entry, continuing\n");
3403 
3404 	ret = pci_register_driver(&cxgb4vf_driver);
3405 	if (ret < 0 && !IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
3406 		debugfs_remove(cxgb4vf_debugfs_root);
3407 	return ret;
3408 }
3409 
3410 /*
3411  * Tear down global driver state.
3412  */
3413 static void __exit cxgb4vf_module_exit(void)
3414 {
3415 	pci_unregister_driver(&cxgb4vf_driver);
3416 	debugfs_remove(cxgb4vf_debugfs_root);
3417 }
3418 
3419 module_init(cxgb4vf_module_init);
3420 module_exit(cxgb4vf_module_exit);
3421