1 /*
2  * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
3  * driver for Linux.
4  *
5  * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
6  *
7  * This software is available to you under a choice of one of two
8  * licenses.  You may choose to be licensed under the terms of the GNU
9  * General Public License (GPL) Version 2, available from the file
10  * COPYING in the main directory of this source tree, or the
11  * OpenIB.org BSD license below:
12  *
13  *     Redistribution and use in source and binary forms, with or
14  *     without modification, are permitted provided that the following
15  *     conditions are met:
16  *
17  *      - Redistributions of source code must retain the above
18  *        copyright notice, this list of conditions and the following
19  *        disclaimer.
20  *
21  *      - Redistributions in binary form must reproduce the above
22  *        copyright notice, this list of conditions and the following
23  *        disclaimer in the documentation and/or other materials
24  *        provided with the distribution.
25  *
26  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33  * SOFTWARE.
34  */
35 
36 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
37 
38 #include <linux/module.h>
39 #include <linux/moduleparam.h>
40 #include <linux/init.h>
41 #include <linux/pci.h>
42 #include <linux/dma-mapping.h>
43 #include <linux/netdevice.h>
44 #include <linux/etherdevice.h>
45 #include <linux/debugfs.h>
46 #include <linux/ethtool.h>
47 #include <linux/mdio.h>
48 
49 #include "t4vf_common.h"
50 #include "t4vf_defs.h"
51 
52 #include "../cxgb4/t4_regs.h"
53 #include "../cxgb4/t4_msg.h"
54 
55 /*
56  * Generic information about the driver.
57  */
58 #define DRV_VERSION "2.0.0-ko"
59 #define DRV_DESC "Chelsio T4/T5/T6 Virtual Function (VF) Network Driver"
60 
61 /*
62  * Module Parameters.
63  * ==================
64  */
65 
66 /*
67  * Default ethtool "message level" for adapters.
68  */
69 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
70 			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
71 			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
72 
73 /*
74  * The driver uses the best interrupt scheme available on a platform in the
75  * order MSI-X then MSI.  This parameter determines which of these schemes the
76  * driver may consider as follows:
77  *
78  *     msi = 2: choose from among MSI-X and MSI
79  *     msi = 1: only consider MSI interrupts
80  *
81  * Note that unlike the Physical Function driver, this Virtual Function driver
82  * does _not_ support legacy INTx interrupts (this limitation is mandated by
83  * the PCI-E SR-IOV standard).
84  */
85 #define MSI_MSIX	2
86 #define MSI_MSI		1
87 #define MSI_DEFAULT	MSI_MSIX
88 
89 static int msi = MSI_DEFAULT;
90 
91 module_param(msi, int, 0644);
92 MODULE_PARM_DESC(msi, "whether to use MSI-X or MSI");
93 
94 /*
95  * Fundamental constants.
96  * ======================
97  */
98 
99 enum {
100 	MAX_TXQ_ENTRIES		= 16384,
101 	MAX_RSPQ_ENTRIES	= 16384,
102 	MAX_RX_BUFFERS		= 16384,
103 
104 	MIN_TXQ_ENTRIES		= 32,
105 	MIN_RSPQ_ENTRIES	= 128,
106 	MIN_FL_ENTRIES		= 16,
107 
108 	/*
109 	 * For purposes of manipulating the Free List size we need to
110 	 * recognize that Free Lists are actually Egress Queues (the host
111 	 * produces free buffers which the hardware consumes), Egress Queues
112 	 * indices are all in units of Egress Context Units bytes, and free
113 	 * list entries are 64-bit PCI DMA addresses.  And since the state of
114 	 * the Producer Index == the Consumer Index implies an EMPTY list, we
115 	 * always have at least one Egress Unit's worth of Free List entries
116 	 * unused.  See sge.c for more details ...
117 	 */
118 	EQ_UNIT = SGE_EQ_IDXSIZE,
119 	FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
120 	MIN_FL_RESID = FL_PER_EQ_UNIT,
121 };
122 
123 /*
124  * Global driver state.
125  * ====================
126  */
127 
128 static struct dentry *cxgb4vf_debugfs_root;
129 
130 /*
131  * OS "Callback" functions.
132  * ========================
133  */
134 
135 /*
136  * The link status has changed on the indicated "port" (Virtual Interface).
137  */
138 void t4vf_os_link_changed(struct adapter *adapter, int pidx, int link_ok)
139 {
140 	struct net_device *dev = adapter->port[pidx];
141 
142 	/*
143 	 * If the port is disabled or the current recorded "link up"
144 	 * status matches the new status, just return.
145 	 */
146 	if (!netif_running(dev) || link_ok == netif_carrier_ok(dev))
147 		return;
148 
149 	/*
150 	 * Tell the OS that the link status has changed and print a short
151 	 * informative message on the console about the event.
152 	 */
153 	if (link_ok) {
154 		const char *s;
155 		const char *fc;
156 		const struct port_info *pi = netdev_priv(dev);
157 
158 		netif_carrier_on(dev);
159 
160 		switch (pi->link_cfg.speed) {
161 		case 100:
162 			s = "100Mbps";
163 			break;
164 		case 1000:
165 			s = "1Gbps";
166 			break;
167 		case 10000:
168 			s = "10Gbps";
169 			break;
170 		case 25000:
171 			s = "25Gbps";
172 			break;
173 		case 40000:
174 			s = "40Gbps";
175 			break;
176 		case 100000:
177 			s = "100Gbps";
178 			break;
179 
180 		default:
181 			s = "unknown";
182 			break;
183 		}
184 
185 		switch ((int)pi->link_cfg.fc) {
186 		case PAUSE_RX:
187 			fc = "RX";
188 			break;
189 
190 		case PAUSE_TX:
191 			fc = "TX";
192 			break;
193 
194 		case PAUSE_RX | PAUSE_TX:
195 			fc = "RX/TX";
196 			break;
197 
198 		default:
199 			fc = "no";
200 			break;
201 		}
202 
203 		netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s, fc);
204 	} else {
205 		netif_carrier_off(dev);
206 		netdev_info(dev, "link down\n");
207 	}
208 }
209 
210 /*
211  * THe port module type has changed on the indicated "port" (Virtual
212  * Interface).
213  */
214 void t4vf_os_portmod_changed(struct adapter *adapter, int pidx)
215 {
216 	static const char * const mod_str[] = {
217 		NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
218 	};
219 	const struct net_device *dev = adapter->port[pidx];
220 	const struct port_info *pi = netdev_priv(dev);
221 
222 	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
223 		dev_info(adapter->pdev_dev, "%s: port module unplugged\n",
224 			 dev->name);
225 	else if (pi->mod_type < ARRAY_SIZE(mod_str))
226 		dev_info(adapter->pdev_dev, "%s: %s port module inserted\n",
227 			 dev->name, mod_str[pi->mod_type]);
228 	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
229 		dev_info(adapter->pdev_dev, "%s: unsupported optical port "
230 			 "module inserted\n", dev->name);
231 	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
232 		dev_info(adapter->pdev_dev, "%s: unknown port module inserted,"
233 			 "forcing TWINAX\n", dev->name);
234 	else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR)
235 		dev_info(adapter->pdev_dev, "%s: transceiver module error\n",
236 			 dev->name);
237 	else
238 		dev_info(adapter->pdev_dev, "%s: unknown module type %d "
239 			 "inserted\n", dev->name, pi->mod_type);
240 }
241 
242 /*
243  * Net device operations.
244  * ======================
245  */
246 
247 
248 
249 
250 /*
251  * Perform the MAC and PHY actions needed to enable a "port" (Virtual
252  * Interface).
253  */
254 static int link_start(struct net_device *dev)
255 {
256 	int ret;
257 	struct port_info *pi = netdev_priv(dev);
258 
259 	/*
260 	 * We do not set address filters and promiscuity here, the stack does
261 	 * that step explicitly. Enable vlan accel.
262 	 */
263 	ret = t4vf_set_rxmode(pi->adapter, pi->viid, dev->mtu, -1, -1, -1, 1,
264 			      true);
265 	if (ret == 0) {
266 		ret = t4vf_change_mac(pi->adapter, pi->viid,
267 				      pi->xact_addr_filt, dev->dev_addr, true);
268 		if (ret >= 0) {
269 			pi->xact_addr_filt = ret;
270 			ret = 0;
271 		}
272 	}
273 
274 	/*
275 	 * We don't need to actually "start the link" itself since the
276 	 * firmware will do that for us when the first Virtual Interface
277 	 * is enabled on a port.
278 	 */
279 	if (ret == 0)
280 		ret = t4vf_enable_vi(pi->adapter, pi->viid, true, true);
281 	return ret;
282 }
283 
284 /*
285  * Name the MSI-X interrupts.
286  */
287 static void name_msix_vecs(struct adapter *adapter)
288 {
289 	int namelen = sizeof(adapter->msix_info[0].desc) - 1;
290 	int pidx;
291 
292 	/*
293 	 * Firmware events.
294 	 */
295 	snprintf(adapter->msix_info[MSIX_FW].desc, namelen,
296 		 "%s-FWeventq", adapter->name);
297 	adapter->msix_info[MSIX_FW].desc[namelen] = 0;
298 
299 	/*
300 	 * Ethernet queues.
301 	 */
302 	for_each_port(adapter, pidx) {
303 		struct net_device *dev = adapter->port[pidx];
304 		const struct port_info *pi = netdev_priv(dev);
305 		int qs, msi;
306 
307 		for (qs = 0, msi = MSIX_IQFLINT; qs < pi->nqsets; qs++, msi++) {
308 			snprintf(adapter->msix_info[msi].desc, namelen,
309 				 "%s-%d", dev->name, qs);
310 			adapter->msix_info[msi].desc[namelen] = 0;
311 		}
312 	}
313 }
314 
315 /*
316  * Request all of our MSI-X resources.
317  */
318 static int request_msix_queue_irqs(struct adapter *adapter)
319 {
320 	struct sge *s = &adapter->sge;
321 	int rxq, msi, err;
322 
323 	/*
324 	 * Firmware events.
325 	 */
326 	err = request_irq(adapter->msix_info[MSIX_FW].vec, t4vf_sge_intr_msix,
327 			  0, adapter->msix_info[MSIX_FW].desc, &s->fw_evtq);
328 	if (err)
329 		return err;
330 
331 	/*
332 	 * Ethernet queues.
333 	 */
334 	msi = MSIX_IQFLINT;
335 	for_each_ethrxq(s, rxq) {
336 		err = request_irq(adapter->msix_info[msi].vec,
337 				  t4vf_sge_intr_msix, 0,
338 				  adapter->msix_info[msi].desc,
339 				  &s->ethrxq[rxq].rspq);
340 		if (err)
341 			goto err_free_irqs;
342 		msi++;
343 	}
344 	return 0;
345 
346 err_free_irqs:
347 	while (--rxq >= 0)
348 		free_irq(adapter->msix_info[--msi].vec, &s->ethrxq[rxq].rspq);
349 	free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq);
350 	return err;
351 }
352 
353 /*
354  * Free our MSI-X resources.
355  */
356 static void free_msix_queue_irqs(struct adapter *adapter)
357 {
358 	struct sge *s = &adapter->sge;
359 	int rxq, msi;
360 
361 	free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq);
362 	msi = MSIX_IQFLINT;
363 	for_each_ethrxq(s, rxq)
364 		free_irq(adapter->msix_info[msi++].vec,
365 			 &s->ethrxq[rxq].rspq);
366 }
367 
368 /*
369  * Turn on NAPI and start up interrupts on a response queue.
370  */
371 static void qenable(struct sge_rspq *rspq)
372 {
373 	napi_enable(&rspq->napi);
374 
375 	/*
376 	 * 0-increment the Going To Sleep register to start the timer and
377 	 * enable interrupts.
378 	 */
379 	t4_write_reg(rspq->adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
380 		     CIDXINC_V(0) |
381 		     SEINTARM_V(rspq->intr_params) |
382 		     INGRESSQID_V(rspq->cntxt_id));
383 }
384 
385 /*
386  * Enable NAPI scheduling and interrupt generation for all Receive Queues.
387  */
388 static void enable_rx(struct adapter *adapter)
389 {
390 	int rxq;
391 	struct sge *s = &adapter->sge;
392 
393 	for_each_ethrxq(s, rxq)
394 		qenable(&s->ethrxq[rxq].rspq);
395 	qenable(&s->fw_evtq);
396 
397 	/*
398 	 * The interrupt queue doesn't use NAPI so we do the 0-increment of
399 	 * its Going To Sleep register here to get it started.
400 	 */
401 	if (adapter->flags & USING_MSI)
402 		t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
403 			     CIDXINC_V(0) |
404 			     SEINTARM_V(s->intrq.intr_params) |
405 			     INGRESSQID_V(s->intrq.cntxt_id));
406 
407 }
408 
409 /*
410  * Wait until all NAPI handlers are descheduled.
411  */
412 static void quiesce_rx(struct adapter *adapter)
413 {
414 	struct sge *s = &adapter->sge;
415 	int rxq;
416 
417 	for_each_ethrxq(s, rxq)
418 		napi_disable(&s->ethrxq[rxq].rspq.napi);
419 	napi_disable(&s->fw_evtq.napi);
420 }
421 
422 /*
423  * Response queue handler for the firmware event queue.
424  */
425 static int fwevtq_handler(struct sge_rspq *rspq, const __be64 *rsp,
426 			  const struct pkt_gl *gl)
427 {
428 	/*
429 	 * Extract response opcode and get pointer to CPL message body.
430 	 */
431 	struct adapter *adapter = rspq->adapter;
432 	u8 opcode = ((const struct rss_header *)rsp)->opcode;
433 	void *cpl = (void *)(rsp + 1);
434 
435 	switch (opcode) {
436 	case CPL_FW6_MSG: {
437 		/*
438 		 * We've received an asynchronous message from the firmware.
439 		 */
440 		const struct cpl_fw6_msg *fw_msg = cpl;
441 		if (fw_msg->type == FW6_TYPE_CMD_RPL)
442 			t4vf_handle_fw_rpl(adapter, fw_msg->data);
443 		break;
444 	}
445 
446 	case CPL_FW4_MSG: {
447 		/* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
448 		 */
449 		const struct cpl_sge_egr_update *p = (void *)(rsp + 3);
450 		opcode = CPL_OPCODE_G(ntohl(p->opcode_qid));
451 		if (opcode != CPL_SGE_EGR_UPDATE) {
452 			dev_err(adapter->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n"
453 				, opcode);
454 			break;
455 		}
456 		cpl = (void *)p;
457 		/*FALLTHROUGH*/
458 	}
459 
460 	case CPL_SGE_EGR_UPDATE: {
461 		/*
462 		 * We've received an Egress Queue Status Update message.  We
463 		 * get these, if the SGE is configured to send these when the
464 		 * firmware passes certain points in processing our TX
465 		 * Ethernet Queue or if we make an explicit request for one.
466 		 * We use these updates to determine when we may need to
467 		 * restart a TX Ethernet Queue which was stopped for lack of
468 		 * free TX Queue Descriptors ...
469 		 */
470 		const struct cpl_sge_egr_update *p = cpl;
471 		unsigned int qid = EGR_QID_G(be32_to_cpu(p->opcode_qid));
472 		struct sge *s = &adapter->sge;
473 		struct sge_txq *tq;
474 		struct sge_eth_txq *txq;
475 		unsigned int eq_idx;
476 
477 		/*
478 		 * Perform sanity checking on the Queue ID to make sure it
479 		 * really refers to one of our TX Ethernet Egress Queues which
480 		 * is active and matches the queue's ID.  None of these error
481 		 * conditions should ever happen so we may want to either make
482 		 * them fatal and/or conditionalized under DEBUG.
483 		 */
484 		eq_idx = EQ_IDX(s, qid);
485 		if (unlikely(eq_idx >= MAX_EGRQ)) {
486 			dev_err(adapter->pdev_dev,
487 				"Egress Update QID %d out of range\n", qid);
488 			break;
489 		}
490 		tq = s->egr_map[eq_idx];
491 		if (unlikely(tq == NULL)) {
492 			dev_err(adapter->pdev_dev,
493 				"Egress Update QID %d TXQ=NULL\n", qid);
494 			break;
495 		}
496 		txq = container_of(tq, struct sge_eth_txq, q);
497 		if (unlikely(tq->abs_id != qid)) {
498 			dev_err(adapter->pdev_dev,
499 				"Egress Update QID %d refers to TXQ %d\n",
500 				qid, tq->abs_id);
501 			break;
502 		}
503 
504 		/*
505 		 * Restart a stopped TX Queue which has less than half of its
506 		 * TX ring in use ...
507 		 */
508 		txq->q.restarts++;
509 		netif_tx_wake_queue(txq->txq);
510 		break;
511 	}
512 
513 	default:
514 		dev_err(adapter->pdev_dev,
515 			"unexpected CPL %#x on FW event queue\n", opcode);
516 	}
517 
518 	return 0;
519 }
520 
521 /*
522  * Allocate SGE TX/RX response queues.  Determine how many sets of SGE queues
523  * to use and initializes them.  We support multiple "Queue Sets" per port if
524  * we have MSI-X, otherwise just one queue set per port.
525  */
526 static int setup_sge_queues(struct adapter *adapter)
527 {
528 	struct sge *s = &adapter->sge;
529 	int err, pidx, msix;
530 
531 	/*
532 	 * Clear "Queue Set" Free List Starving and TX Queue Mapping Error
533 	 * state.
534 	 */
535 	bitmap_zero(s->starving_fl, MAX_EGRQ);
536 
537 	/*
538 	 * If we're using MSI interrupt mode we need to set up a "forwarded
539 	 * interrupt" queue which we'll set up with our MSI vector.  The rest
540 	 * of the ingress queues will be set up to forward their interrupts to
541 	 * this queue ...  This must be first since t4vf_sge_alloc_rxq() uses
542 	 * the intrq's queue ID as the interrupt forwarding queue for the
543 	 * subsequent calls ...
544 	 */
545 	if (adapter->flags & USING_MSI) {
546 		err = t4vf_sge_alloc_rxq(adapter, &s->intrq, false,
547 					 adapter->port[0], 0, NULL, NULL);
548 		if (err)
549 			goto err_free_queues;
550 	}
551 
552 	/*
553 	 * Allocate our ingress queue for asynchronous firmware messages.
554 	 */
555 	err = t4vf_sge_alloc_rxq(adapter, &s->fw_evtq, true, adapter->port[0],
556 				 MSIX_FW, NULL, fwevtq_handler);
557 	if (err)
558 		goto err_free_queues;
559 
560 	/*
561 	 * Allocate each "port"'s initial Queue Sets.  These can be changed
562 	 * later on ... up to the point where any interface on the adapter is
563 	 * brought up at which point lots of things get nailed down
564 	 * permanently ...
565 	 */
566 	msix = MSIX_IQFLINT;
567 	for_each_port(adapter, pidx) {
568 		struct net_device *dev = adapter->port[pidx];
569 		struct port_info *pi = netdev_priv(dev);
570 		struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset];
571 		struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset];
572 		int qs;
573 
574 		for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
575 			err = t4vf_sge_alloc_rxq(adapter, &rxq->rspq, false,
576 						 dev, msix++,
577 						 &rxq->fl, t4vf_ethrx_handler);
578 			if (err)
579 				goto err_free_queues;
580 
581 			err = t4vf_sge_alloc_eth_txq(adapter, txq, dev,
582 					     netdev_get_tx_queue(dev, qs),
583 					     s->fw_evtq.cntxt_id);
584 			if (err)
585 				goto err_free_queues;
586 
587 			rxq->rspq.idx = qs;
588 			memset(&rxq->stats, 0, sizeof(rxq->stats));
589 		}
590 	}
591 
592 	/*
593 	 * Create the reverse mappings for the queues.
594 	 */
595 	s->egr_base = s->ethtxq[0].q.abs_id - s->ethtxq[0].q.cntxt_id;
596 	s->ingr_base = s->ethrxq[0].rspq.abs_id - s->ethrxq[0].rspq.cntxt_id;
597 	IQ_MAP(s, s->fw_evtq.abs_id) = &s->fw_evtq;
598 	for_each_port(adapter, pidx) {
599 		struct net_device *dev = adapter->port[pidx];
600 		struct port_info *pi = netdev_priv(dev);
601 		struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset];
602 		struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset];
603 		int qs;
604 
605 		for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
606 			IQ_MAP(s, rxq->rspq.abs_id) = &rxq->rspq;
607 			EQ_MAP(s, txq->q.abs_id) = &txq->q;
608 
609 			/*
610 			 * The FW_IQ_CMD doesn't return the Absolute Queue IDs
611 			 * for Free Lists but since all of the Egress Queues
612 			 * (including Free Lists) have Relative Queue IDs
613 			 * which are computed as Absolute - Base Queue ID, we
614 			 * can synthesize the Absolute Queue IDs for the Free
615 			 * Lists.  This is useful for debugging purposes when
616 			 * we want to dump Queue Contexts via the PF Driver.
617 			 */
618 			rxq->fl.abs_id = rxq->fl.cntxt_id + s->egr_base;
619 			EQ_MAP(s, rxq->fl.abs_id) = &rxq->fl;
620 		}
621 	}
622 	return 0;
623 
624 err_free_queues:
625 	t4vf_free_sge_resources(adapter);
626 	return err;
627 }
628 
629 /*
630  * Set up Receive Side Scaling (RSS) to distribute packets to multiple receive
631  * queues.  We configure the RSS CPU lookup table to distribute to the number
632  * of HW receive queues, and the response queue lookup table to narrow that
633  * down to the response queues actually configured for each "port" (Virtual
634  * Interface).  We always configure the RSS mapping for all ports since the
635  * mapping table has plenty of entries.
636  */
637 static int setup_rss(struct adapter *adapter)
638 {
639 	int pidx;
640 
641 	for_each_port(adapter, pidx) {
642 		struct port_info *pi = adap2pinfo(adapter, pidx);
643 		struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[pi->first_qset];
644 		u16 rss[MAX_PORT_QSETS];
645 		int qs, err;
646 
647 		for (qs = 0; qs < pi->nqsets; qs++)
648 			rss[qs] = rxq[qs].rspq.abs_id;
649 
650 		err = t4vf_config_rss_range(adapter, pi->viid,
651 					    0, pi->rss_size, rss, pi->nqsets);
652 		if (err)
653 			return err;
654 
655 		/*
656 		 * Perform Global RSS Mode-specific initialization.
657 		 */
658 		switch (adapter->params.rss.mode) {
659 		case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL:
660 			/*
661 			 * If Tunnel All Lookup isn't specified in the global
662 			 * RSS Configuration, then we need to specify a
663 			 * default Ingress Queue for any ingress packets which
664 			 * aren't hashed.  We'll use our first ingress queue
665 			 * ...
666 			 */
667 			if (!adapter->params.rss.u.basicvirtual.tnlalllookup) {
668 				union rss_vi_config config;
669 				err = t4vf_read_rss_vi_config(adapter,
670 							      pi->viid,
671 							      &config);
672 				if (err)
673 					return err;
674 				config.basicvirtual.defaultq =
675 					rxq[0].rspq.abs_id;
676 				err = t4vf_write_rss_vi_config(adapter,
677 							       pi->viid,
678 							       &config);
679 				if (err)
680 					return err;
681 			}
682 			break;
683 		}
684 	}
685 
686 	return 0;
687 }
688 
689 /*
690  * Bring the adapter up.  Called whenever we go from no "ports" open to having
691  * one open.  This function performs the actions necessary to make an adapter
692  * operational, such as completing the initialization of HW modules, and
693  * enabling interrupts.  Must be called with the rtnl lock held.  (Note that
694  * this is called "cxgb_up" in the PF Driver.)
695  */
696 static int adapter_up(struct adapter *adapter)
697 {
698 	int err;
699 
700 	/*
701 	 * If this is the first time we've been called, perform basic
702 	 * adapter setup.  Once we've done this, many of our adapter
703 	 * parameters can no longer be changed ...
704 	 */
705 	if ((adapter->flags & FULL_INIT_DONE) == 0) {
706 		err = setup_sge_queues(adapter);
707 		if (err)
708 			return err;
709 		err = setup_rss(adapter);
710 		if (err) {
711 			t4vf_free_sge_resources(adapter);
712 			return err;
713 		}
714 
715 		if (adapter->flags & USING_MSIX)
716 			name_msix_vecs(adapter);
717 		adapter->flags |= FULL_INIT_DONE;
718 	}
719 
720 	/*
721 	 * Acquire our interrupt resources.  We only support MSI-X and MSI.
722 	 */
723 	BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0);
724 	if (adapter->flags & USING_MSIX)
725 		err = request_msix_queue_irqs(adapter);
726 	else
727 		err = request_irq(adapter->pdev->irq,
728 				  t4vf_intr_handler(adapter), 0,
729 				  adapter->name, adapter);
730 	if (err) {
731 		dev_err(adapter->pdev_dev, "request_irq failed, err %d\n",
732 			err);
733 		return err;
734 	}
735 
736 	/*
737 	 * Enable NAPI ingress processing and return success.
738 	 */
739 	enable_rx(adapter);
740 	t4vf_sge_start(adapter);
741 
742 	/* Initialize hash mac addr list*/
743 	INIT_LIST_HEAD(&adapter->mac_hlist);
744 	return 0;
745 }
746 
747 /*
748  * Bring the adapter down.  Called whenever the last "port" (Virtual
749  * Interface) closed.  (Note that this routine is called "cxgb_down" in the PF
750  * Driver.)
751  */
752 static void adapter_down(struct adapter *adapter)
753 {
754 	/*
755 	 * Free interrupt resources.
756 	 */
757 	if (adapter->flags & USING_MSIX)
758 		free_msix_queue_irqs(adapter);
759 	else
760 		free_irq(adapter->pdev->irq, adapter);
761 
762 	/*
763 	 * Wait for NAPI handlers to finish.
764 	 */
765 	quiesce_rx(adapter);
766 }
767 
768 /*
769  * Start up a net device.
770  */
771 static int cxgb4vf_open(struct net_device *dev)
772 {
773 	int err;
774 	struct port_info *pi = netdev_priv(dev);
775 	struct adapter *adapter = pi->adapter;
776 
777 	/*
778 	 * If this is the first interface that we're opening on the "adapter",
779 	 * bring the "adapter" up now.
780 	 */
781 	if (adapter->open_device_map == 0) {
782 		err = adapter_up(adapter);
783 		if (err)
784 			return err;
785 	}
786 
787 	/*
788 	 * Note that this interface is up and start everything up ...
789 	 */
790 	err = link_start(dev);
791 	if (err)
792 		goto err_unwind;
793 
794 	netif_tx_start_all_queues(dev);
795 	set_bit(pi->port_id, &adapter->open_device_map);
796 	return 0;
797 
798 err_unwind:
799 	if (adapter->open_device_map == 0)
800 		adapter_down(adapter);
801 	return err;
802 }
803 
804 /*
805  * Shut down a net device.  This routine is called "cxgb_close" in the PF
806  * Driver ...
807  */
808 static int cxgb4vf_stop(struct net_device *dev)
809 {
810 	struct port_info *pi = netdev_priv(dev);
811 	struct adapter *adapter = pi->adapter;
812 
813 	netif_tx_stop_all_queues(dev);
814 	netif_carrier_off(dev);
815 	t4vf_enable_vi(adapter, pi->viid, false, false);
816 	pi->link_cfg.link_ok = 0;
817 
818 	clear_bit(pi->port_id, &adapter->open_device_map);
819 	if (adapter->open_device_map == 0)
820 		adapter_down(adapter);
821 	return 0;
822 }
823 
824 /*
825  * Translate our basic statistics into the standard "ifconfig" statistics.
826  */
827 static struct net_device_stats *cxgb4vf_get_stats(struct net_device *dev)
828 {
829 	struct t4vf_port_stats stats;
830 	struct port_info *pi = netdev2pinfo(dev);
831 	struct adapter *adapter = pi->adapter;
832 	struct net_device_stats *ns = &dev->stats;
833 	int err;
834 
835 	spin_lock(&adapter->stats_lock);
836 	err = t4vf_get_port_stats(adapter, pi->pidx, &stats);
837 	spin_unlock(&adapter->stats_lock);
838 
839 	memset(ns, 0, sizeof(*ns));
840 	if (err)
841 		return ns;
842 
843 	ns->tx_bytes = (stats.tx_bcast_bytes + stats.tx_mcast_bytes +
844 			stats.tx_ucast_bytes + stats.tx_offload_bytes);
845 	ns->tx_packets = (stats.tx_bcast_frames + stats.tx_mcast_frames +
846 			  stats.tx_ucast_frames + stats.tx_offload_frames);
847 	ns->rx_bytes = (stats.rx_bcast_bytes + stats.rx_mcast_bytes +
848 			stats.rx_ucast_bytes);
849 	ns->rx_packets = (stats.rx_bcast_frames + stats.rx_mcast_frames +
850 			  stats.rx_ucast_frames);
851 	ns->multicast = stats.rx_mcast_frames;
852 	ns->tx_errors = stats.tx_drop_frames;
853 	ns->rx_errors = stats.rx_err_frames;
854 
855 	return ns;
856 }
857 
858 static inline int cxgb4vf_set_addr_hash(struct port_info *pi)
859 {
860 	struct adapter *adapter = pi->adapter;
861 	u64 vec = 0;
862 	bool ucast = false;
863 	struct hash_mac_addr *entry;
864 
865 	/* Calculate the hash vector for the updated list and program it */
866 	list_for_each_entry(entry, &adapter->mac_hlist, list) {
867 		ucast |= is_unicast_ether_addr(entry->addr);
868 		vec |= (1ULL << hash_mac_addr(entry->addr));
869 	}
870 	return t4vf_set_addr_hash(adapter, pi->viid, ucast, vec, false);
871 }
872 
873 static int cxgb4vf_mac_sync(struct net_device *netdev, const u8 *mac_addr)
874 {
875 	struct port_info *pi = netdev_priv(netdev);
876 	struct adapter *adapter = pi->adapter;
877 	int ret;
878 	u64 mhash = 0;
879 	u64 uhash = 0;
880 	bool free = false;
881 	bool ucast = is_unicast_ether_addr(mac_addr);
882 	const u8 *maclist[1] = {mac_addr};
883 	struct hash_mac_addr *new_entry;
884 
885 	ret = t4vf_alloc_mac_filt(adapter, pi->viid, free, 1, maclist,
886 				  NULL, ucast ? &uhash : &mhash, false);
887 	if (ret < 0)
888 		goto out;
889 	/* if hash != 0, then add the addr to hash addr list
890 	 * so on the end we will calculate the hash for the
891 	 * list and program it
892 	 */
893 	if (uhash || mhash) {
894 		new_entry = kzalloc(sizeof(*new_entry), GFP_ATOMIC);
895 		if (!new_entry)
896 			return -ENOMEM;
897 		ether_addr_copy(new_entry->addr, mac_addr);
898 		list_add_tail(&new_entry->list, &adapter->mac_hlist);
899 		ret = cxgb4vf_set_addr_hash(pi);
900 	}
901 out:
902 	return ret < 0 ? ret : 0;
903 }
904 
905 static int cxgb4vf_mac_unsync(struct net_device *netdev, const u8 *mac_addr)
906 {
907 	struct port_info *pi = netdev_priv(netdev);
908 	struct adapter *adapter = pi->adapter;
909 	int ret;
910 	const u8 *maclist[1] = {mac_addr};
911 	struct hash_mac_addr *entry, *tmp;
912 
913 	/* If the MAC address to be removed is in the hash addr
914 	 * list, delete it from the list and update hash vector
915 	 */
916 	list_for_each_entry_safe(entry, tmp, &adapter->mac_hlist, list) {
917 		if (ether_addr_equal(entry->addr, mac_addr)) {
918 			list_del(&entry->list);
919 			kfree(entry);
920 			return cxgb4vf_set_addr_hash(pi);
921 		}
922 	}
923 
924 	ret = t4vf_free_mac_filt(adapter, pi->viid, 1, maclist, false);
925 	return ret < 0 ? -EINVAL : 0;
926 }
927 
928 /*
929  * Set RX properties of a port, such as promiscruity, address filters, and MTU.
930  * If @mtu is -1 it is left unchanged.
931  */
932 static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
933 {
934 	struct port_info *pi = netdev_priv(dev);
935 
936 	__dev_uc_sync(dev, cxgb4vf_mac_sync, cxgb4vf_mac_unsync);
937 	__dev_mc_sync(dev, cxgb4vf_mac_sync, cxgb4vf_mac_unsync);
938 	return t4vf_set_rxmode(pi->adapter, pi->viid, -1,
939 			       (dev->flags & IFF_PROMISC) != 0,
940 			       (dev->flags & IFF_ALLMULTI) != 0,
941 			       1, -1, sleep_ok);
942 }
943 
944 /*
945  * Set the current receive modes on the device.
946  */
947 static void cxgb4vf_set_rxmode(struct net_device *dev)
948 {
949 	/* unfortunately we can't return errors to the stack */
950 	set_rxmode(dev, -1, false);
951 }
952 
953 /*
954  * Find the entry in the interrupt holdoff timer value array which comes
955  * closest to the specified interrupt holdoff value.
956  */
957 static int closest_timer(const struct sge *s, int us)
958 {
959 	int i, timer_idx = 0, min_delta = INT_MAX;
960 
961 	for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
962 		int delta = us - s->timer_val[i];
963 		if (delta < 0)
964 			delta = -delta;
965 		if (delta < min_delta) {
966 			min_delta = delta;
967 			timer_idx = i;
968 		}
969 	}
970 	return timer_idx;
971 }
972 
973 static int closest_thres(const struct sge *s, int thres)
974 {
975 	int i, delta, pktcnt_idx = 0, min_delta = INT_MAX;
976 
977 	for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
978 		delta = thres - s->counter_val[i];
979 		if (delta < 0)
980 			delta = -delta;
981 		if (delta < min_delta) {
982 			min_delta = delta;
983 			pktcnt_idx = i;
984 		}
985 	}
986 	return pktcnt_idx;
987 }
988 
989 /*
990  * Return a queue's interrupt hold-off time in us.  0 means no timer.
991  */
992 static unsigned int qtimer_val(const struct adapter *adapter,
993 			       const struct sge_rspq *rspq)
994 {
995 	unsigned int timer_idx = QINTR_TIMER_IDX_G(rspq->intr_params);
996 
997 	return timer_idx < SGE_NTIMERS
998 		? adapter->sge.timer_val[timer_idx]
999 		: 0;
1000 }
1001 
1002 /**
1003  *	set_rxq_intr_params - set a queue's interrupt holdoff parameters
1004  *	@adapter: the adapter
1005  *	@rspq: the RX response queue
1006  *	@us: the hold-off time in us, or 0 to disable timer
1007  *	@cnt: the hold-off packet count, or 0 to disable counter
1008  *
1009  *	Sets an RX response queue's interrupt hold-off time and packet count.
1010  *	At least one of the two needs to be enabled for the queue to generate
1011  *	interrupts.
1012  */
1013 static int set_rxq_intr_params(struct adapter *adapter, struct sge_rspq *rspq,
1014 			       unsigned int us, unsigned int cnt)
1015 {
1016 	unsigned int timer_idx;
1017 
1018 	/*
1019 	 * If both the interrupt holdoff timer and count are specified as
1020 	 * zero, default to a holdoff count of 1 ...
1021 	 */
1022 	if ((us | cnt) == 0)
1023 		cnt = 1;
1024 
1025 	/*
1026 	 * If an interrupt holdoff count has been specified, then find the
1027 	 * closest configured holdoff count and use that.  If the response
1028 	 * queue has already been created, then update its queue context
1029 	 * parameters ...
1030 	 */
1031 	if (cnt) {
1032 		int err;
1033 		u32 v, pktcnt_idx;
1034 
1035 		pktcnt_idx = closest_thres(&adapter->sge, cnt);
1036 		if (rspq->desc && rspq->pktcnt_idx != pktcnt_idx) {
1037 			v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
1038 			    FW_PARAMS_PARAM_X_V(
1039 					FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
1040 			    FW_PARAMS_PARAM_YZ_V(rspq->cntxt_id);
1041 			err = t4vf_set_params(adapter, 1, &v, &pktcnt_idx);
1042 			if (err)
1043 				return err;
1044 		}
1045 		rspq->pktcnt_idx = pktcnt_idx;
1046 	}
1047 
1048 	/*
1049 	 * Compute the closest holdoff timer index from the supplied holdoff
1050 	 * timer value.
1051 	 */
1052 	timer_idx = (us == 0
1053 		     ? SGE_TIMER_RSTRT_CNTR
1054 		     : closest_timer(&adapter->sge, us));
1055 
1056 	/*
1057 	 * Update the response queue's interrupt coalescing parameters and
1058 	 * return success.
1059 	 */
1060 	rspq->intr_params = (QINTR_TIMER_IDX_V(timer_idx) |
1061 			     QINTR_CNT_EN_V(cnt > 0));
1062 	return 0;
1063 }
1064 
1065 /*
1066  * Return a version number to identify the type of adapter.  The scheme is:
1067  * - bits 0..9: chip version
1068  * - bits 10..15: chip revision
1069  */
1070 static inline unsigned int mk_adap_vers(const struct adapter *adapter)
1071 {
1072 	/*
1073 	 * Chip version 4, revision 0x3f (cxgb4vf).
1074 	 */
1075 	return CHELSIO_CHIP_VERSION(adapter->params.chip) | (0x3f << 10);
1076 }
1077 
1078 /*
1079  * Execute the specified ioctl command.
1080  */
1081 static int cxgb4vf_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1082 {
1083 	int ret = 0;
1084 
1085 	switch (cmd) {
1086 	    /*
1087 	     * The VF Driver doesn't have access to any of the other
1088 	     * common Ethernet device ioctl()'s (like reading/writing
1089 	     * PHY registers, etc.
1090 	     */
1091 
1092 	default:
1093 		ret = -EOPNOTSUPP;
1094 		break;
1095 	}
1096 	return ret;
1097 }
1098 
1099 /*
1100  * Change the device's MTU.
1101  */
1102 static int cxgb4vf_change_mtu(struct net_device *dev, int new_mtu)
1103 {
1104 	int ret;
1105 	struct port_info *pi = netdev_priv(dev);
1106 
1107 	ret = t4vf_set_rxmode(pi->adapter, pi->viid, new_mtu,
1108 			      -1, -1, -1, -1, true);
1109 	if (!ret)
1110 		dev->mtu = new_mtu;
1111 	return ret;
1112 }
1113 
1114 static netdev_features_t cxgb4vf_fix_features(struct net_device *dev,
1115 	netdev_features_t features)
1116 {
1117 	/*
1118 	 * Since there is no support for separate rx/tx vlan accel
1119 	 * enable/disable make sure tx flag is always in same state as rx.
1120 	 */
1121 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
1122 		features |= NETIF_F_HW_VLAN_CTAG_TX;
1123 	else
1124 		features &= ~NETIF_F_HW_VLAN_CTAG_TX;
1125 
1126 	return features;
1127 }
1128 
1129 static int cxgb4vf_set_features(struct net_device *dev,
1130 	netdev_features_t features)
1131 {
1132 	struct port_info *pi = netdev_priv(dev);
1133 	netdev_features_t changed = dev->features ^ features;
1134 
1135 	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
1136 		t4vf_set_rxmode(pi->adapter, pi->viid, -1, -1, -1, -1,
1137 				features & NETIF_F_HW_VLAN_CTAG_TX, 0);
1138 
1139 	return 0;
1140 }
1141 
1142 /*
1143  * Change the devices MAC address.
1144  */
1145 static int cxgb4vf_set_mac_addr(struct net_device *dev, void *_addr)
1146 {
1147 	int ret;
1148 	struct sockaddr *addr = _addr;
1149 	struct port_info *pi = netdev_priv(dev);
1150 
1151 	if (!is_valid_ether_addr(addr->sa_data))
1152 		return -EADDRNOTAVAIL;
1153 
1154 	ret = t4vf_change_mac(pi->adapter, pi->viid, pi->xact_addr_filt,
1155 			      addr->sa_data, true);
1156 	if (ret < 0)
1157 		return ret;
1158 
1159 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1160 	pi->xact_addr_filt = ret;
1161 	return 0;
1162 }
1163 
1164 #ifdef CONFIG_NET_POLL_CONTROLLER
1165 /*
1166  * Poll all of our receive queues.  This is called outside of normal interrupt
1167  * context.
1168  */
1169 static void cxgb4vf_poll_controller(struct net_device *dev)
1170 {
1171 	struct port_info *pi = netdev_priv(dev);
1172 	struct adapter *adapter = pi->adapter;
1173 
1174 	if (adapter->flags & USING_MSIX) {
1175 		struct sge_eth_rxq *rxq;
1176 		int nqsets;
1177 
1178 		rxq = &adapter->sge.ethrxq[pi->first_qset];
1179 		for (nqsets = pi->nqsets; nqsets; nqsets--) {
1180 			t4vf_sge_intr_msix(0, &rxq->rspq);
1181 			rxq++;
1182 		}
1183 	} else
1184 		t4vf_intr_handler(adapter)(0, adapter);
1185 }
1186 #endif
1187 
1188 /*
1189  * Ethtool operations.
1190  * ===================
1191  *
1192  * Note that we don't support any ethtool operations which change the physical
1193  * state of the port to which we're linked.
1194  */
1195 
1196 /**
1197  *	from_fw_port_mod_type - translate Firmware Port/Module type to Ethtool
1198  *	@port_type: Firmware Port Type
1199  *	@mod_type: Firmware Module Type
1200  *
1201  *	Translate Firmware Port/Module type to Ethtool Port Type.
1202  */
1203 static int from_fw_port_mod_type(enum fw_port_type port_type,
1204 				 enum fw_port_module_type mod_type)
1205 {
1206 	if (port_type == FW_PORT_TYPE_BT_SGMII ||
1207 	    port_type == FW_PORT_TYPE_BT_XFI ||
1208 	    port_type == FW_PORT_TYPE_BT_XAUI) {
1209 		return PORT_TP;
1210 	} else if (port_type == FW_PORT_TYPE_FIBER_XFI ||
1211 		   port_type == FW_PORT_TYPE_FIBER_XAUI) {
1212 		return PORT_FIBRE;
1213 	} else if (port_type == FW_PORT_TYPE_SFP ||
1214 		   port_type == FW_PORT_TYPE_QSFP_10G ||
1215 		   port_type == FW_PORT_TYPE_QSA ||
1216 		   port_type == FW_PORT_TYPE_QSFP ||
1217 		   port_type == FW_PORT_TYPE_CR4_QSFP ||
1218 		   port_type == FW_PORT_TYPE_CR_QSFP ||
1219 		   port_type == FW_PORT_TYPE_CR2_QSFP ||
1220 		   port_type == FW_PORT_TYPE_SFP28) {
1221 		if (mod_type == FW_PORT_MOD_TYPE_LR ||
1222 		    mod_type == FW_PORT_MOD_TYPE_SR ||
1223 		    mod_type == FW_PORT_MOD_TYPE_ER ||
1224 		    mod_type == FW_PORT_MOD_TYPE_LRM)
1225 			return PORT_FIBRE;
1226 		else if (mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
1227 			 mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
1228 			return PORT_DA;
1229 		else
1230 			return PORT_OTHER;
1231 	} else if (port_type == FW_PORT_TYPE_KR4_100G ||
1232 		   port_type == FW_PORT_TYPE_KR_SFP28) {
1233 		return PORT_NONE;
1234 	}
1235 
1236 	return PORT_OTHER;
1237 }
1238 
1239 /**
1240  *	fw_caps_to_lmm - translate Firmware to ethtool Link Mode Mask
1241  *	@port_type: Firmware Port Type
1242  *	@fw_caps: Firmware Port Capabilities
1243  *	@link_mode_mask: ethtool Link Mode Mask
1244  *
1245  *	Translate a Firmware Port Capabilities specification to an ethtool
1246  *	Link Mode Mask.
1247  */
1248 static void fw_caps_to_lmm(enum fw_port_type port_type,
1249 			   unsigned int fw_caps,
1250 			   unsigned long *link_mode_mask)
1251 {
1252 	#define SET_LMM(__lmm_name) \
1253 		__set_bit(ETHTOOL_LINK_MODE_ ## __lmm_name ## _BIT, \
1254 			  link_mode_mask)
1255 
1256 	#define FW_CAPS_TO_LMM(__fw_name, __lmm_name) \
1257 		do { \
1258 			if (fw_caps & FW_PORT_CAP32_ ## __fw_name) \
1259 				SET_LMM(__lmm_name); \
1260 		} while (0)
1261 
1262 	switch (port_type) {
1263 	case FW_PORT_TYPE_BT_SGMII:
1264 	case FW_PORT_TYPE_BT_XFI:
1265 	case FW_PORT_TYPE_BT_XAUI:
1266 		SET_LMM(TP);
1267 		FW_CAPS_TO_LMM(SPEED_100M, 100baseT_Full);
1268 		FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
1269 		FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
1270 		break;
1271 
1272 	case FW_PORT_TYPE_KX4:
1273 	case FW_PORT_TYPE_KX:
1274 		SET_LMM(Backplane);
1275 		FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
1276 		FW_CAPS_TO_LMM(SPEED_10G, 10000baseKX4_Full);
1277 		break;
1278 
1279 	case FW_PORT_TYPE_KR:
1280 		SET_LMM(Backplane);
1281 		SET_LMM(10000baseKR_Full);
1282 		break;
1283 
1284 	case FW_PORT_TYPE_BP_AP:
1285 		SET_LMM(Backplane);
1286 		SET_LMM(10000baseR_FEC);
1287 		SET_LMM(10000baseKR_Full);
1288 		SET_LMM(1000baseKX_Full);
1289 		break;
1290 
1291 	case FW_PORT_TYPE_BP4_AP:
1292 		SET_LMM(Backplane);
1293 		SET_LMM(10000baseR_FEC);
1294 		SET_LMM(10000baseKR_Full);
1295 		SET_LMM(1000baseKX_Full);
1296 		SET_LMM(10000baseKX4_Full);
1297 		break;
1298 
1299 	case FW_PORT_TYPE_FIBER_XFI:
1300 	case FW_PORT_TYPE_FIBER_XAUI:
1301 	case FW_PORT_TYPE_SFP:
1302 	case FW_PORT_TYPE_QSFP_10G:
1303 	case FW_PORT_TYPE_QSA:
1304 		SET_LMM(FIBRE);
1305 		FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
1306 		FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
1307 		break;
1308 
1309 	case FW_PORT_TYPE_BP40_BA:
1310 	case FW_PORT_TYPE_QSFP:
1311 		SET_LMM(FIBRE);
1312 		SET_LMM(40000baseSR4_Full);
1313 		break;
1314 
1315 	case FW_PORT_TYPE_CR_QSFP:
1316 	case FW_PORT_TYPE_SFP28:
1317 		SET_LMM(FIBRE);
1318 		SET_LMM(25000baseCR_Full);
1319 		break;
1320 
1321 	case FW_PORT_TYPE_KR_SFP28:
1322 		SET_LMM(Backplane);
1323 		SET_LMM(25000baseKR_Full);
1324 		break;
1325 
1326 	case FW_PORT_TYPE_CR2_QSFP:
1327 		SET_LMM(FIBRE);
1328 		SET_LMM(50000baseSR2_Full);
1329 		break;
1330 
1331 	case FW_PORT_TYPE_KR4_100G:
1332 	case FW_PORT_TYPE_CR4_QSFP:
1333 		SET_LMM(FIBRE);
1334 		SET_LMM(100000baseCR4_Full);
1335 		break;
1336 
1337 	default:
1338 		break;
1339 	}
1340 
1341 	FW_CAPS_TO_LMM(ANEG, Autoneg);
1342 	FW_CAPS_TO_LMM(802_3_PAUSE, Pause);
1343 	FW_CAPS_TO_LMM(802_3_ASM_DIR, Asym_Pause);
1344 
1345 	#undef FW_CAPS_TO_LMM
1346 	#undef SET_LMM
1347 }
1348 
1349 static int cxgb4vf_get_link_ksettings(struct net_device *dev,
1350 				  struct ethtool_link_ksettings *link_ksettings)
1351 {
1352 	struct port_info *pi = netdev_priv(dev);
1353 	struct ethtool_link_settings *base = &link_ksettings->base;
1354 
1355 	/* For the nonce, the Firmware doesn't send up Port State changes
1356 	 * when the Virtual Interface attached to the Port is down.  So
1357 	 * if it's down, let's grab any changes.
1358 	 */
1359 	if (!netif_running(dev))
1360 		(void)t4vf_update_port_info(pi);
1361 
1362 	ethtool_link_ksettings_zero_link_mode(link_ksettings, supported);
1363 	ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
1364 	ethtool_link_ksettings_zero_link_mode(link_ksettings, lp_advertising);
1365 
1366 	base->port = from_fw_port_mod_type(pi->port_type, pi->mod_type);
1367 
1368 	if (pi->mdio_addr >= 0) {
1369 		base->phy_address = pi->mdio_addr;
1370 		base->mdio_support = (pi->port_type == FW_PORT_TYPE_BT_SGMII
1371 				      ? ETH_MDIO_SUPPORTS_C22
1372 				      : ETH_MDIO_SUPPORTS_C45);
1373 	} else {
1374 		base->phy_address = 255;
1375 		base->mdio_support = 0;
1376 	}
1377 
1378 	fw_caps_to_lmm(pi->port_type, pi->link_cfg.pcaps,
1379 		       link_ksettings->link_modes.supported);
1380 	fw_caps_to_lmm(pi->port_type, pi->link_cfg.acaps,
1381 		       link_ksettings->link_modes.advertising);
1382 	fw_caps_to_lmm(pi->port_type, pi->link_cfg.lpacaps,
1383 		       link_ksettings->link_modes.lp_advertising);
1384 
1385 	if (netif_carrier_ok(dev)) {
1386 		base->speed = pi->link_cfg.speed;
1387 		base->duplex = DUPLEX_FULL;
1388 	} else {
1389 		base->speed = SPEED_UNKNOWN;
1390 		base->duplex = DUPLEX_UNKNOWN;
1391 	}
1392 
1393 	base->autoneg = pi->link_cfg.autoneg;
1394 	if (pi->link_cfg.pcaps & FW_PORT_CAP32_ANEG)
1395 		ethtool_link_ksettings_add_link_mode(link_ksettings,
1396 						     supported, Autoneg);
1397 	if (pi->link_cfg.autoneg)
1398 		ethtool_link_ksettings_add_link_mode(link_ksettings,
1399 						     advertising, Autoneg);
1400 
1401 	return 0;
1402 }
1403 
1404 /* Translate the Firmware FEC value into the ethtool value. */
1405 static inline unsigned int fwcap_to_eth_fec(unsigned int fw_fec)
1406 {
1407 	unsigned int eth_fec = 0;
1408 
1409 	if (fw_fec & FW_PORT_CAP32_FEC_RS)
1410 		eth_fec |= ETHTOOL_FEC_RS;
1411 	if (fw_fec & FW_PORT_CAP32_FEC_BASER_RS)
1412 		eth_fec |= ETHTOOL_FEC_BASER;
1413 
1414 	/* if nothing is set, then FEC is off */
1415 	if (!eth_fec)
1416 		eth_fec = ETHTOOL_FEC_OFF;
1417 
1418 	return eth_fec;
1419 }
1420 
1421 /* Translate Common Code FEC value into ethtool value. */
1422 static inline unsigned int cc_to_eth_fec(unsigned int cc_fec)
1423 {
1424 	unsigned int eth_fec = 0;
1425 
1426 	if (cc_fec & FEC_AUTO)
1427 		eth_fec |= ETHTOOL_FEC_AUTO;
1428 	if (cc_fec & FEC_RS)
1429 		eth_fec |= ETHTOOL_FEC_RS;
1430 	if (cc_fec & FEC_BASER_RS)
1431 		eth_fec |= ETHTOOL_FEC_BASER;
1432 
1433 	/* if nothing is set, then FEC is off */
1434 	if (!eth_fec)
1435 		eth_fec = ETHTOOL_FEC_OFF;
1436 
1437 	return eth_fec;
1438 }
1439 
1440 static int cxgb4vf_get_fecparam(struct net_device *dev,
1441 				struct ethtool_fecparam *fec)
1442 {
1443 	const struct port_info *pi = netdev_priv(dev);
1444 	const struct link_config *lc = &pi->link_cfg;
1445 
1446 	/* Translate the Firmware FEC Support into the ethtool value.  We
1447 	 * always support IEEE 802.3 "automatic" selection of Link FEC type if
1448 	 * any FEC is supported.
1449 	 */
1450 	fec->fec = fwcap_to_eth_fec(lc->pcaps);
1451 	if (fec->fec != ETHTOOL_FEC_OFF)
1452 		fec->fec |= ETHTOOL_FEC_AUTO;
1453 
1454 	/* Translate the current internal FEC parameters into the
1455 	 * ethtool values.
1456 	 */
1457 	fec->active_fec = cc_to_eth_fec(lc->fec);
1458 	return 0;
1459 }
1460 
1461 /*
1462  * Return our driver information.
1463  */
1464 static void cxgb4vf_get_drvinfo(struct net_device *dev,
1465 				struct ethtool_drvinfo *drvinfo)
1466 {
1467 	struct adapter *adapter = netdev2adap(dev);
1468 
1469 	strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
1470 	strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
1471 	strlcpy(drvinfo->bus_info, pci_name(to_pci_dev(dev->dev.parent)),
1472 		sizeof(drvinfo->bus_info));
1473 	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
1474 		 "%u.%u.%u.%u, TP %u.%u.%u.%u",
1475 		 FW_HDR_FW_VER_MAJOR_G(adapter->params.dev.fwrev),
1476 		 FW_HDR_FW_VER_MINOR_G(adapter->params.dev.fwrev),
1477 		 FW_HDR_FW_VER_MICRO_G(adapter->params.dev.fwrev),
1478 		 FW_HDR_FW_VER_BUILD_G(adapter->params.dev.fwrev),
1479 		 FW_HDR_FW_VER_MAJOR_G(adapter->params.dev.tprev),
1480 		 FW_HDR_FW_VER_MINOR_G(adapter->params.dev.tprev),
1481 		 FW_HDR_FW_VER_MICRO_G(adapter->params.dev.tprev),
1482 		 FW_HDR_FW_VER_BUILD_G(adapter->params.dev.tprev));
1483 }
1484 
1485 /*
1486  * Return current adapter message level.
1487  */
1488 static u32 cxgb4vf_get_msglevel(struct net_device *dev)
1489 {
1490 	return netdev2adap(dev)->msg_enable;
1491 }
1492 
1493 /*
1494  * Set current adapter message level.
1495  */
1496 static void cxgb4vf_set_msglevel(struct net_device *dev, u32 msglevel)
1497 {
1498 	netdev2adap(dev)->msg_enable = msglevel;
1499 }
1500 
1501 /*
1502  * Return the device's current Queue Set ring size parameters along with the
1503  * allowed maximum values.  Since ethtool doesn't understand the concept of
1504  * multi-queue devices, we just return the current values associated with the
1505  * first Queue Set.
1506  */
1507 static void cxgb4vf_get_ringparam(struct net_device *dev,
1508 				  struct ethtool_ringparam *rp)
1509 {
1510 	const struct port_info *pi = netdev_priv(dev);
1511 	const struct sge *s = &pi->adapter->sge;
1512 
1513 	rp->rx_max_pending = MAX_RX_BUFFERS;
1514 	rp->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
1515 	rp->rx_jumbo_max_pending = 0;
1516 	rp->tx_max_pending = MAX_TXQ_ENTRIES;
1517 
1518 	rp->rx_pending = s->ethrxq[pi->first_qset].fl.size - MIN_FL_RESID;
1519 	rp->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
1520 	rp->rx_jumbo_pending = 0;
1521 	rp->tx_pending = s->ethtxq[pi->first_qset].q.size;
1522 }
1523 
1524 /*
1525  * Set the Queue Set ring size parameters for the device.  Again, since
1526  * ethtool doesn't allow for the concept of multiple queues per device, we'll
1527  * apply these new values across all of the Queue Sets associated with the
1528  * device -- after vetting them of course!
1529  */
1530 static int cxgb4vf_set_ringparam(struct net_device *dev,
1531 				 struct ethtool_ringparam *rp)
1532 {
1533 	const struct port_info *pi = netdev_priv(dev);
1534 	struct adapter *adapter = pi->adapter;
1535 	struct sge *s = &adapter->sge;
1536 	int qs;
1537 
1538 	if (rp->rx_pending > MAX_RX_BUFFERS ||
1539 	    rp->rx_jumbo_pending ||
1540 	    rp->tx_pending > MAX_TXQ_ENTRIES ||
1541 	    rp->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1542 	    rp->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1543 	    rp->rx_pending < MIN_FL_ENTRIES ||
1544 	    rp->tx_pending < MIN_TXQ_ENTRIES)
1545 		return -EINVAL;
1546 
1547 	if (adapter->flags & FULL_INIT_DONE)
1548 		return -EBUSY;
1549 
1550 	for (qs = pi->first_qset; qs < pi->first_qset + pi->nqsets; qs++) {
1551 		s->ethrxq[qs].fl.size = rp->rx_pending + MIN_FL_RESID;
1552 		s->ethrxq[qs].rspq.size = rp->rx_mini_pending;
1553 		s->ethtxq[qs].q.size = rp->tx_pending;
1554 	}
1555 	return 0;
1556 }
1557 
1558 /*
1559  * Return the interrupt holdoff timer and count for the first Queue Set on the
1560  * device.  Our extension ioctl() (the cxgbtool interface) allows the
1561  * interrupt holdoff timer to be read on all of the device's Queue Sets.
1562  */
1563 static int cxgb4vf_get_coalesce(struct net_device *dev,
1564 				struct ethtool_coalesce *coalesce)
1565 {
1566 	const struct port_info *pi = netdev_priv(dev);
1567 	const struct adapter *adapter = pi->adapter;
1568 	const struct sge_rspq *rspq = &adapter->sge.ethrxq[pi->first_qset].rspq;
1569 
1570 	coalesce->rx_coalesce_usecs = qtimer_val(adapter, rspq);
1571 	coalesce->rx_max_coalesced_frames =
1572 		((rspq->intr_params & QINTR_CNT_EN_F)
1573 		 ? adapter->sge.counter_val[rspq->pktcnt_idx]
1574 		 : 0);
1575 	return 0;
1576 }
1577 
1578 /*
1579  * Set the RX interrupt holdoff timer and count for the first Queue Set on the
1580  * interface.  Our extension ioctl() (the cxgbtool interface) allows us to set
1581  * the interrupt holdoff timer on any of the device's Queue Sets.
1582  */
1583 static int cxgb4vf_set_coalesce(struct net_device *dev,
1584 				struct ethtool_coalesce *coalesce)
1585 {
1586 	const struct port_info *pi = netdev_priv(dev);
1587 	struct adapter *adapter = pi->adapter;
1588 
1589 	return set_rxq_intr_params(adapter,
1590 				   &adapter->sge.ethrxq[pi->first_qset].rspq,
1591 				   coalesce->rx_coalesce_usecs,
1592 				   coalesce->rx_max_coalesced_frames);
1593 }
1594 
1595 /*
1596  * Report current port link pause parameter settings.
1597  */
1598 static void cxgb4vf_get_pauseparam(struct net_device *dev,
1599 				   struct ethtool_pauseparam *pauseparam)
1600 {
1601 	struct port_info *pi = netdev_priv(dev);
1602 
1603 	pauseparam->autoneg = (pi->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
1604 	pauseparam->rx_pause = (pi->link_cfg.fc & PAUSE_RX) != 0;
1605 	pauseparam->tx_pause = (pi->link_cfg.fc & PAUSE_TX) != 0;
1606 }
1607 
1608 /*
1609  * Identify the port by blinking the port's LED.
1610  */
1611 static int cxgb4vf_phys_id(struct net_device *dev,
1612 			   enum ethtool_phys_id_state state)
1613 {
1614 	unsigned int val;
1615 	struct port_info *pi = netdev_priv(dev);
1616 
1617 	if (state == ETHTOOL_ID_ACTIVE)
1618 		val = 0xffff;
1619 	else if (state == ETHTOOL_ID_INACTIVE)
1620 		val = 0;
1621 	else
1622 		return -EINVAL;
1623 
1624 	return t4vf_identify_port(pi->adapter, pi->viid, val);
1625 }
1626 
1627 /*
1628  * Port stats maintained per queue of the port.
1629  */
1630 struct queue_port_stats {
1631 	u64 tso;
1632 	u64 tx_csum;
1633 	u64 rx_csum;
1634 	u64 vlan_ex;
1635 	u64 vlan_ins;
1636 	u64 lro_pkts;
1637 	u64 lro_merged;
1638 };
1639 
1640 /*
1641  * Strings for the ETH_SS_STATS statistics set ("ethtool -S").  Note that
1642  * these need to match the order of statistics returned by
1643  * t4vf_get_port_stats().
1644  */
1645 static const char stats_strings[][ETH_GSTRING_LEN] = {
1646 	/*
1647 	 * These must match the layout of the t4vf_port_stats structure.
1648 	 */
1649 	"TxBroadcastBytes  ",
1650 	"TxBroadcastFrames ",
1651 	"TxMulticastBytes  ",
1652 	"TxMulticastFrames ",
1653 	"TxUnicastBytes    ",
1654 	"TxUnicastFrames   ",
1655 	"TxDroppedFrames   ",
1656 	"TxOffloadBytes    ",
1657 	"TxOffloadFrames   ",
1658 	"RxBroadcastBytes  ",
1659 	"RxBroadcastFrames ",
1660 	"RxMulticastBytes  ",
1661 	"RxMulticastFrames ",
1662 	"RxUnicastBytes    ",
1663 	"RxUnicastFrames   ",
1664 	"RxErrorFrames     ",
1665 
1666 	/*
1667 	 * These are accumulated per-queue statistics and must match the
1668 	 * order of the fields in the queue_port_stats structure.
1669 	 */
1670 	"TSO               ",
1671 	"TxCsumOffload     ",
1672 	"RxCsumGood        ",
1673 	"VLANextractions   ",
1674 	"VLANinsertions    ",
1675 	"GROPackets        ",
1676 	"GROMerged         ",
1677 };
1678 
1679 /*
1680  * Return the number of statistics in the specified statistics set.
1681  */
1682 static int cxgb4vf_get_sset_count(struct net_device *dev, int sset)
1683 {
1684 	switch (sset) {
1685 	case ETH_SS_STATS:
1686 		return ARRAY_SIZE(stats_strings);
1687 	default:
1688 		return -EOPNOTSUPP;
1689 	}
1690 	/*NOTREACHED*/
1691 }
1692 
1693 /*
1694  * Return the strings for the specified statistics set.
1695  */
1696 static void cxgb4vf_get_strings(struct net_device *dev,
1697 				u32 sset,
1698 				u8 *data)
1699 {
1700 	switch (sset) {
1701 	case ETH_SS_STATS:
1702 		memcpy(data, stats_strings, sizeof(stats_strings));
1703 		break;
1704 	}
1705 }
1706 
1707 /*
1708  * Small utility routine to accumulate queue statistics across the queues of
1709  * a "port".
1710  */
1711 static void collect_sge_port_stats(const struct adapter *adapter,
1712 				   const struct port_info *pi,
1713 				   struct queue_port_stats *stats)
1714 {
1715 	const struct sge_eth_txq *txq = &adapter->sge.ethtxq[pi->first_qset];
1716 	const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[pi->first_qset];
1717 	int qs;
1718 
1719 	memset(stats, 0, sizeof(*stats));
1720 	for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
1721 		stats->tso += txq->tso;
1722 		stats->tx_csum += txq->tx_cso;
1723 		stats->rx_csum += rxq->stats.rx_cso;
1724 		stats->vlan_ex += rxq->stats.vlan_ex;
1725 		stats->vlan_ins += txq->vlan_ins;
1726 		stats->lro_pkts += rxq->stats.lro_pkts;
1727 		stats->lro_merged += rxq->stats.lro_merged;
1728 	}
1729 }
1730 
1731 /*
1732  * Return the ETH_SS_STATS statistics set.
1733  */
1734 static void cxgb4vf_get_ethtool_stats(struct net_device *dev,
1735 				      struct ethtool_stats *stats,
1736 				      u64 *data)
1737 {
1738 	struct port_info *pi = netdev2pinfo(dev);
1739 	struct adapter *adapter = pi->adapter;
1740 	int err = t4vf_get_port_stats(adapter, pi->pidx,
1741 				      (struct t4vf_port_stats *)data);
1742 	if (err)
1743 		memset(data, 0, sizeof(struct t4vf_port_stats));
1744 
1745 	data += sizeof(struct t4vf_port_stats) / sizeof(u64);
1746 	collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
1747 }
1748 
1749 /*
1750  * Return the size of our register map.
1751  */
1752 static int cxgb4vf_get_regs_len(struct net_device *dev)
1753 {
1754 	return T4VF_REGMAP_SIZE;
1755 }
1756 
1757 /*
1758  * Dump a block of registers, start to end inclusive, into a buffer.
1759  */
1760 static void reg_block_dump(struct adapter *adapter, void *regbuf,
1761 			   unsigned int start, unsigned int end)
1762 {
1763 	u32 *bp = regbuf + start - T4VF_REGMAP_START;
1764 
1765 	for ( ; start <= end; start += sizeof(u32)) {
1766 		/*
1767 		 * Avoid reading the Mailbox Control register since that
1768 		 * can trigger a Mailbox Ownership Arbitration cycle and
1769 		 * interfere with communication with the firmware.
1770 		 */
1771 		if (start == T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL)
1772 			*bp++ = 0xffff;
1773 		else
1774 			*bp++ = t4_read_reg(adapter, start);
1775 	}
1776 }
1777 
1778 /*
1779  * Copy our entire register map into the provided buffer.
1780  */
1781 static void cxgb4vf_get_regs(struct net_device *dev,
1782 			     struct ethtool_regs *regs,
1783 			     void *regbuf)
1784 {
1785 	struct adapter *adapter = netdev2adap(dev);
1786 
1787 	regs->version = mk_adap_vers(adapter);
1788 
1789 	/*
1790 	 * Fill in register buffer with our register map.
1791 	 */
1792 	memset(regbuf, 0, T4VF_REGMAP_SIZE);
1793 
1794 	reg_block_dump(adapter, regbuf,
1795 		       T4VF_SGE_BASE_ADDR + T4VF_MOD_MAP_SGE_FIRST,
1796 		       T4VF_SGE_BASE_ADDR + T4VF_MOD_MAP_SGE_LAST);
1797 	reg_block_dump(adapter, regbuf,
1798 		       T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_FIRST,
1799 		       T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_LAST);
1800 
1801 	/* T5 adds new registers in the PL Register map.
1802 	 */
1803 	reg_block_dump(adapter, regbuf,
1804 		       T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_FIRST,
1805 		       T4VF_PL_BASE_ADDR + (is_t4(adapter->params.chip)
1806 		       ? PL_VF_WHOAMI_A : PL_VF_REVISION_A));
1807 	reg_block_dump(adapter, regbuf,
1808 		       T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_FIRST,
1809 		       T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_LAST);
1810 
1811 	reg_block_dump(adapter, regbuf,
1812 		       T4VF_MBDATA_BASE_ADDR + T4VF_MBDATA_FIRST,
1813 		       T4VF_MBDATA_BASE_ADDR + T4VF_MBDATA_LAST);
1814 }
1815 
1816 /*
1817  * Report current Wake On LAN settings.
1818  */
1819 static void cxgb4vf_get_wol(struct net_device *dev,
1820 			    struct ethtool_wolinfo *wol)
1821 {
1822 	wol->supported = 0;
1823 	wol->wolopts = 0;
1824 	memset(&wol->sopass, 0, sizeof(wol->sopass));
1825 }
1826 
1827 /*
1828  * TCP Segmentation Offload flags which we support.
1829  */
1830 #define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
1831 
1832 static const struct ethtool_ops cxgb4vf_ethtool_ops = {
1833 	.get_link_ksettings	= cxgb4vf_get_link_ksettings,
1834 	.get_fecparam		= cxgb4vf_get_fecparam,
1835 	.get_drvinfo		= cxgb4vf_get_drvinfo,
1836 	.get_msglevel		= cxgb4vf_get_msglevel,
1837 	.set_msglevel		= cxgb4vf_set_msglevel,
1838 	.get_ringparam		= cxgb4vf_get_ringparam,
1839 	.set_ringparam		= cxgb4vf_set_ringparam,
1840 	.get_coalesce		= cxgb4vf_get_coalesce,
1841 	.set_coalesce		= cxgb4vf_set_coalesce,
1842 	.get_pauseparam		= cxgb4vf_get_pauseparam,
1843 	.get_link		= ethtool_op_get_link,
1844 	.get_strings		= cxgb4vf_get_strings,
1845 	.set_phys_id		= cxgb4vf_phys_id,
1846 	.get_sset_count		= cxgb4vf_get_sset_count,
1847 	.get_ethtool_stats	= cxgb4vf_get_ethtool_stats,
1848 	.get_regs_len		= cxgb4vf_get_regs_len,
1849 	.get_regs		= cxgb4vf_get_regs,
1850 	.get_wol		= cxgb4vf_get_wol,
1851 };
1852 
1853 /*
1854  * /sys/kernel/debug/cxgb4vf support code and data.
1855  * ================================================
1856  */
1857 
1858 /*
1859  * Show Firmware Mailbox Command/Reply Log
1860  *
1861  * Note that we don't do any locking when dumping the Firmware Mailbox Log so
1862  * it's possible that we can catch things during a log update and therefore
1863  * see partially corrupted log entries.  But i9t's probably Good Enough(tm).
1864  * If we ever decide that we want to make sure that we're dumping a coherent
1865  * log, we'd need to perform locking in the mailbox logging and in
1866  * mboxlog_open() where we'd need to grab the entire mailbox log in one go
1867  * like we do for the Firmware Device Log.  But as stated above, meh ...
1868  */
1869 static int mboxlog_show(struct seq_file *seq, void *v)
1870 {
1871 	struct adapter *adapter = seq->private;
1872 	struct mbox_cmd_log *log = adapter->mbox_log;
1873 	struct mbox_cmd *entry;
1874 	int entry_idx, i;
1875 
1876 	if (v == SEQ_START_TOKEN) {
1877 		seq_printf(seq,
1878 			   "%10s  %15s  %5s  %5s  %s\n",
1879 			   "Seq#", "Tstamp", "Atime", "Etime",
1880 			   "Command/Reply");
1881 		return 0;
1882 	}
1883 
1884 	entry_idx = log->cursor + ((uintptr_t)v - 2);
1885 	if (entry_idx >= log->size)
1886 		entry_idx -= log->size;
1887 	entry = mbox_cmd_log_entry(log, entry_idx);
1888 
1889 	/* skip over unused entries */
1890 	if (entry->timestamp == 0)
1891 		return 0;
1892 
1893 	seq_printf(seq, "%10u  %15llu  %5d  %5d",
1894 		   entry->seqno, entry->timestamp,
1895 		   entry->access, entry->execute);
1896 	for (i = 0; i < MBOX_LEN / 8; i++) {
1897 		u64 flit = entry->cmd[i];
1898 		u32 hi = (u32)(flit >> 32);
1899 		u32 lo = (u32)flit;
1900 
1901 		seq_printf(seq, "  %08x %08x", hi, lo);
1902 	}
1903 	seq_puts(seq, "\n");
1904 	return 0;
1905 }
1906 
1907 static inline void *mboxlog_get_idx(struct seq_file *seq, loff_t pos)
1908 {
1909 	struct adapter *adapter = seq->private;
1910 	struct mbox_cmd_log *log = adapter->mbox_log;
1911 
1912 	return ((pos <= log->size) ? (void *)(uintptr_t)(pos + 1) : NULL);
1913 }
1914 
1915 static void *mboxlog_start(struct seq_file *seq, loff_t *pos)
1916 {
1917 	return *pos ? mboxlog_get_idx(seq, *pos) : SEQ_START_TOKEN;
1918 }
1919 
1920 static void *mboxlog_next(struct seq_file *seq, void *v, loff_t *pos)
1921 {
1922 	++*pos;
1923 	return mboxlog_get_idx(seq, *pos);
1924 }
1925 
1926 static void mboxlog_stop(struct seq_file *seq, void *v)
1927 {
1928 }
1929 
1930 static const struct seq_operations mboxlog_seq_ops = {
1931 	.start = mboxlog_start,
1932 	.next  = mboxlog_next,
1933 	.stop  = mboxlog_stop,
1934 	.show  = mboxlog_show
1935 };
1936 
1937 static int mboxlog_open(struct inode *inode, struct file *file)
1938 {
1939 	int res = seq_open(file, &mboxlog_seq_ops);
1940 
1941 	if (!res) {
1942 		struct seq_file *seq = file->private_data;
1943 
1944 		seq->private = inode->i_private;
1945 	}
1946 	return res;
1947 }
1948 
1949 static const struct file_operations mboxlog_fops = {
1950 	.owner   = THIS_MODULE,
1951 	.open    = mboxlog_open,
1952 	.read    = seq_read,
1953 	.llseek  = seq_lseek,
1954 	.release = seq_release,
1955 };
1956 
1957 /*
1958  * Show SGE Queue Set information.  We display QPL Queues Sets per line.
1959  */
1960 #define QPL	4
1961 
1962 static int sge_qinfo_show(struct seq_file *seq, void *v)
1963 {
1964 	struct adapter *adapter = seq->private;
1965 	int eth_entries = DIV_ROUND_UP(adapter->sge.ethqsets, QPL);
1966 	int qs, r = (uintptr_t)v - 1;
1967 
1968 	if (r)
1969 		seq_putc(seq, '\n');
1970 
1971 	#define S3(fmt_spec, s, v) \
1972 		do {\
1973 			seq_printf(seq, "%-12s", s); \
1974 			for (qs = 0; qs < n; ++qs) \
1975 				seq_printf(seq, " %16" fmt_spec, v); \
1976 			seq_putc(seq, '\n'); \
1977 		} while (0)
1978 	#define S(s, v)		S3("s", s, v)
1979 	#define T(s, v)		S3("u", s, txq[qs].v)
1980 	#define R(s, v)		S3("u", s, rxq[qs].v)
1981 
1982 	if (r < eth_entries) {
1983 		const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[r * QPL];
1984 		const struct sge_eth_txq *txq = &adapter->sge.ethtxq[r * QPL];
1985 		int n = min(QPL, adapter->sge.ethqsets - QPL * r);
1986 
1987 		S("QType:", "Ethernet");
1988 		S("Interface:",
1989 		  (rxq[qs].rspq.netdev
1990 		   ? rxq[qs].rspq.netdev->name
1991 		   : "N/A"));
1992 		S3("d", "Port:",
1993 		   (rxq[qs].rspq.netdev
1994 		    ? ((struct port_info *)
1995 		       netdev_priv(rxq[qs].rspq.netdev))->port_id
1996 		    : -1));
1997 		T("TxQ ID:", q.abs_id);
1998 		T("TxQ size:", q.size);
1999 		T("TxQ inuse:", q.in_use);
2000 		T("TxQ PIdx:", q.pidx);
2001 		T("TxQ CIdx:", q.cidx);
2002 		R("RspQ ID:", rspq.abs_id);
2003 		R("RspQ size:", rspq.size);
2004 		R("RspQE size:", rspq.iqe_len);
2005 		S3("u", "Intr delay:", qtimer_val(adapter, &rxq[qs].rspq));
2006 		S3("u", "Intr pktcnt:",
2007 		   adapter->sge.counter_val[rxq[qs].rspq.pktcnt_idx]);
2008 		R("RspQ CIdx:", rspq.cidx);
2009 		R("RspQ Gen:", rspq.gen);
2010 		R("FL ID:", fl.abs_id);
2011 		R("FL size:", fl.size - MIN_FL_RESID);
2012 		R("FL avail:", fl.avail);
2013 		R("FL PIdx:", fl.pidx);
2014 		R("FL CIdx:", fl.cidx);
2015 		return 0;
2016 	}
2017 
2018 	r -= eth_entries;
2019 	if (r == 0) {
2020 		const struct sge_rspq *evtq = &adapter->sge.fw_evtq;
2021 
2022 		seq_printf(seq, "%-12s %16s\n", "QType:", "FW event queue");
2023 		seq_printf(seq, "%-12s %16u\n", "RspQ ID:", evtq->abs_id);
2024 		seq_printf(seq, "%-12s %16u\n", "Intr delay:",
2025 			   qtimer_val(adapter, evtq));
2026 		seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:",
2027 			   adapter->sge.counter_val[evtq->pktcnt_idx]);
2028 		seq_printf(seq, "%-12s %16u\n", "RspQ Cidx:", evtq->cidx);
2029 		seq_printf(seq, "%-12s %16u\n", "RspQ Gen:", evtq->gen);
2030 	} else if (r == 1) {
2031 		const struct sge_rspq *intrq = &adapter->sge.intrq;
2032 
2033 		seq_printf(seq, "%-12s %16s\n", "QType:", "Interrupt Queue");
2034 		seq_printf(seq, "%-12s %16u\n", "RspQ ID:", intrq->abs_id);
2035 		seq_printf(seq, "%-12s %16u\n", "Intr delay:",
2036 			   qtimer_val(adapter, intrq));
2037 		seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:",
2038 			   adapter->sge.counter_val[intrq->pktcnt_idx]);
2039 		seq_printf(seq, "%-12s %16u\n", "RspQ Cidx:", intrq->cidx);
2040 		seq_printf(seq, "%-12s %16u\n", "RspQ Gen:", intrq->gen);
2041 	}
2042 
2043 	#undef R
2044 	#undef T
2045 	#undef S
2046 	#undef S3
2047 
2048 	return 0;
2049 }
2050 
2051 /*
2052  * Return the number of "entries" in our "file".  We group the multi-Queue
2053  * sections with QPL Queue Sets per "entry".  The sections of the output are:
2054  *
2055  *     Ethernet RX/TX Queue Sets
2056  *     Firmware Event Queue
2057  *     Forwarded Interrupt Queue (if in MSI mode)
2058  */
2059 static int sge_queue_entries(const struct adapter *adapter)
2060 {
2061 	return DIV_ROUND_UP(adapter->sge.ethqsets, QPL) + 1 +
2062 		((adapter->flags & USING_MSI) != 0);
2063 }
2064 
2065 static void *sge_queue_start(struct seq_file *seq, loff_t *pos)
2066 {
2067 	int entries = sge_queue_entries(seq->private);
2068 
2069 	return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
2070 }
2071 
2072 static void sge_queue_stop(struct seq_file *seq, void *v)
2073 {
2074 }
2075 
2076 static void *sge_queue_next(struct seq_file *seq, void *v, loff_t *pos)
2077 {
2078 	int entries = sge_queue_entries(seq->private);
2079 
2080 	++*pos;
2081 	return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
2082 }
2083 
2084 static const struct seq_operations sge_qinfo_seq_ops = {
2085 	.start = sge_queue_start,
2086 	.next  = sge_queue_next,
2087 	.stop  = sge_queue_stop,
2088 	.show  = sge_qinfo_show
2089 };
2090 
2091 static int sge_qinfo_open(struct inode *inode, struct file *file)
2092 {
2093 	int res = seq_open(file, &sge_qinfo_seq_ops);
2094 
2095 	if (!res) {
2096 		struct seq_file *seq = file->private_data;
2097 		seq->private = inode->i_private;
2098 	}
2099 	return res;
2100 }
2101 
2102 static const struct file_operations sge_qinfo_debugfs_fops = {
2103 	.owner   = THIS_MODULE,
2104 	.open    = sge_qinfo_open,
2105 	.read    = seq_read,
2106 	.llseek  = seq_lseek,
2107 	.release = seq_release,
2108 };
2109 
2110 /*
2111  * Show SGE Queue Set statistics.  We display QPL Queues Sets per line.
2112  */
2113 #define QPL	4
2114 
2115 static int sge_qstats_show(struct seq_file *seq, void *v)
2116 {
2117 	struct adapter *adapter = seq->private;
2118 	int eth_entries = DIV_ROUND_UP(adapter->sge.ethqsets, QPL);
2119 	int qs, r = (uintptr_t)v - 1;
2120 
2121 	if (r)
2122 		seq_putc(seq, '\n');
2123 
2124 	#define S3(fmt, s, v) \
2125 		do { \
2126 			seq_printf(seq, "%-16s", s); \
2127 			for (qs = 0; qs < n; ++qs) \
2128 				seq_printf(seq, " %8" fmt, v); \
2129 			seq_putc(seq, '\n'); \
2130 		} while (0)
2131 	#define S(s, v)		S3("s", s, v)
2132 
2133 	#define T3(fmt, s, v)	S3(fmt, s, txq[qs].v)
2134 	#define T(s, v)		T3("lu", s, v)
2135 
2136 	#define R3(fmt, s, v)	S3(fmt, s, rxq[qs].v)
2137 	#define R(s, v)		R3("lu", s, v)
2138 
2139 	if (r < eth_entries) {
2140 		const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[r * QPL];
2141 		const struct sge_eth_txq *txq = &adapter->sge.ethtxq[r * QPL];
2142 		int n = min(QPL, adapter->sge.ethqsets - QPL * r);
2143 
2144 		S("QType:", "Ethernet");
2145 		S("Interface:",
2146 		  (rxq[qs].rspq.netdev
2147 		   ? rxq[qs].rspq.netdev->name
2148 		   : "N/A"));
2149 		R3("u", "RspQNullInts:", rspq.unhandled_irqs);
2150 		R("RxPackets:", stats.pkts);
2151 		R("RxCSO:", stats.rx_cso);
2152 		R("VLANxtract:", stats.vlan_ex);
2153 		R("LROmerged:", stats.lro_merged);
2154 		R("LROpackets:", stats.lro_pkts);
2155 		R("RxDrops:", stats.rx_drops);
2156 		T("TSO:", tso);
2157 		T("TxCSO:", tx_cso);
2158 		T("VLANins:", vlan_ins);
2159 		T("TxQFull:", q.stops);
2160 		T("TxQRestarts:", q.restarts);
2161 		T("TxMapErr:", mapping_err);
2162 		R("FLAllocErr:", fl.alloc_failed);
2163 		R("FLLrgAlcErr:", fl.large_alloc_failed);
2164 		R("FLStarving:", fl.starving);
2165 		return 0;
2166 	}
2167 
2168 	r -= eth_entries;
2169 	if (r == 0) {
2170 		const struct sge_rspq *evtq = &adapter->sge.fw_evtq;
2171 
2172 		seq_printf(seq, "%-8s %16s\n", "QType:", "FW event queue");
2173 		seq_printf(seq, "%-16s %8u\n", "RspQNullInts:",
2174 			   evtq->unhandled_irqs);
2175 		seq_printf(seq, "%-16s %8u\n", "RspQ CIdx:", evtq->cidx);
2176 		seq_printf(seq, "%-16s %8u\n", "RspQ Gen:", evtq->gen);
2177 	} else if (r == 1) {
2178 		const struct sge_rspq *intrq = &adapter->sge.intrq;
2179 
2180 		seq_printf(seq, "%-8s %16s\n", "QType:", "Interrupt Queue");
2181 		seq_printf(seq, "%-16s %8u\n", "RspQNullInts:",
2182 			   intrq->unhandled_irqs);
2183 		seq_printf(seq, "%-16s %8u\n", "RspQ CIdx:", intrq->cidx);
2184 		seq_printf(seq, "%-16s %8u\n", "RspQ Gen:", intrq->gen);
2185 	}
2186 
2187 	#undef R
2188 	#undef T
2189 	#undef S
2190 	#undef R3
2191 	#undef T3
2192 	#undef S3
2193 
2194 	return 0;
2195 }
2196 
2197 /*
2198  * Return the number of "entries" in our "file".  We group the multi-Queue
2199  * sections with QPL Queue Sets per "entry".  The sections of the output are:
2200  *
2201  *     Ethernet RX/TX Queue Sets
2202  *     Firmware Event Queue
2203  *     Forwarded Interrupt Queue (if in MSI mode)
2204  */
2205 static int sge_qstats_entries(const struct adapter *adapter)
2206 {
2207 	return DIV_ROUND_UP(adapter->sge.ethqsets, QPL) + 1 +
2208 		((adapter->flags & USING_MSI) != 0);
2209 }
2210 
2211 static void *sge_qstats_start(struct seq_file *seq, loff_t *pos)
2212 {
2213 	int entries = sge_qstats_entries(seq->private);
2214 
2215 	return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
2216 }
2217 
2218 static void sge_qstats_stop(struct seq_file *seq, void *v)
2219 {
2220 }
2221 
2222 static void *sge_qstats_next(struct seq_file *seq, void *v, loff_t *pos)
2223 {
2224 	int entries = sge_qstats_entries(seq->private);
2225 
2226 	(*pos)++;
2227 	return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
2228 }
2229 
2230 static const struct seq_operations sge_qstats_seq_ops = {
2231 	.start = sge_qstats_start,
2232 	.next  = sge_qstats_next,
2233 	.stop  = sge_qstats_stop,
2234 	.show  = sge_qstats_show
2235 };
2236 
2237 static int sge_qstats_open(struct inode *inode, struct file *file)
2238 {
2239 	int res = seq_open(file, &sge_qstats_seq_ops);
2240 
2241 	if (res == 0) {
2242 		struct seq_file *seq = file->private_data;
2243 		seq->private = inode->i_private;
2244 	}
2245 	return res;
2246 }
2247 
2248 static const struct file_operations sge_qstats_proc_fops = {
2249 	.owner   = THIS_MODULE,
2250 	.open    = sge_qstats_open,
2251 	.read    = seq_read,
2252 	.llseek  = seq_lseek,
2253 	.release = seq_release,
2254 };
2255 
2256 /*
2257  * Show PCI-E SR-IOV Virtual Function Resource Limits.
2258  */
2259 static int resources_show(struct seq_file *seq, void *v)
2260 {
2261 	struct adapter *adapter = seq->private;
2262 	struct vf_resources *vfres = &adapter->params.vfres;
2263 
2264 	#define S(desc, fmt, var) \
2265 		seq_printf(seq, "%-60s " fmt "\n", \
2266 			   desc " (" #var "):", vfres->var)
2267 
2268 	S("Virtual Interfaces", "%d", nvi);
2269 	S("Egress Queues", "%d", neq);
2270 	S("Ethernet Control", "%d", nethctrl);
2271 	S("Ingress Queues/w Free Lists/Interrupts", "%d", niqflint);
2272 	S("Ingress Queues", "%d", niq);
2273 	S("Traffic Class", "%d", tc);
2274 	S("Port Access Rights Mask", "%#x", pmask);
2275 	S("MAC Address Filters", "%d", nexactf);
2276 	S("Firmware Command Read Capabilities", "%#x", r_caps);
2277 	S("Firmware Command Write/Execute Capabilities", "%#x", wx_caps);
2278 
2279 	#undef S
2280 
2281 	return 0;
2282 }
2283 
2284 static int resources_open(struct inode *inode, struct file *file)
2285 {
2286 	return single_open(file, resources_show, inode->i_private);
2287 }
2288 
2289 static const struct file_operations resources_proc_fops = {
2290 	.owner   = THIS_MODULE,
2291 	.open    = resources_open,
2292 	.read    = seq_read,
2293 	.llseek  = seq_lseek,
2294 	.release = single_release,
2295 };
2296 
2297 /*
2298  * Show Virtual Interfaces.
2299  */
2300 static int interfaces_show(struct seq_file *seq, void *v)
2301 {
2302 	if (v == SEQ_START_TOKEN) {
2303 		seq_puts(seq, "Interface  Port   VIID\n");
2304 	} else {
2305 		struct adapter *adapter = seq->private;
2306 		int pidx = (uintptr_t)v - 2;
2307 		struct net_device *dev = adapter->port[pidx];
2308 		struct port_info *pi = netdev_priv(dev);
2309 
2310 		seq_printf(seq, "%9s  %4d  %#5x\n",
2311 			   dev->name, pi->port_id, pi->viid);
2312 	}
2313 	return 0;
2314 }
2315 
2316 static inline void *interfaces_get_idx(struct adapter *adapter, loff_t pos)
2317 {
2318 	return pos <= adapter->params.nports
2319 		? (void *)(uintptr_t)(pos + 1)
2320 		: NULL;
2321 }
2322 
2323 static void *interfaces_start(struct seq_file *seq, loff_t *pos)
2324 {
2325 	return *pos
2326 		? interfaces_get_idx(seq->private, *pos)
2327 		: SEQ_START_TOKEN;
2328 }
2329 
2330 static void *interfaces_next(struct seq_file *seq, void *v, loff_t *pos)
2331 {
2332 	(*pos)++;
2333 	return interfaces_get_idx(seq->private, *pos);
2334 }
2335 
2336 static void interfaces_stop(struct seq_file *seq, void *v)
2337 {
2338 }
2339 
2340 static const struct seq_operations interfaces_seq_ops = {
2341 	.start = interfaces_start,
2342 	.next  = interfaces_next,
2343 	.stop  = interfaces_stop,
2344 	.show  = interfaces_show
2345 };
2346 
2347 static int interfaces_open(struct inode *inode, struct file *file)
2348 {
2349 	int res = seq_open(file, &interfaces_seq_ops);
2350 
2351 	if (res == 0) {
2352 		struct seq_file *seq = file->private_data;
2353 		seq->private = inode->i_private;
2354 	}
2355 	return res;
2356 }
2357 
2358 static const struct file_operations interfaces_proc_fops = {
2359 	.owner   = THIS_MODULE,
2360 	.open    = interfaces_open,
2361 	.read    = seq_read,
2362 	.llseek  = seq_lseek,
2363 	.release = seq_release,
2364 };
2365 
2366 /*
2367  * /sys/kernel/debugfs/cxgb4vf/ files list.
2368  */
2369 struct cxgb4vf_debugfs_entry {
2370 	const char *name;		/* name of debugfs node */
2371 	umode_t mode;			/* file system mode */
2372 	const struct file_operations *fops;
2373 };
2374 
2375 static struct cxgb4vf_debugfs_entry debugfs_files[] = {
2376 	{ "mboxlog",    S_IRUGO, &mboxlog_fops },
2377 	{ "sge_qinfo",  S_IRUGO, &sge_qinfo_debugfs_fops },
2378 	{ "sge_qstats", S_IRUGO, &sge_qstats_proc_fops },
2379 	{ "resources",  S_IRUGO, &resources_proc_fops },
2380 	{ "interfaces", S_IRUGO, &interfaces_proc_fops },
2381 };
2382 
2383 /*
2384  * Module and device initialization and cleanup code.
2385  * ==================================================
2386  */
2387 
2388 /*
2389  * Set up out /sys/kernel/debug/cxgb4vf sub-nodes.  We assume that the
2390  * directory (debugfs_root) has already been set up.
2391  */
2392 static int setup_debugfs(struct adapter *adapter)
2393 {
2394 	int i;
2395 
2396 	BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
2397 
2398 	/*
2399 	 * Debugfs support is best effort.
2400 	 */
2401 	for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
2402 		(void)debugfs_create_file(debugfs_files[i].name,
2403 				  debugfs_files[i].mode,
2404 				  adapter->debugfs_root,
2405 				  (void *)adapter,
2406 				  debugfs_files[i].fops);
2407 
2408 	return 0;
2409 }
2410 
2411 /*
2412  * Tear down the /sys/kernel/debug/cxgb4vf sub-nodes created above.  We leave
2413  * it to our caller to tear down the directory (debugfs_root).
2414  */
2415 static void cleanup_debugfs(struct adapter *adapter)
2416 {
2417 	BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
2418 
2419 	/*
2420 	 * Unlike our sister routine cleanup_proc(), we don't need to remove
2421 	 * individual entries because a call will be made to
2422 	 * debugfs_remove_recursive().  We just need to clean up any ancillary
2423 	 * persistent state.
2424 	 */
2425 	/* nothing to do */
2426 }
2427 
2428 /* Figure out how many Ports and Queue Sets we can support.  This depends on
2429  * knowing our Virtual Function Resources and may be called a second time if
2430  * we fall back from MSI-X to MSI Interrupt Mode.
2431  */
2432 static void size_nports_qsets(struct adapter *adapter)
2433 {
2434 	struct vf_resources *vfres = &adapter->params.vfres;
2435 	unsigned int ethqsets, pmask_nports;
2436 
2437 	/* The number of "ports" which we support is equal to the number of
2438 	 * Virtual Interfaces with which we've been provisioned.
2439 	 */
2440 	adapter->params.nports = vfres->nvi;
2441 	if (adapter->params.nports > MAX_NPORTS) {
2442 		dev_warn(adapter->pdev_dev, "only using %d of %d maximum"
2443 			 " allowed virtual interfaces\n", MAX_NPORTS,
2444 			 adapter->params.nports);
2445 		adapter->params.nports = MAX_NPORTS;
2446 	}
2447 
2448 	/* We may have been provisioned with more VIs than the number of
2449 	 * ports we're allowed to access (our Port Access Rights Mask).
2450 	 * This is obviously a configuration conflict but we don't want to
2451 	 * crash the kernel or anything silly just because of that.
2452 	 */
2453 	pmask_nports = hweight32(adapter->params.vfres.pmask);
2454 	if (pmask_nports < adapter->params.nports) {
2455 		dev_warn(adapter->pdev_dev, "only using %d of %d provisioned"
2456 			 " virtual interfaces; limited by Port Access Rights"
2457 			 " mask %#x\n", pmask_nports, adapter->params.nports,
2458 			 adapter->params.vfres.pmask);
2459 		adapter->params.nports = pmask_nports;
2460 	}
2461 
2462 	/* We need to reserve an Ingress Queue for the Asynchronous Firmware
2463 	 * Event Queue.  And if we're using MSI Interrupts, we'll also need to
2464 	 * reserve an Ingress Queue for a Forwarded Interrupts.
2465 	 *
2466 	 * The rest of the FL/Intr-capable ingress queues will be matched up
2467 	 * one-for-one with Ethernet/Control egress queues in order to form
2468 	 * "Queue Sets" which will be aportioned between the "ports".  For
2469 	 * each Queue Set, we'll need the ability to allocate two Egress
2470 	 * Contexts -- one for the Ingress Queue Free List and one for the TX
2471 	 * Ethernet Queue.
2472 	 *
2473 	 * Note that even if we're currently configured to use MSI-X
2474 	 * Interrupts (module variable msi == MSI_MSIX) we may get downgraded
2475 	 * to MSI Interrupts if we can't get enough MSI-X Interrupts.  If that
2476 	 * happens we'll need to adjust things later.
2477 	 */
2478 	ethqsets = vfres->niqflint - 1 - (msi == MSI_MSI);
2479 	if (vfres->nethctrl != ethqsets)
2480 		ethqsets = min(vfres->nethctrl, ethqsets);
2481 	if (vfres->neq < ethqsets*2)
2482 		ethqsets = vfres->neq/2;
2483 	if (ethqsets > MAX_ETH_QSETS)
2484 		ethqsets = MAX_ETH_QSETS;
2485 	adapter->sge.max_ethqsets = ethqsets;
2486 
2487 	if (adapter->sge.max_ethqsets < adapter->params.nports) {
2488 		dev_warn(adapter->pdev_dev, "only using %d of %d available"
2489 			 " virtual interfaces (too few Queue Sets)\n",
2490 			 adapter->sge.max_ethqsets, adapter->params.nports);
2491 		adapter->params.nports = adapter->sge.max_ethqsets;
2492 	}
2493 }
2494 
2495 /*
2496  * Perform early "adapter" initialization.  This is where we discover what
2497  * adapter parameters we're going to be using and initialize basic adapter
2498  * hardware support.
2499  */
2500 static int adap_init0(struct adapter *adapter)
2501 {
2502 	struct sge_params *sge_params = &adapter->params.sge;
2503 	struct sge *s = &adapter->sge;
2504 	int err;
2505 	u32 param, val = 0;
2506 
2507 	/*
2508 	 * Some environments do not properly handle PCIE FLRs -- e.g. in Linux
2509 	 * 2.6.31 and later we can't call pci_reset_function() in order to
2510 	 * issue an FLR because of a self- deadlock on the device semaphore.
2511 	 * Meanwhile, the OS infrastructure doesn't issue FLRs in all the
2512 	 * cases where they're needed -- for instance, some versions of KVM
2513 	 * fail to reset "Assigned Devices" when the VM reboots.  Therefore we
2514 	 * use the firmware based reset in order to reset any per function
2515 	 * state.
2516 	 */
2517 	err = t4vf_fw_reset(adapter);
2518 	if (err < 0) {
2519 		dev_err(adapter->pdev_dev, "FW reset failed: err=%d\n", err);
2520 		return err;
2521 	}
2522 
2523 	/*
2524 	 * Grab basic operational parameters.  These will predominantly have
2525 	 * been set up by the Physical Function Driver or will be hard coded
2526 	 * into the adapter.  We just have to live with them ...  Note that
2527 	 * we _must_ get our VPD parameters before our SGE parameters because
2528 	 * we need to know the adapter's core clock from the VPD in order to
2529 	 * properly decode the SGE Timer Values.
2530 	 */
2531 	err = t4vf_get_dev_params(adapter);
2532 	if (err) {
2533 		dev_err(adapter->pdev_dev, "unable to retrieve adapter"
2534 			" device parameters: err=%d\n", err);
2535 		return err;
2536 	}
2537 	err = t4vf_get_vpd_params(adapter);
2538 	if (err) {
2539 		dev_err(adapter->pdev_dev, "unable to retrieve adapter"
2540 			" VPD parameters: err=%d\n", err);
2541 		return err;
2542 	}
2543 	err = t4vf_get_sge_params(adapter);
2544 	if (err) {
2545 		dev_err(adapter->pdev_dev, "unable to retrieve adapter"
2546 			" SGE parameters: err=%d\n", err);
2547 		return err;
2548 	}
2549 	err = t4vf_get_rss_glb_config(adapter);
2550 	if (err) {
2551 		dev_err(adapter->pdev_dev, "unable to retrieve adapter"
2552 			" RSS parameters: err=%d\n", err);
2553 		return err;
2554 	}
2555 	if (adapter->params.rss.mode !=
2556 	    FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
2557 		dev_err(adapter->pdev_dev, "unable to operate with global RSS"
2558 			" mode %d\n", adapter->params.rss.mode);
2559 		return -EINVAL;
2560 	}
2561 	err = t4vf_sge_init(adapter);
2562 	if (err) {
2563 		dev_err(adapter->pdev_dev, "unable to use adapter parameters:"
2564 			" err=%d\n", err);
2565 		return err;
2566 	}
2567 
2568 	/* If we're running on newer firmware, let it know that we're
2569 	 * prepared to deal with encapsulated CPL messages.  Older
2570 	 * firmware won't understand this and we'll just get
2571 	 * unencapsulated messages ...
2572 	 */
2573 	param = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
2574 		FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP);
2575 	val = 1;
2576 	(void) t4vf_set_params(adapter, 1, &param, &val);
2577 
2578 	/*
2579 	 * Retrieve our RX interrupt holdoff timer values and counter
2580 	 * threshold values from the SGE parameters.
2581 	 */
2582 	s->timer_val[0] = core_ticks_to_us(adapter,
2583 		TIMERVALUE0_G(sge_params->sge_timer_value_0_and_1));
2584 	s->timer_val[1] = core_ticks_to_us(adapter,
2585 		TIMERVALUE1_G(sge_params->sge_timer_value_0_and_1));
2586 	s->timer_val[2] = core_ticks_to_us(adapter,
2587 		TIMERVALUE0_G(sge_params->sge_timer_value_2_and_3));
2588 	s->timer_val[3] = core_ticks_to_us(adapter,
2589 		TIMERVALUE1_G(sge_params->sge_timer_value_2_and_3));
2590 	s->timer_val[4] = core_ticks_to_us(adapter,
2591 		TIMERVALUE0_G(sge_params->sge_timer_value_4_and_5));
2592 	s->timer_val[5] = core_ticks_to_us(adapter,
2593 		TIMERVALUE1_G(sge_params->sge_timer_value_4_and_5));
2594 
2595 	s->counter_val[0] = THRESHOLD_0_G(sge_params->sge_ingress_rx_threshold);
2596 	s->counter_val[1] = THRESHOLD_1_G(sge_params->sge_ingress_rx_threshold);
2597 	s->counter_val[2] = THRESHOLD_2_G(sge_params->sge_ingress_rx_threshold);
2598 	s->counter_val[3] = THRESHOLD_3_G(sge_params->sge_ingress_rx_threshold);
2599 
2600 	/*
2601 	 * Grab our Virtual Interface resource allocation, extract the
2602 	 * features that we're interested in and do a bit of sanity testing on
2603 	 * what we discover.
2604 	 */
2605 	err = t4vf_get_vfres(adapter);
2606 	if (err) {
2607 		dev_err(adapter->pdev_dev, "unable to get virtual interface"
2608 			" resources: err=%d\n", err);
2609 		return err;
2610 	}
2611 
2612 	/* Check for various parameter sanity issues */
2613 	if (adapter->params.vfres.pmask == 0) {
2614 		dev_err(adapter->pdev_dev, "no port access configured\n"
2615 			"usable!\n");
2616 		return -EINVAL;
2617 	}
2618 	if (adapter->params.vfres.nvi == 0) {
2619 		dev_err(adapter->pdev_dev, "no virtual interfaces configured/"
2620 			"usable!\n");
2621 		return -EINVAL;
2622 	}
2623 
2624 	/* Initialize nports and max_ethqsets now that we have our Virtual
2625 	 * Function Resources.
2626 	 */
2627 	size_nports_qsets(adapter);
2628 
2629 	return 0;
2630 }
2631 
2632 static inline void init_rspq(struct sge_rspq *rspq, u8 timer_idx,
2633 			     u8 pkt_cnt_idx, unsigned int size,
2634 			     unsigned int iqe_size)
2635 {
2636 	rspq->intr_params = (QINTR_TIMER_IDX_V(timer_idx) |
2637 			     (pkt_cnt_idx < SGE_NCOUNTERS ?
2638 			      QINTR_CNT_EN_F : 0));
2639 	rspq->pktcnt_idx = (pkt_cnt_idx < SGE_NCOUNTERS
2640 			    ? pkt_cnt_idx
2641 			    : 0);
2642 	rspq->iqe_len = iqe_size;
2643 	rspq->size = size;
2644 }
2645 
2646 /*
2647  * Perform default configuration of DMA queues depending on the number and
2648  * type of ports we found and the number of available CPUs.  Most settings can
2649  * be modified by the admin via ethtool and cxgbtool prior to the adapter
2650  * being brought up for the first time.
2651  */
2652 static void cfg_queues(struct adapter *adapter)
2653 {
2654 	struct sge *s = &adapter->sge;
2655 	int q10g, n10g, qidx, pidx, qs;
2656 	size_t iqe_size;
2657 
2658 	/*
2659 	 * We should not be called till we know how many Queue Sets we can
2660 	 * support.  In particular, this means that we need to know what kind
2661 	 * of interrupts we'll be using ...
2662 	 */
2663 	BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0);
2664 
2665 	/*
2666 	 * Count the number of 10GbE Virtual Interfaces that we have.
2667 	 */
2668 	n10g = 0;
2669 	for_each_port(adapter, pidx)
2670 		n10g += is_x_10g_port(&adap2pinfo(adapter, pidx)->link_cfg);
2671 
2672 	/*
2673 	 * We default to 1 queue per non-10G port and up to # of cores queues
2674 	 * per 10G port.
2675 	 */
2676 	if (n10g == 0)
2677 		q10g = 0;
2678 	else {
2679 		int n1g = (adapter->params.nports - n10g);
2680 		q10g = (adapter->sge.max_ethqsets - n1g) / n10g;
2681 		if (q10g > num_online_cpus())
2682 			q10g = num_online_cpus();
2683 	}
2684 
2685 	/*
2686 	 * Allocate the "Queue Sets" to the various Virtual Interfaces.
2687 	 * The layout will be established in setup_sge_queues() when the
2688 	 * adapter is brough up for the first time.
2689 	 */
2690 	qidx = 0;
2691 	for_each_port(adapter, pidx) {
2692 		struct port_info *pi = adap2pinfo(adapter, pidx);
2693 
2694 		pi->first_qset = qidx;
2695 		pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
2696 		qidx += pi->nqsets;
2697 	}
2698 	s->ethqsets = qidx;
2699 
2700 	/*
2701 	 * The Ingress Queue Entry Size for our various Response Queues needs
2702 	 * to be big enough to accommodate the largest message we can receive
2703 	 * from the chip/firmware; which is 64 bytes ...
2704 	 */
2705 	iqe_size = 64;
2706 
2707 	/*
2708 	 * Set up default Queue Set parameters ...  Start off with the
2709 	 * shortest interrupt holdoff timer.
2710 	 */
2711 	for (qs = 0; qs < s->max_ethqsets; qs++) {
2712 		struct sge_eth_rxq *rxq = &s->ethrxq[qs];
2713 		struct sge_eth_txq *txq = &s->ethtxq[qs];
2714 
2715 		init_rspq(&rxq->rspq, 0, 0, 1024, iqe_size);
2716 		rxq->fl.size = 72;
2717 		txq->q.size = 1024;
2718 	}
2719 
2720 	/*
2721 	 * The firmware event queue is used for link state changes and
2722 	 * notifications of TX DMA completions.
2723 	 */
2724 	init_rspq(&s->fw_evtq, SGE_TIMER_RSTRT_CNTR, 0, 512, iqe_size);
2725 
2726 	/*
2727 	 * The forwarded interrupt queue is used when we're in MSI interrupt
2728 	 * mode.  In this mode all interrupts associated with RX queues will
2729 	 * be forwarded to a single queue which we'll associate with our MSI
2730 	 * interrupt vector.  The messages dropped in the forwarded interrupt
2731 	 * queue will indicate which ingress queue needs servicing ...  This
2732 	 * queue needs to be large enough to accommodate all of the ingress
2733 	 * queues which are forwarding their interrupt (+1 to prevent the PIDX
2734 	 * from equalling the CIDX if every ingress queue has an outstanding
2735 	 * interrupt).  The queue doesn't need to be any larger because no
2736 	 * ingress queue will ever have more than one outstanding interrupt at
2737 	 * any time ...
2738 	 */
2739 	init_rspq(&s->intrq, SGE_TIMER_RSTRT_CNTR, 0, MSIX_ENTRIES + 1,
2740 		  iqe_size);
2741 }
2742 
2743 /*
2744  * Reduce the number of Ethernet queues across all ports to at most n.
2745  * n provides at least one queue per port.
2746  */
2747 static void reduce_ethqs(struct adapter *adapter, int n)
2748 {
2749 	int i;
2750 	struct port_info *pi;
2751 
2752 	/*
2753 	 * While we have too many active Ether Queue Sets, interate across the
2754 	 * "ports" and reduce their individual Queue Set allocations.
2755 	 */
2756 	BUG_ON(n < adapter->params.nports);
2757 	while (n < adapter->sge.ethqsets)
2758 		for_each_port(adapter, i) {
2759 			pi = adap2pinfo(adapter, i);
2760 			if (pi->nqsets > 1) {
2761 				pi->nqsets--;
2762 				adapter->sge.ethqsets--;
2763 				if (adapter->sge.ethqsets <= n)
2764 					break;
2765 			}
2766 		}
2767 
2768 	/*
2769 	 * Reassign the starting Queue Sets for each of the "ports" ...
2770 	 */
2771 	n = 0;
2772 	for_each_port(adapter, i) {
2773 		pi = adap2pinfo(adapter, i);
2774 		pi->first_qset = n;
2775 		n += pi->nqsets;
2776 	}
2777 }
2778 
2779 /*
2780  * We need to grab enough MSI-X vectors to cover our interrupt needs.  Ideally
2781  * we get a separate MSI-X vector for every "Queue Set" plus any extras we
2782  * need.  Minimally we need one for every Virtual Interface plus those needed
2783  * for our "extras".  Note that this process may lower the maximum number of
2784  * allowed Queue Sets ...
2785  */
2786 static int enable_msix(struct adapter *adapter)
2787 {
2788 	int i, want, need, nqsets;
2789 	struct msix_entry entries[MSIX_ENTRIES];
2790 	struct sge *s = &adapter->sge;
2791 
2792 	for (i = 0; i < MSIX_ENTRIES; ++i)
2793 		entries[i].entry = i;
2794 
2795 	/*
2796 	 * We _want_ enough MSI-X interrupts to cover all of our "Queue Sets"
2797 	 * plus those needed for our "extras" (for example, the firmware
2798 	 * message queue).  We _need_ at least one "Queue Set" per Virtual
2799 	 * Interface plus those needed for our "extras".  So now we get to see
2800 	 * if the song is right ...
2801 	 */
2802 	want = s->max_ethqsets + MSIX_EXTRAS;
2803 	need = adapter->params.nports + MSIX_EXTRAS;
2804 
2805 	want = pci_enable_msix_range(adapter->pdev, entries, need, want);
2806 	if (want < 0)
2807 		return want;
2808 
2809 	nqsets = want - MSIX_EXTRAS;
2810 	if (nqsets < s->max_ethqsets) {
2811 		dev_warn(adapter->pdev_dev, "only enough MSI-X vectors"
2812 			 " for %d Queue Sets\n", nqsets);
2813 		s->max_ethqsets = nqsets;
2814 		if (nqsets < s->ethqsets)
2815 			reduce_ethqs(adapter, nqsets);
2816 	}
2817 	for (i = 0; i < want; ++i)
2818 		adapter->msix_info[i].vec = entries[i].vector;
2819 
2820 	return 0;
2821 }
2822 
2823 static const struct net_device_ops cxgb4vf_netdev_ops	= {
2824 	.ndo_open		= cxgb4vf_open,
2825 	.ndo_stop		= cxgb4vf_stop,
2826 	.ndo_start_xmit		= t4vf_eth_xmit,
2827 	.ndo_get_stats		= cxgb4vf_get_stats,
2828 	.ndo_set_rx_mode	= cxgb4vf_set_rxmode,
2829 	.ndo_set_mac_address	= cxgb4vf_set_mac_addr,
2830 	.ndo_validate_addr	= eth_validate_addr,
2831 	.ndo_do_ioctl		= cxgb4vf_do_ioctl,
2832 	.ndo_change_mtu		= cxgb4vf_change_mtu,
2833 	.ndo_fix_features	= cxgb4vf_fix_features,
2834 	.ndo_set_features	= cxgb4vf_set_features,
2835 #ifdef CONFIG_NET_POLL_CONTROLLER
2836 	.ndo_poll_controller	= cxgb4vf_poll_controller,
2837 #endif
2838 };
2839 
2840 /*
2841  * "Probe" a device: initialize a device and construct all kernel and driver
2842  * state needed to manage the device.  This routine is called "init_one" in
2843  * the PF Driver ...
2844  */
2845 static int cxgb4vf_pci_probe(struct pci_dev *pdev,
2846 			     const struct pci_device_id *ent)
2847 {
2848 	int pci_using_dac;
2849 	int err, pidx;
2850 	unsigned int pmask;
2851 	struct adapter *adapter;
2852 	struct port_info *pi;
2853 	struct net_device *netdev;
2854 	unsigned int pf;
2855 
2856 	/*
2857 	 * Print our driver banner the first time we're called to initialize a
2858 	 * device.
2859 	 */
2860 	pr_info_once("%s - version %s\n", DRV_DESC, DRV_VERSION);
2861 
2862 	/*
2863 	 * Initialize generic PCI device state.
2864 	 */
2865 	err = pci_enable_device(pdev);
2866 	if (err) {
2867 		dev_err(&pdev->dev, "cannot enable PCI device\n");
2868 		return err;
2869 	}
2870 
2871 	/*
2872 	 * Reserve PCI resources for the device.  If we can't get them some
2873 	 * other driver may have already claimed the device ...
2874 	 */
2875 	err = pci_request_regions(pdev, KBUILD_MODNAME);
2876 	if (err) {
2877 		dev_err(&pdev->dev, "cannot obtain PCI resources\n");
2878 		goto err_disable_device;
2879 	}
2880 
2881 	/*
2882 	 * Set up our DMA mask: try for 64-bit address masking first and
2883 	 * fall back to 32-bit if we can't get 64 bits ...
2884 	 */
2885 	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
2886 	if (err == 0) {
2887 		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
2888 		if (err) {
2889 			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for"
2890 				" coherent allocations\n");
2891 			goto err_release_regions;
2892 		}
2893 		pci_using_dac = 1;
2894 	} else {
2895 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2896 		if (err != 0) {
2897 			dev_err(&pdev->dev, "no usable DMA configuration\n");
2898 			goto err_release_regions;
2899 		}
2900 		pci_using_dac = 0;
2901 	}
2902 
2903 	/*
2904 	 * Enable bus mastering for the device ...
2905 	 */
2906 	pci_set_master(pdev);
2907 
2908 	/*
2909 	 * Allocate our adapter data structure and attach it to the device.
2910 	 */
2911 	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
2912 	if (!adapter) {
2913 		err = -ENOMEM;
2914 		goto err_release_regions;
2915 	}
2916 	pci_set_drvdata(pdev, adapter);
2917 	adapter->pdev = pdev;
2918 	adapter->pdev_dev = &pdev->dev;
2919 
2920 	adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
2921 				    (sizeof(struct mbox_cmd) *
2922 				     T4VF_OS_LOG_MBOX_CMDS),
2923 				    GFP_KERNEL);
2924 	if (!adapter->mbox_log) {
2925 		err = -ENOMEM;
2926 		goto err_free_adapter;
2927 	}
2928 	adapter->mbox_log->size = T4VF_OS_LOG_MBOX_CMDS;
2929 
2930 	/*
2931 	 * Initialize SMP data synchronization resources.
2932 	 */
2933 	spin_lock_init(&adapter->stats_lock);
2934 	spin_lock_init(&adapter->mbox_lock);
2935 	INIT_LIST_HEAD(&adapter->mlist.list);
2936 
2937 	/*
2938 	 * Map our I/O registers in BAR0.
2939 	 */
2940 	adapter->regs = pci_ioremap_bar(pdev, 0);
2941 	if (!adapter->regs) {
2942 		dev_err(&pdev->dev, "cannot map device registers\n");
2943 		err = -ENOMEM;
2944 		goto err_free_adapter;
2945 	}
2946 
2947 	/* Wait for the device to become ready before proceeding ...
2948 	 */
2949 	err = t4vf_prep_adapter(adapter);
2950 	if (err) {
2951 		dev_err(adapter->pdev_dev, "device didn't become ready:"
2952 			" err=%d\n", err);
2953 		goto err_unmap_bar0;
2954 	}
2955 
2956 	/* For T5 and later we want to use the new BAR-based User Doorbells,
2957 	 * so we need to map BAR2 here ...
2958 	 */
2959 	if (!is_t4(adapter->params.chip)) {
2960 		adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
2961 					   pci_resource_len(pdev, 2));
2962 		if (!adapter->bar2) {
2963 			dev_err(adapter->pdev_dev, "cannot map BAR2 doorbells\n");
2964 			err = -ENOMEM;
2965 			goto err_unmap_bar0;
2966 		}
2967 	}
2968 	/*
2969 	 * Initialize adapter level features.
2970 	 */
2971 	adapter->name = pci_name(pdev);
2972 	adapter->msg_enable = DFLT_MSG_ENABLE;
2973 
2974 	/* If possible, we use PCIe Relaxed Ordering Attribute to deliver
2975 	 * Ingress Packet Data to Free List Buffers in order to allow for
2976 	 * chipset performance optimizations between the Root Complex and
2977 	 * Memory Controllers.  (Messages to the associated Ingress Queue
2978 	 * notifying new Packet Placement in the Free Lists Buffers will be
2979 	 * send without the Relaxed Ordering Attribute thus guaranteeing that
2980 	 * all preceding PCIe Transaction Layer Packets will be processed
2981 	 * first.)  But some Root Complexes have various issues with Upstream
2982 	 * Transaction Layer Packets with the Relaxed Ordering Attribute set.
2983 	 * The PCIe devices which under the Root Complexes will be cleared the
2984 	 * Relaxed Ordering bit in the configuration space, So we check our
2985 	 * PCIe configuration space to see if it's flagged with advice against
2986 	 * using Relaxed Ordering.
2987 	 */
2988 	if (!pcie_relaxed_ordering_enabled(pdev))
2989 		adapter->flags |= ROOT_NO_RELAXED_ORDERING;
2990 
2991 	err = adap_init0(adapter);
2992 	if (err)
2993 		goto err_unmap_bar;
2994 
2995 	/*
2996 	 * Allocate our "adapter ports" and stitch everything together.
2997 	 */
2998 	pmask = adapter->params.vfres.pmask;
2999 	pf = t4vf_get_pf_from_vf(adapter);
3000 	for_each_port(adapter, pidx) {
3001 		int port_id, viid;
3002 		u8 mac[ETH_ALEN];
3003 		unsigned int naddr = 1;
3004 
3005 		/*
3006 		 * We simplistically allocate our virtual interfaces
3007 		 * sequentially across the port numbers to which we have
3008 		 * access rights.  This should be configurable in some manner
3009 		 * ...
3010 		 */
3011 		if (pmask == 0)
3012 			break;
3013 		port_id = ffs(pmask) - 1;
3014 		pmask &= ~(1 << port_id);
3015 		viid = t4vf_alloc_vi(adapter, port_id);
3016 		if (viid < 0) {
3017 			dev_err(&pdev->dev, "cannot allocate VI for port %d:"
3018 				" err=%d\n", port_id, viid);
3019 			err = viid;
3020 			goto err_free_dev;
3021 		}
3022 
3023 		/*
3024 		 * Allocate our network device and stitch things together.
3025 		 */
3026 		netdev = alloc_etherdev_mq(sizeof(struct port_info),
3027 					   MAX_PORT_QSETS);
3028 		if (netdev == NULL) {
3029 			t4vf_free_vi(adapter, viid);
3030 			err = -ENOMEM;
3031 			goto err_free_dev;
3032 		}
3033 		adapter->port[pidx] = netdev;
3034 		SET_NETDEV_DEV(netdev, &pdev->dev);
3035 		pi = netdev_priv(netdev);
3036 		pi->adapter = adapter;
3037 		pi->pidx = pidx;
3038 		pi->port_id = port_id;
3039 		pi->viid = viid;
3040 
3041 		/*
3042 		 * Initialize the starting state of our "port" and register
3043 		 * it.
3044 		 */
3045 		pi->xact_addr_filt = -1;
3046 		netif_carrier_off(netdev);
3047 		netdev->irq = pdev->irq;
3048 
3049 		netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
3050 			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3051 			NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_RXCSUM;
3052 		netdev->vlan_features = NETIF_F_SG | TSO_FLAGS |
3053 			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3054 			NETIF_F_HIGHDMA;
3055 		netdev->features = netdev->hw_features |
3056 				   NETIF_F_HW_VLAN_CTAG_TX;
3057 		if (pci_using_dac)
3058 			netdev->features |= NETIF_F_HIGHDMA;
3059 
3060 		netdev->priv_flags |= IFF_UNICAST_FLT;
3061 		netdev->min_mtu = 81;
3062 		netdev->max_mtu = ETH_MAX_MTU;
3063 
3064 		netdev->netdev_ops = &cxgb4vf_netdev_ops;
3065 		netdev->ethtool_ops = &cxgb4vf_ethtool_ops;
3066 		netdev->dev_port = pi->port_id;
3067 
3068 		/*
3069 		 * Initialize the hardware/software state for the port.
3070 		 */
3071 		err = t4vf_port_init(adapter, pidx);
3072 		if (err) {
3073 			dev_err(&pdev->dev, "cannot initialize port %d\n",
3074 				pidx);
3075 			goto err_free_dev;
3076 		}
3077 
3078 		err = t4vf_get_vf_mac_acl(adapter, pf, &naddr, mac);
3079 		if (err) {
3080 			dev_err(&pdev->dev,
3081 				"unable to determine MAC ACL address, "
3082 				"continuing anyway.. (status %d)\n", err);
3083 		} else if (naddr && adapter->params.vfres.nvi == 1) {
3084 			struct sockaddr addr;
3085 
3086 			ether_addr_copy(addr.sa_data, mac);
3087 			err = cxgb4vf_set_mac_addr(netdev, &addr);
3088 			if (err) {
3089 				dev_err(&pdev->dev,
3090 					"unable to set MAC address %pM\n",
3091 					mac);
3092 				goto err_free_dev;
3093 			}
3094 			dev_info(&pdev->dev,
3095 				 "Using assigned MAC ACL: %pM\n", mac);
3096 		}
3097 	}
3098 
3099 	/* See what interrupts we'll be using.  If we've been configured to
3100 	 * use MSI-X interrupts, try to enable them but fall back to using
3101 	 * MSI interrupts if we can't enable MSI-X interrupts.  If we can't
3102 	 * get MSI interrupts we bail with the error.
3103 	 */
3104 	if (msi == MSI_MSIX && enable_msix(adapter) == 0)
3105 		adapter->flags |= USING_MSIX;
3106 	else {
3107 		if (msi == MSI_MSIX) {
3108 			dev_info(adapter->pdev_dev,
3109 				 "Unable to use MSI-X Interrupts; falling "
3110 				 "back to MSI Interrupts\n");
3111 
3112 			/* We're going to need a Forwarded Interrupt Queue so
3113 			 * that may cut into how many Queue Sets we can
3114 			 * support.
3115 			 */
3116 			msi = MSI_MSI;
3117 			size_nports_qsets(adapter);
3118 		}
3119 		err = pci_enable_msi(pdev);
3120 		if (err) {
3121 			dev_err(&pdev->dev, "Unable to allocate MSI Interrupts;"
3122 				" err=%d\n", err);
3123 			goto err_free_dev;
3124 		}
3125 		adapter->flags |= USING_MSI;
3126 	}
3127 
3128 	/* Now that we know how many "ports" we have and what interrupt
3129 	 * mechanism we're going to use, we can configure our queue resources.
3130 	 */
3131 	cfg_queues(adapter);
3132 
3133 	/*
3134 	 * The "card" is now ready to go.  If any errors occur during device
3135 	 * registration we do not fail the whole "card" but rather proceed
3136 	 * only with the ports we manage to register successfully.  However we
3137 	 * must register at least one net device.
3138 	 */
3139 	for_each_port(adapter, pidx) {
3140 		struct port_info *pi = netdev_priv(adapter->port[pidx]);
3141 		netdev = adapter->port[pidx];
3142 		if (netdev == NULL)
3143 			continue;
3144 
3145 		netif_set_real_num_tx_queues(netdev, pi->nqsets);
3146 		netif_set_real_num_rx_queues(netdev, pi->nqsets);
3147 
3148 		err = register_netdev(netdev);
3149 		if (err) {
3150 			dev_warn(&pdev->dev, "cannot register net device %s,"
3151 				 " skipping\n", netdev->name);
3152 			continue;
3153 		}
3154 
3155 		set_bit(pidx, &adapter->registered_device_map);
3156 	}
3157 	if (adapter->registered_device_map == 0) {
3158 		dev_err(&pdev->dev, "could not register any net devices\n");
3159 		goto err_disable_interrupts;
3160 	}
3161 
3162 	/*
3163 	 * Set up our debugfs entries.
3164 	 */
3165 	if (!IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) {
3166 		adapter->debugfs_root =
3167 			debugfs_create_dir(pci_name(pdev),
3168 					   cxgb4vf_debugfs_root);
3169 		if (IS_ERR_OR_NULL(adapter->debugfs_root))
3170 			dev_warn(&pdev->dev, "could not create debugfs"
3171 				 " directory");
3172 		else
3173 			setup_debugfs(adapter);
3174 	}
3175 
3176 	/*
3177 	 * Print a short notice on the existence and configuration of the new
3178 	 * VF network device ...
3179 	 */
3180 	for_each_port(adapter, pidx) {
3181 		dev_info(adapter->pdev_dev, "%s: Chelsio VF NIC PCIe %s\n",
3182 			 adapter->port[pidx]->name,
3183 			 (adapter->flags & USING_MSIX) ? "MSI-X" :
3184 			 (adapter->flags & USING_MSI)  ? "MSI" : "");
3185 	}
3186 
3187 	/*
3188 	 * Return success!
3189 	 */
3190 	return 0;
3191 
3192 	/*
3193 	 * Error recovery and exit code.  Unwind state that's been created
3194 	 * so far and return the error.
3195 	 */
3196 err_disable_interrupts:
3197 	if (adapter->flags & USING_MSIX) {
3198 		pci_disable_msix(adapter->pdev);
3199 		adapter->flags &= ~USING_MSIX;
3200 	} else if (adapter->flags & USING_MSI) {
3201 		pci_disable_msi(adapter->pdev);
3202 		adapter->flags &= ~USING_MSI;
3203 	}
3204 
3205 err_free_dev:
3206 	for_each_port(adapter, pidx) {
3207 		netdev = adapter->port[pidx];
3208 		if (netdev == NULL)
3209 			continue;
3210 		pi = netdev_priv(netdev);
3211 		t4vf_free_vi(adapter, pi->viid);
3212 		if (test_bit(pidx, &adapter->registered_device_map))
3213 			unregister_netdev(netdev);
3214 		free_netdev(netdev);
3215 	}
3216 
3217 err_unmap_bar:
3218 	if (!is_t4(adapter->params.chip))
3219 		iounmap(adapter->bar2);
3220 
3221 err_unmap_bar0:
3222 	iounmap(adapter->regs);
3223 
3224 err_free_adapter:
3225 	kfree(adapter->mbox_log);
3226 	kfree(adapter);
3227 
3228 err_release_regions:
3229 	pci_release_regions(pdev);
3230 	pci_clear_master(pdev);
3231 
3232 err_disable_device:
3233 	pci_disable_device(pdev);
3234 
3235 	return err;
3236 }
3237 
3238 /*
3239  * "Remove" a device: tear down all kernel and driver state created in the
3240  * "probe" routine and quiesce the device (disable interrupts, etc.).  (Note
3241  * that this is called "remove_one" in the PF Driver.)
3242  */
3243 static void cxgb4vf_pci_remove(struct pci_dev *pdev)
3244 {
3245 	struct adapter *adapter = pci_get_drvdata(pdev);
3246 
3247 	/*
3248 	 * Tear down driver state associated with device.
3249 	 */
3250 	if (adapter) {
3251 		int pidx;
3252 
3253 		/*
3254 		 * Stop all of our activity.  Unregister network port,
3255 		 * disable interrupts, etc.
3256 		 */
3257 		for_each_port(adapter, pidx)
3258 			if (test_bit(pidx, &adapter->registered_device_map))
3259 				unregister_netdev(adapter->port[pidx]);
3260 		t4vf_sge_stop(adapter);
3261 		if (adapter->flags & USING_MSIX) {
3262 			pci_disable_msix(adapter->pdev);
3263 			adapter->flags &= ~USING_MSIX;
3264 		} else if (adapter->flags & USING_MSI) {
3265 			pci_disable_msi(adapter->pdev);
3266 			adapter->flags &= ~USING_MSI;
3267 		}
3268 
3269 		/*
3270 		 * Tear down our debugfs entries.
3271 		 */
3272 		if (!IS_ERR_OR_NULL(adapter->debugfs_root)) {
3273 			cleanup_debugfs(adapter);
3274 			debugfs_remove_recursive(adapter->debugfs_root);
3275 		}
3276 
3277 		/*
3278 		 * Free all of the various resources which we've acquired ...
3279 		 */
3280 		t4vf_free_sge_resources(adapter);
3281 		for_each_port(adapter, pidx) {
3282 			struct net_device *netdev = adapter->port[pidx];
3283 			struct port_info *pi;
3284 
3285 			if (netdev == NULL)
3286 				continue;
3287 
3288 			pi = netdev_priv(netdev);
3289 			t4vf_free_vi(adapter, pi->viid);
3290 			free_netdev(netdev);
3291 		}
3292 		iounmap(adapter->regs);
3293 		if (!is_t4(adapter->params.chip))
3294 			iounmap(adapter->bar2);
3295 		kfree(adapter->mbox_log);
3296 		kfree(adapter);
3297 	}
3298 
3299 	/*
3300 	 * Disable the device and release its PCI resources.
3301 	 */
3302 	pci_disable_device(pdev);
3303 	pci_clear_master(pdev);
3304 	pci_release_regions(pdev);
3305 }
3306 
3307 /*
3308  * "Shutdown" quiesce the device, stopping Ingress Packet and Interrupt
3309  * delivery.
3310  */
3311 static void cxgb4vf_pci_shutdown(struct pci_dev *pdev)
3312 {
3313 	struct adapter *adapter;
3314 	int pidx;
3315 
3316 	adapter = pci_get_drvdata(pdev);
3317 	if (!adapter)
3318 		return;
3319 
3320 	/* Disable all Virtual Interfaces.  This will shut down the
3321 	 * delivery of all ingress packets into the chip for these
3322 	 * Virtual Interfaces.
3323 	 */
3324 	for_each_port(adapter, pidx)
3325 		if (test_bit(pidx, &adapter->registered_device_map))
3326 			unregister_netdev(adapter->port[pidx]);
3327 
3328 	/* Free up all Queues which will prevent further DMA and
3329 	 * Interrupts allowing various internal pathways to drain.
3330 	 */
3331 	t4vf_sge_stop(adapter);
3332 	if (adapter->flags & USING_MSIX) {
3333 		pci_disable_msix(adapter->pdev);
3334 		adapter->flags &= ~USING_MSIX;
3335 	} else if (adapter->flags & USING_MSI) {
3336 		pci_disable_msi(adapter->pdev);
3337 		adapter->flags &= ~USING_MSI;
3338 	}
3339 
3340 	/*
3341 	 * Free up all Queues which will prevent further DMA and
3342 	 * Interrupts allowing various internal pathways to drain.
3343 	 */
3344 	t4vf_free_sge_resources(adapter);
3345 	pci_set_drvdata(pdev, NULL);
3346 }
3347 
3348 /* Macros needed to support the PCI Device ID Table ...
3349  */
3350 #define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
3351 	static const struct pci_device_id cxgb4vf_pci_tbl[] = {
3352 #define CH_PCI_DEVICE_ID_FUNCTION	0x8
3353 
3354 #define CH_PCI_ID_TABLE_ENTRY(devid) \
3355 		{ PCI_VDEVICE(CHELSIO, (devid)), 0 }
3356 
3357 #define CH_PCI_DEVICE_ID_TABLE_DEFINE_END { 0, } }
3358 
3359 #include "../cxgb4/t4_pci_id_tbl.h"
3360 
3361 MODULE_DESCRIPTION(DRV_DESC);
3362 MODULE_AUTHOR("Chelsio Communications");
3363 MODULE_LICENSE("Dual BSD/GPL");
3364 MODULE_VERSION(DRV_VERSION);
3365 MODULE_DEVICE_TABLE(pci, cxgb4vf_pci_tbl);
3366 
3367 static struct pci_driver cxgb4vf_driver = {
3368 	.name		= KBUILD_MODNAME,
3369 	.id_table	= cxgb4vf_pci_tbl,
3370 	.probe		= cxgb4vf_pci_probe,
3371 	.remove		= cxgb4vf_pci_remove,
3372 	.shutdown	= cxgb4vf_pci_shutdown,
3373 };
3374 
3375 /*
3376  * Initialize global driver state.
3377  */
3378 static int __init cxgb4vf_module_init(void)
3379 {
3380 	int ret;
3381 
3382 	/*
3383 	 * Vet our module parameters.
3384 	 */
3385 	if (msi != MSI_MSIX && msi != MSI_MSI) {
3386 		pr_warn("bad module parameter msi=%d; must be %d (MSI-X or MSI) or %d (MSI)\n",
3387 			msi, MSI_MSIX, MSI_MSI);
3388 		return -EINVAL;
3389 	}
3390 
3391 	/* Debugfs support is optional, just warn if this fails */
3392 	cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
3393 	if (IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
3394 		pr_warn("could not create debugfs entry, continuing\n");
3395 
3396 	ret = pci_register_driver(&cxgb4vf_driver);
3397 	if (ret < 0 && !IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
3398 		debugfs_remove(cxgb4vf_debugfs_root);
3399 	return ret;
3400 }
3401 
3402 /*
3403  * Tear down global driver state.
3404  */
3405 static void __exit cxgb4vf_module_exit(void)
3406 {
3407 	pci_unregister_driver(&cxgb4vf_driver);
3408 	debugfs_remove(cxgb4vf_debugfs_root);
3409 }
3410 
3411 module_init(cxgb4vf_module_init);
3412 module_exit(cxgb4vf_module_exit);
3413