/*
 * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
 * driver for Linux.
 *
 * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/debugfs.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>

#include "t4vf_common.h"
#include "t4vf_defs.h"

#include "../cxgb4/t4_regs.h"
#include "../cxgb4/t4_msg.h"

/*
 * Generic information about the driver.
 */
#define DRV_VERSION "2.0.0-ko"
#define DRV_DESC "Chelsio T4/T5/T6 Virtual Function (VF) Network Driver"

/*
 * Module Parameters.
 * ==================
 */

/*
 * Default ethtool "message level" for adapters.
 */
#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X then MSI.  This parameter determines which of these schemes the
 * driver may consider as follows:
 *
 *     msi = 2: choose from among MSI-X and MSI
 *     msi = 1: only consider MSI interrupts
 *
 * Note that unlike the Physical Function driver, this Virtual Function driver
 * does _not_ support legacy INTx interrupts (this limitation is mandated by
 * the PCI-E SR-IOV standard).
 */
#define MSI_MSIX	2
#define MSI_MSI		1
#define MSI_DEFAULT	MSI_MSIX

static int msi = MSI_DEFAULT;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI-X or MSI");
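
/*
 * Example usage (a sketch, not part of the original sources): loading with
 * "modprobe cxgb4vf msi=1" restricts the driver to MSI.  Since the
 * parameter is registered with mode 0644, its current value can also be
 * read back at /sys/module/cxgb4vf/parameters/msi; note that only adapters
 * probed after a change observe the new value.
 */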

/*
 * Fundamental constants.
 * ======================
 */

enum {
	MAX_TXQ_ENTRIES		= 16384,
	MAX_RSPQ_ENTRIES	= 16384,
	MAX_RX_BUFFERS		= 16384,

	MIN_TXQ_ENTRIES		= 32,
	MIN_RSPQ_ENTRIES	= 128,
	MIN_FL_ENTRIES		= 16,

	/*
	 * For purposes of manipulating the Free List size we need to
	 * recognize that Free Lists are actually Egress Queues (the host
	 * produces free buffers which the hardware consumes), that Egress
	 * Queue indices are all expressed in units of Egress Context Units
	 * (EQ_UNIT bytes), and that Free List entries are 64-bit PCI DMA
	 * addresses.  And since Producer Index == Consumer Index implies an
	 * EMPTY list, we always leave at least one Egress Unit's worth of
	 * Free List entries unused.  See sge.c for more details ...
	 */
	EQ_UNIT = SGE_EQ_IDXSIZE,
	FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
	MIN_FL_RESID = FL_PER_EQ_UNIT,
};
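
/*
 * Worked example (assuming the usual 64-byte Egress Context Unit): with
 * 8-byte Free List entries, FL_PER_EQ_UNIT = 64 / 8 = 8, so MIN_FL_RESID
 * keeps 8 entries of every Free List unused.  That guarantees the Producer
 * Index never wraps around onto the Consumer Index, a state the hardware
 * would interpret as an empty list.
 */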

/*
 * Global driver state.
 * ====================
 */

static struct dentry *cxgb4vf_debugfs_root;

/*
 * OS "Callback" functions.
 * ========================
 */

/*
 * The link status has changed on the indicated "port" (Virtual Interface).
 */
void t4vf_os_link_changed(struct adapter *adapter, int pidx, int link_ok)
{
	struct net_device *dev = adapter->port[pidx];

	/*
	 * If the port is disabled or the current recorded "link up"
	 * status matches the new status, just return.
	 */
	if (!netif_running(dev) || link_ok == netif_carrier_ok(dev))
		return;

	/*
	 * Tell the OS that the link status has changed and print a short
	 * informative message on the console about the event.
	 */
	if (link_ok) {
		const char *s;
		const char *fc;
		const struct port_info *pi = netdev_priv(dev);

		netif_carrier_on(dev);

		switch (pi->link_cfg.speed) {
		case 100:
			s = "100Mbps";
			break;
		case 1000:
			s = "1Gbps";
			break;
		case 10000:
			s = "10Gbps";
			break;
		case 25000:
			s = "25Gbps";
			break;
		case 40000:
			s = "40Gbps";
			break;
		case 100000:
			s = "100Gbps";
			break;

		default:
			s = "unknown";
			break;
		}

		switch ((int)pi->link_cfg.fc) {
		case PAUSE_RX:
			fc = "RX";
			break;

		case PAUSE_TX:
			fc = "TX";
			break;

		case PAUSE_RX | PAUSE_TX:
			fc = "RX/TX";
			break;

		default:
			fc = "no";
			break;
		}

		netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s, fc);
	} else {
		netif_carrier_off(dev);
		netdev_info(dev, "link down\n");
	}
}

/*
 * The port module type has changed on the indicated "port" (Virtual
 * Interface).
 */
void t4vf_os_portmod_changed(struct adapter *adapter, int pidx)
{
	static const char * const mod_str[] = {
		NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
	};
	const struct net_device *dev = adapter->port[pidx];
	const struct port_info *pi = netdev_priv(dev);

	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
		dev_info(adapter->pdev_dev, "%s: port module unplugged\n",
			 dev->name);
	else if (pi->mod_type < ARRAY_SIZE(mod_str))
		dev_info(adapter->pdev_dev, "%s: %s port module inserted\n",
			 dev->name, mod_str[pi->mod_type]);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
		dev_info(adapter->pdev_dev, "%s: unsupported optical port "
			 "module inserted\n", dev->name);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
		dev_info(adapter->pdev_dev, "%s: unknown port module inserted, "
			 "forcing TWINAX\n", dev->name);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR)
		dev_info(adapter->pdev_dev, "%s: transceiver module error\n",
			 dev->name);
	else
		dev_info(adapter->pdev_dev, "%s: unknown module type %d "
			 "inserted\n", dev->name, pi->mod_type);
}

/*
 * Net device operations.
 * ======================
 */

/*
 * Perform the MAC and PHY actions needed to enable a "port" (Virtual
 * Interface).
 */
static int link_start(struct net_device *dev)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);

	/*
	 * We do not set address filters and promiscuity here, the stack does
	 * that step explicitly. Enable vlan accel.
	 */
	ret = t4vf_set_rxmode(pi->adapter, pi->viid, dev->mtu, -1, -1, -1, 1,
			      true);
	if (ret == 0) {
		ret = t4vf_change_mac(pi->adapter, pi->viid,
				      pi->xact_addr_filt, dev->dev_addr, true);
		if (ret >= 0) {
			pi->xact_addr_filt = ret;
			ret = 0;
		}
	}

	/*
	 * We don't need to actually "start the link" itself since the
	 * firmware will do that for us when the first Virtual Interface
	 * is enabled on a port.
	 */
	if (ret == 0)
		ret = t4vf_enable_vi(pi->adapter, pi->viid, true, true);
	return ret;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adapter)
{
	int namelen = sizeof(adapter->msix_info[0].desc) - 1;
	int pidx;

	/*
	 * Firmware events.
	 */
	snprintf(adapter->msix_info[MSIX_FW].desc, namelen,
		 "%s-FWeventq", adapter->name);
	adapter->msix_info[MSIX_FW].desc[namelen] = 0;

	/*
	 * Ethernet queues.
	 */
	for_each_port(adapter, pidx) {
		struct net_device *dev = adapter->port[pidx];
		const struct port_info *pi = netdev_priv(dev);
		int qs, msi;

		for (qs = 0, msi = MSIX_IQFLINT; qs < pi->nqsets; qs++, msi++) {
			snprintf(adapter->msix_info[msi].desc, namelen,
				 "%s-%d", dev->name, qs);
			adapter->msix_info[msi].desc[namelen] = 0;
		}
	}
}
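
/*
 * These names are what appear in /proc/interrupts once the vectors have
 * been requested; with hypothetical names, something like
 * "0000:04:00.4-FWeventq" for the firmware event queue and "eth0-0",
 * "eth0-1", ... for the per-"port" Queue Sets.
 */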

/*
 * Request all of our MSI-X resources.
 */
static int request_msix_queue_irqs(struct adapter *adapter)
{
	struct sge *s = &adapter->sge;
	int rxq, msi, err;

	/*
	 * Firmware events.
	 */
	err = request_irq(adapter->msix_info[MSIX_FW].vec, t4vf_sge_intr_msix,
			  0, adapter->msix_info[MSIX_FW].desc, &s->fw_evtq);
	if (err)
		return err;

	/*
	 * Ethernet queues.
	 */
	msi = MSIX_IQFLINT;
	for_each_ethrxq(s, rxq) {
		err = request_irq(adapter->msix_info[msi].vec,
				  t4vf_sge_intr_msix, 0,
				  adapter->msix_info[msi].desc,
				  &s->ethrxq[rxq].rspq);
		if (err)
			goto err_free_irqs;
		msi++;
	}
	return 0;

err_free_irqs:
	while (--rxq >= 0)
		free_irq(adapter->msix_info[--msi].vec, &s->ethrxq[rxq].rspq);
	free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq);
	return err;
}

/*
 * Free our MSI-X resources.
 */
static void free_msix_queue_irqs(struct adapter *adapter)
{
	struct sge *s = &adapter->sge;
	int rxq, msi;

	free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq);
	msi = MSIX_IQFLINT;
	for_each_ethrxq(s, rxq)
		free_irq(adapter->msix_info[msi++].vec,
			 &s->ethrxq[rxq].rspq);
}

/*
 * Turn on NAPI and start up interrupts on a response queue.
 */
static void qenable(struct sge_rspq *rspq)
{
	napi_enable(&rspq->napi);

	/*
	 * 0-increment the Going To Sleep register to start the timer and
	 * enable interrupts.
	 */
	t4_write_reg(rspq->adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
		     CIDXINC_V(0) |
		     SEINTARM_V(rspq->intr_params) |
		     INGRESSQID_V(rspq->cntxt_id));
}

/*
 * Enable NAPI scheduling and interrupt generation for all Receive Queues.
 */
static void enable_rx(struct adapter *adapter)
{
	int rxq;
	struct sge *s = &adapter->sge;

	for_each_ethrxq(s, rxq)
		qenable(&s->ethrxq[rxq].rspq);
	qenable(&s->fw_evtq);

	/*
	 * The interrupt queue doesn't use NAPI so we do the 0-increment of
	 * its Going To Sleep register here to get it started.
	 */
	if (adapter->flags & USING_MSI)
		t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
			     CIDXINC_V(0) |
			     SEINTARM_V(s->intrq.intr_params) |
			     INGRESSQID_V(s->intrq.cntxt_id));
}

/*
 * Wait until all NAPI handlers are descheduled.
 */
static void quiesce_rx(struct adapter *adapter)
{
	struct sge *s = &adapter->sge;
	int rxq;

	for_each_ethrxq(s, rxq)
		napi_disable(&s->ethrxq[rxq].rspq.napi);
	napi_disable(&s->fw_evtq.napi);
}

/*
 * Response queue handler for the firmware event queue.
 */
static int fwevtq_handler(struct sge_rspq *rspq, const __be64 *rsp,
			  const struct pkt_gl *gl)
{
	/*
	 * Extract response opcode and get pointer to CPL message body.
	 */
	struct adapter *adapter = rspq->adapter;
	u8 opcode = ((const struct rss_header *)rsp)->opcode;
	void *cpl = (void *)(rsp + 1);

	switch (opcode) {
	case CPL_FW6_MSG: {
		/*
		 * We've received an asynchronous message from the firmware.
		 */
		const struct cpl_fw6_msg *fw_msg = cpl;
		if (fw_msg->type == FW6_TYPE_CMD_RPL)
			t4vf_handle_fw_rpl(adapter, fw_msg->data);
		break;
	}

	case CPL_FW4_MSG: {
		/* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
		 */
		const struct cpl_sge_egr_update *p = (void *)(rsp + 3);
		opcode = CPL_OPCODE_G(ntohl(p->opcode_qid));
		if (opcode != CPL_SGE_EGR_UPDATE) {
			dev_err(adapter->pdev_dev,
				"unexpected FW4/CPL %#x on FW event queue\n",
				opcode);
			break;
		}
		cpl = (void *)p;
		/*FALLTHROUGH*/
	}

	case CPL_SGE_EGR_UPDATE: {
		/*
		 * We've received an Egress Queue Status Update message.  We
		 * get these, if the SGE is configured to send these when the
		 * firmware passes certain points in processing our TX
		 * Ethernet Queue or if we make an explicit request for one.
		 * We use these updates to determine when we may need to
		 * restart a TX Ethernet Queue which was stopped for lack of
		 * free TX Queue Descriptors ...
		 */
		const struct cpl_sge_egr_update *p = cpl;
		unsigned int qid = EGR_QID_G(be32_to_cpu(p->opcode_qid));
		struct sge *s = &adapter->sge;
		struct sge_txq *tq;
		struct sge_eth_txq *txq;
		unsigned int eq_idx;

		/*
		 * Perform sanity checking on the Queue ID to make sure it
		 * really refers to one of our TX Ethernet Egress Queues which
		 * is active and matches the queue's ID.  None of these error
		 * conditions should ever happen so we may want to either make
		 * them fatal and/or conditionalized under DEBUG.
		 */
		eq_idx = EQ_IDX(s, qid);
		if (unlikely(eq_idx >= MAX_EGRQ)) {
			dev_err(adapter->pdev_dev,
				"Egress Update QID %d out of range\n", qid);
			break;
		}
		tq = s->egr_map[eq_idx];
		if (unlikely(tq == NULL)) {
			dev_err(adapter->pdev_dev,
				"Egress Update QID %d TXQ=NULL\n", qid);
			break;
		}
		txq = container_of(tq, struct sge_eth_txq, q);
		if (unlikely(tq->abs_id != qid)) {
			dev_err(adapter->pdev_dev,
				"Egress Update QID %d refers to TXQ %d\n",
				qid, tq->abs_id);
			break;
		}

		/*
		 * Restart a stopped TX Queue which has less than half of its
		 * TX ring in use ...
		 */
		txq->q.restarts++;
		netif_tx_wake_queue(txq->txq);
		break;
	}

	default:
		dev_err(adapter->pdev_dev,
			"unexpected CPL %#x on FW event queue\n", opcode);
	}

	return 0;
}

/*
 * Allocate SGE TX/RX response queues.  Determine how many sets of SGE queues
 * to use and initialize them.  We support multiple "Queue Sets" per port if
 * we have MSI-X, otherwise just one queue set per port.
 */
static int setup_sge_queues(struct adapter *adapter)
{
	struct sge *s = &adapter->sge;
	int err, pidx, msix;

	/*
	 * Clear "Queue Set" Free List Starving and TX Queue Mapping Error
	 * state.
	 */
	bitmap_zero(s->starving_fl, MAX_EGRQ);

	/*
	 * If we're using MSI interrupt mode we need to set up a "forwarded
	 * interrupt" queue which we'll set up with our MSI vector.  The rest
	 * of the ingress queues will be set up to forward their interrupts to
	 * this queue ...  This must be first since t4vf_sge_alloc_rxq() uses
	 * the intrq's queue ID as the interrupt forwarding queue for the
	 * subsequent calls ...
	 */
	if (adapter->flags & USING_MSI) {
		err = t4vf_sge_alloc_rxq(adapter, &s->intrq, false,
					 adapter->port[0], 0, NULL, NULL);
		if (err)
			goto err_free_queues;
	}

	/*
	 * Allocate our ingress queue for asynchronous firmware messages.
	 */
	err = t4vf_sge_alloc_rxq(adapter, &s->fw_evtq, true, adapter->port[0],
				 MSIX_FW, NULL, fwevtq_handler);
	if (err)
		goto err_free_queues;

	/*
	 * Allocate each "port"'s initial Queue Sets.  These can be changed
	 * later on ... up to the point where any interface on the adapter is
	 * brought up at which point lots of things get nailed down
	 * permanently ...
	 */
	msix = MSIX_IQFLINT;
	for_each_port(adapter, pidx) {
		struct net_device *dev = adapter->port[pidx];
		struct port_info *pi = netdev_priv(dev);
		struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset];
		struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset];
		int qs;

		for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
			err = t4vf_sge_alloc_rxq(adapter, &rxq->rspq, false,
						 dev, msix++,
						 &rxq->fl, t4vf_ethrx_handler);
			if (err)
				goto err_free_queues;

			err = t4vf_sge_alloc_eth_txq(adapter, txq, dev,
					     netdev_get_tx_queue(dev, qs),
					     s->fw_evtq.cntxt_id);
			if (err)
				goto err_free_queues;

			rxq->rspq.idx = qs;
			memset(&rxq->stats, 0, sizeof(rxq->stats));
		}
	}

	/*
	 * Create the reverse mappings for the queues.
	 */
	s->egr_base = s->ethtxq[0].q.abs_id - s->ethtxq[0].q.cntxt_id;
	s->ingr_base = s->ethrxq[0].rspq.abs_id - s->ethrxq[0].rspq.cntxt_id;
	IQ_MAP(s, s->fw_evtq.abs_id) = &s->fw_evtq;
	for_each_port(adapter, pidx) {
		struct net_device *dev = adapter->port[pidx];
		struct port_info *pi = netdev_priv(dev);
		struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset];
		struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset];
		int qs;

		for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
			IQ_MAP(s, rxq->rspq.abs_id) = &rxq->rspq;
			EQ_MAP(s, txq->q.abs_id) = &txq->q;

			/*
			 * The FW_IQ_CMD doesn't return the Absolute Queue IDs
			 * for Free Lists but since all of the Egress Queues
			 * (including Free Lists) have Relative Queue IDs
			 * which are computed as Absolute - Base Queue ID, we
			 * can synthesize the Absolute Queue IDs for the Free
			 * Lists.  This is useful for debugging purposes when
			 * we want to dump Queue Contexts via the PF Driver.
			 */
			rxq->fl.abs_id = rxq->fl.cntxt_id + s->egr_base;
			EQ_MAP(s, rxq->fl.abs_id) = &rxq->fl;
		}
	}
	return 0;

err_free_queues:
	t4vf_free_sge_resources(adapter);
	return err;
}

/*
 * Set up Receive Side Scaling (RSS) to distribute packets to multiple receive
 * queues.  We configure the RSS CPU lookup table to distribute to the number
 * of HW receive queues, and the response queue lookup table to narrow that
 * down to the response queues actually configured for each "port" (Virtual
 * Interface).  We always configure the RSS mapping for all ports since the
 * mapping table has plenty of entries.
 */
static int setup_rss(struct adapter *adapter)
{
	int pidx;

	for_each_port(adapter, pidx) {
		struct port_info *pi = adap2pinfo(adapter, pidx);
		struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[pi->first_qset];
		u16 rss[MAX_PORT_QSETS];
		int qs, err;

		for (qs = 0; qs < pi->nqsets; qs++)
			rss[qs] = rxq[qs].rspq.abs_id;

		err = t4vf_config_rss_range(adapter, pi->viid,
					    0, pi->rss_size, rss, pi->nqsets);
		if (err)
			return err;

		/*
		 * Perform Global RSS Mode-specific initialization.
		 */
		switch (adapter->params.rss.mode) {
		case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL:
			/*
			 * If Tunnel All Lookup isn't specified in the global
			 * RSS Configuration, then we need to specify a
			 * default Ingress Queue for any ingress packets which
			 * aren't hashed.  We'll use our first ingress queue
			 * ...
			 */
			if (!adapter->params.rss.u.basicvirtual.tnlalllookup) {
				union rss_vi_config config;
				err = t4vf_read_rss_vi_config(adapter,
							      pi->viid,
							      &config);
				if (err)
					return err;
				config.basicvirtual.defaultq =
					rxq[0].rspq.abs_id;
				err = t4vf_write_rss_vi_config(adapter,
							       pi->viid,
							       &config);
				if (err)
					return err;
			}
			break;
		}
	}

	return 0;
}

/*
 * Bring the adapter up.  Called whenever we go from no "ports" open to having
 * one open.  This function performs the actions necessary to make an adapter
 * operational, such as completing the initialization of HW modules, and
 * enabling interrupts.  Must be called with the rtnl lock held.  (Note that
 * this is called "cxgb_up" in the PF Driver.)
 */
static int adapter_up(struct adapter *adapter)
{
	int err;

	/*
	 * If this is the first time we've been called, perform basic
	 * adapter setup.  Once we've done this, many of our adapter
	 * parameters can no longer be changed ...
	 */
	if ((adapter->flags & FULL_INIT_DONE) == 0) {
		err = setup_sge_queues(adapter);
		if (err)
			return err;
		err = setup_rss(adapter);
		if (err) {
			t4vf_free_sge_resources(adapter);
			return err;
		}

		if (adapter->flags & USING_MSIX)
			name_msix_vecs(adapter);
		adapter->flags |= FULL_INIT_DONE;
	}

	/*
	 * Acquire our interrupt resources.  We only support MSI-X and MSI.
	 */
	BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0);
	if (adapter->flags & USING_MSIX)
		err = request_msix_queue_irqs(adapter);
	else
		err = request_irq(adapter->pdev->irq,
				  t4vf_intr_handler(adapter), 0,
				  adapter->name, adapter);
	if (err) {
		dev_err(adapter->pdev_dev, "request_irq failed, err %d\n",
			err);
		return err;
	}

	/*
	 * Enable NAPI ingress processing and return success.
	 */
	enable_rx(adapter);
	t4vf_sge_start(adapter);

	/* Initialize hash mac addr list */
	INIT_LIST_HEAD(&adapter->mac_hlist);
	return 0;
}

/*
 * Bring the adapter down.  Called whenever the last "port" (Virtual
 * Interface) is closed.  (Note that this routine is called "cxgb_down" in the
 * PF Driver.)
 */
static void adapter_down(struct adapter *adapter)
{
	/*
	 * Free interrupt resources.
	 */
	if (adapter->flags & USING_MSIX)
		free_msix_queue_irqs(adapter);
	else
		free_irq(adapter->pdev->irq, adapter);

	/*
	 * Wait for NAPI handlers to finish.
	 */
	quiesce_rx(adapter);
}

/*
 * Start up a net device.
 */
static int cxgb4vf_open(struct net_device *dev)
{
	int err;
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	/*
	 * If this is the first interface that we're opening on the "adapter",
	 * bring the "adapter" up now.
	 */
	if (adapter->open_device_map == 0) {
		err = adapter_up(adapter);
		if (err)
			return err;
	}

	/*
	 * Note that this interface is up and start everything up ...
	 */
	err = link_start(dev);
	if (err)
		goto err_unwind;

	netif_tx_start_all_queues(dev);
	set_bit(pi->port_id, &adapter->open_device_map);
	return 0;

err_unwind:
	if (adapter->open_device_map == 0)
		adapter_down(adapter);
	return err;
}

/*
 * Shut down a net device.  This routine is called "cxgb_close" in the PF
 * Driver ...
 */
static int cxgb4vf_stop(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	netif_tx_stop_all_queues(dev);
	netif_carrier_off(dev);
	t4vf_enable_vi(adapter, pi->viid, false, false);
	pi->link_cfg.link_ok = 0;

	clear_bit(pi->port_id, &adapter->open_device_map);
	if (adapter->open_device_map == 0)
		adapter_down(adapter);
	return 0;
}

/*
 * Translate our basic statistics into the standard "ifconfig" statistics.
 */
static struct net_device_stats *cxgb4vf_get_stats(struct net_device *dev)
{
	struct t4vf_port_stats stats;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adapter = pi->adapter;
	struct net_device_stats *ns = &dev->stats;
	int err;

	spin_lock(&adapter->stats_lock);
	err = t4vf_get_port_stats(adapter, pi->pidx, &stats);
	spin_unlock(&adapter->stats_lock);

	memset(ns, 0, sizeof(*ns));
	if (err)
		return ns;

	ns->tx_bytes = (stats.tx_bcast_bytes + stats.tx_mcast_bytes +
			stats.tx_ucast_bytes + stats.tx_offload_bytes);
	ns->tx_packets = (stats.tx_bcast_frames + stats.tx_mcast_frames +
			  stats.tx_ucast_frames + stats.tx_offload_frames);
	ns->rx_bytes = (stats.rx_bcast_bytes + stats.rx_mcast_bytes +
			stats.rx_ucast_bytes);
	ns->rx_packets = (stats.rx_bcast_frames + stats.rx_mcast_frames +
			  stats.rx_ucast_frames);
	ns->multicast = stats.rx_mcast_frames;
	ns->tx_errors = stats.tx_drop_frames;
	ns->rx_errors = stats.rx_err_frames;

	return ns;
}

static inline int cxgb4vf_set_addr_hash(struct port_info *pi)
{
	struct adapter *adapter = pi->adapter;
	u64 vec = 0;
	bool ucast = false;
	struct hash_mac_addr *entry;

	/* Calculate the hash vector for the updated list and program it */
	list_for_each_entry(entry, &adapter->mac_hlist, list) {
		ucast |= is_unicast_ether_addr(entry->addr);
		vec |= (1ULL << hash_mac_addr(entry->addr));
	}
	return t4vf_set_addr_hash(adapter, pi->viid, ucast, vec, false);
}
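
/*
 * Sketch of what the loop above builds (hash_mac_addr() is assumed to fold
 * a MAC address down to a bucket index in [0, 63]): if two addresses hash
 * to buckets 3 and 17, vec becomes (1ULL << 3) | (1ULL << 17).  The hash
 * filter is imperfect -- any address landing in a set bucket is accepted --
 * so the stack may still see some unwanted frames.
 */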

static int cxgb4vf_mac_sync(struct net_device *netdev, const u8 *mac_addr)
{
	struct port_info *pi = netdev_priv(netdev);
	struct adapter *adapter = pi->adapter;
	int ret;
	u64 mhash = 0;
	u64 uhash = 0;
	bool free = false;
	bool ucast = is_unicast_ether_addr(mac_addr);
	const u8 *maclist[1] = {mac_addr};
	struct hash_mac_addr *new_entry;

	ret = t4vf_alloc_mac_filt(adapter, pi->viid, free, 1, maclist,
				  NULL, ucast ? &uhash : &mhash, false);
	if (ret < 0)
		goto out;
	/* If hash != 0, then add the address to the hash address list
	 * so that at the end we can calculate the hash for the whole
	 * list and program it.
	 */
	if (uhash || mhash) {
		new_entry = kzalloc(sizeof(*new_entry), GFP_ATOMIC);
		if (!new_entry)
			return -ENOMEM;
		ether_addr_copy(new_entry->addr, mac_addr);
		list_add_tail(&new_entry->list, &adapter->mac_hlist);
		ret = cxgb4vf_set_addr_hash(pi);
	}
out:
	return ret < 0 ? ret : 0;
}

static int cxgb4vf_mac_unsync(struct net_device *netdev, const u8 *mac_addr)
{
	struct port_info *pi = netdev_priv(netdev);
	struct adapter *adapter = pi->adapter;
	int ret;
	const u8 *maclist[1] = {mac_addr};
	struct hash_mac_addr *entry, *tmp;

	/* If the MAC address to be removed is in the hash addr
	 * list, delete it from the list and update hash vector
	 */
	list_for_each_entry_safe(entry, tmp, &adapter->mac_hlist, list) {
		if (ether_addr_equal(entry->addr, mac_addr)) {
			list_del(&entry->list);
			kfree(entry);
			return cxgb4vf_set_addr_hash(pi);
		}
	}

	ret = t4vf_free_mac_filt(adapter, pi->viid, 1, maclist, false);
	return ret < 0 ? -EINVAL : 0;
}

/*
 * Set RX properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
	struct port_info *pi = netdev_priv(dev);

	__dev_uc_sync(dev, cxgb4vf_mac_sync, cxgb4vf_mac_unsync);
	__dev_mc_sync(dev, cxgb4vf_mac_sync, cxgb4vf_mac_unsync);
	return t4vf_set_rxmode(pi->adapter, pi->viid, -1,
			       (dev->flags & IFF_PROMISC) != 0,
			       (dev->flags & IFF_ALLMULTI) != 0,
			       1, -1, sleep_ok);
}

/*
 * Set the current receive modes on the device.
 */
static void cxgb4vf_set_rxmode(struct net_device *dev)
{
	/* unfortunately we can't return errors to the stack */
	set_rxmode(dev, -1, false);
}

/*
 * Find the entry in the interrupt holdoff timer value array which comes
 * closest to the specified interrupt holdoff value.
 */
static int closest_timer(const struct sge *s, int us)
{
	int i, timer_idx = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
		int delta = us - s->timer_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			timer_idx = i;
		}
	}
	return timer_idx;
}

static int closest_thres(const struct sge *s, int thres)
{
	int i, delta, pktcnt_idx = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
		delta = thres - s->counter_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			pktcnt_idx = i;
		}
	}
	return pktcnt_idx;
}

/*
 * Return a queue's interrupt hold-off time in us.  0 means no timer.
 */
static unsigned int qtimer_val(const struct adapter *adapter,
			       const struct sge_rspq *rspq)
{
	unsigned int timer_idx = QINTR_TIMER_IDX_G(rspq->intr_params);

	return timer_idx < SGE_NTIMERS
		? adapter->sge.timer_val[timer_idx]
		: 0;
}

/**
 *	set_rxq_intr_params - set a queue's interrupt holdoff parameters
 *	@adapter: the adapter
 *	@rspq: the RX response queue
 *	@us: the hold-off time in us, or 0 to disable timer
 *	@cnt: the hold-off packet count, or 0 to disable counter
 *
 *	Sets an RX response queue's interrupt hold-off time and packet count.
 *	At least one of the two needs to be enabled for the queue to generate
 *	interrupts.
 */
static int set_rxq_intr_params(struct adapter *adapter, struct sge_rspq *rspq,
			       unsigned int us, unsigned int cnt)
{
	unsigned int timer_idx;

	/*
	 * If both the interrupt holdoff timer and count are specified as
	 * zero, default to a holdoff count of 1 ...
	 */
	if ((us | cnt) == 0)
		cnt = 1;

	/*
	 * If an interrupt holdoff count has been specified, then find the
	 * closest configured holdoff count and use that.  If the response
	 * queue has already been created, then update its queue context
	 * parameters ...
	 */
	if (cnt) {
		int err;
		u32 v, pktcnt_idx;

		pktcnt_idx = closest_thres(&adapter->sge, cnt);
		if (rspq->desc && rspq->pktcnt_idx != pktcnt_idx) {
			v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
			    FW_PARAMS_PARAM_X_V(
					FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
			    FW_PARAMS_PARAM_YZ_V(rspq->cntxt_id);
			err = t4vf_set_params(adapter, 1, &v, &pktcnt_idx);
			if (err)
				return err;
		}
		rspq->pktcnt_idx = pktcnt_idx;
	}

	/*
	 * Compute the closest holdoff timer index from the supplied holdoff
	 * timer value.
	 */
	timer_idx = (us == 0
		     ? SGE_TIMER_RSTRT_CNTR
		     : closest_timer(&adapter->sge, us));

	/*
	 * Update the response queue's interrupt coalescing parameters and
	 * return success.
	 */
	rspq->intr_params = (QINTR_TIMER_IDX_V(timer_idx) |
			     QINTR_CNT_EN_V(cnt > 0));
	return 0;
}
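
/*
 * For orientation, this is the path taken by interrupt coalescing tuning
 * from userspace, e.g. (hypothetical interface name):
 *
 *     ethtool -C eth0 rx-usecs 5 rx-frames 8
 *
 * which arrives via cxgb4vf_set_coalesce() below; the requested values are
 * snapped to the nearest entries of the adapter's holdoff timer and packet
 * count arrays by closest_timer()/closest_thres() above.
 */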

/*
 * Return a version number to identify the type of adapter.  The scheme is:
 * - bits 0..9: chip version
 * - bits 10..15: chip revision
 */
static inline unsigned int mk_adap_vers(const struct adapter *adapter)
{
	/*
	 * Chip version from the adapter parameters, plus a fixed "revision"
	 * of 0x3f to mark this as a cxgb4vf register dump.
	 */
	return CHELSIO_CHIP_VERSION(adapter->params.chip) | (0x3f << 10);
}

/*
 * Execute the specified ioctl command.
 */
static int cxgb4vf_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int ret = 0;

	switch (cmd) {
	    /*
	     * The VF Driver doesn't have access to any of the other
	     * common Ethernet device ioctl()'s (like reading/writing
	     * PHY registers, etc.)
	     */

	default:
		ret = -EOPNOTSUPP;
		break;
	}
	return ret;
}

/*
 * Change the device's MTU.
 */
static int cxgb4vf_change_mtu(struct net_device *dev, int new_mtu)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);

	ret = t4vf_set_rxmode(pi->adapter, pi->viid, new_mtu,
			      -1, -1, -1, -1, true);
	if (!ret)
		dev->mtu = new_mtu;
	return ret;
}

static netdev_features_t cxgb4vf_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	/*
	 * Since there is no support for separate rx/tx vlan accel
	 * enable/disable make sure tx flag is always in same state as rx.
	 */
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		features |= NETIF_F_HW_VLAN_CTAG_TX;
	else
		features &= ~NETIF_F_HW_VLAN_CTAG_TX;

	return features;
}
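
/*
 * In practice this means e.g. "ethtool -K eth0 rxvlan off" (hypothetical
 * interface name) also turns txvlan off, since this hook forces the two
 * flags to track each other before ndo_set_features runs.
 */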

static int cxgb4vf_set_features(struct net_device *dev,
	netdev_features_t features)
{
	struct port_info *pi = netdev_priv(dev);
	netdev_features_t changed = dev->features ^ features;

	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
		t4vf_set_rxmode(pi->adapter, pi->viid, -1, -1, -1, -1,
				features & NETIF_F_HW_VLAN_CTAG_TX, 0);

	return 0;
}

/*
 * Change the device's MAC address.
 */
static int cxgb4vf_set_mac_addr(struct net_device *dev, void *_addr)
{
	int ret;
	struct sockaddr *addr = _addr;
	struct port_info *pi = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	ret = t4vf_change_mac(pi->adapter, pi->viid, pi->xact_addr_filt,
			      addr->sa_data, true);
	if (ret < 0)
		return ret;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	pi->xact_addr_filt = ret;
	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Poll all of our receive queues.  This is called outside of normal interrupt
 * context.
 */
static void cxgb4vf_poll_controller(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	if (adapter->flags & USING_MSIX) {
		struct sge_eth_rxq *rxq;
		int nqsets;

		rxq = &adapter->sge.ethrxq[pi->first_qset];
		for (nqsets = pi->nqsets; nqsets; nqsets--) {
			t4vf_sge_intr_msix(0, &rxq->rspq);
			rxq++;
		}
	} else
		t4vf_intr_handler(adapter)(0, adapter);
}
#endif

/*
 * Ethtool operations.
 * ===================
 *
 * Note that we don't support any ethtool operations which change the physical
 * state of the port to which we're linked.
 */

/**
 *	from_fw_port_mod_type - translate Firmware Port/Module type to Ethtool
 *	@port_type: Firmware Port Type
 *	@mod_type: Firmware Module Type
 *
 *	Translate Firmware Port/Module type to Ethtool Port Type.
 */
static int from_fw_port_mod_type(enum fw_port_type port_type,
				 enum fw_port_module_type mod_type)
{
	if (port_type == FW_PORT_TYPE_BT_SGMII ||
	    port_type == FW_PORT_TYPE_BT_XFI ||
	    port_type == FW_PORT_TYPE_BT_XAUI) {
		return PORT_TP;
	} else if (port_type == FW_PORT_TYPE_FIBER_XFI ||
		   port_type == FW_PORT_TYPE_FIBER_XAUI) {
		return PORT_FIBRE;
	} else if (port_type == FW_PORT_TYPE_SFP ||
		   port_type == FW_PORT_TYPE_QSFP_10G ||
		   port_type == FW_PORT_TYPE_QSA ||
		   port_type == FW_PORT_TYPE_QSFP ||
		   port_type == FW_PORT_TYPE_CR4_QSFP ||
		   port_type == FW_PORT_TYPE_CR_QSFP ||
		   port_type == FW_PORT_TYPE_CR2_QSFP ||
		   port_type == FW_PORT_TYPE_SFP28) {
		if (mod_type == FW_PORT_MOD_TYPE_LR ||
		    mod_type == FW_PORT_MOD_TYPE_SR ||
		    mod_type == FW_PORT_MOD_TYPE_ER ||
		    mod_type == FW_PORT_MOD_TYPE_LRM)
			return PORT_FIBRE;
		else if (mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
			 mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
			return PORT_DA;
		else
			return PORT_OTHER;
	} else if (port_type == FW_PORT_TYPE_KR4_100G ||
		   port_type == FW_PORT_TYPE_KR_SFP28) {
		return PORT_NONE;
	}

	return PORT_OTHER;
}

/**
 *	fw_caps_to_lmm - translate Firmware to ethtool Link Mode Mask
 *	@port_type: Firmware Port Type
 *	@fw_caps: Firmware Port Capabilities
 *	@link_mode_mask: ethtool Link Mode Mask
 *
 *	Translate a Firmware Port Capabilities specification to an ethtool
 *	Link Mode Mask.
 */
static void fw_caps_to_lmm(enum fw_port_type port_type,
			   unsigned int fw_caps,
			   unsigned long *link_mode_mask)
{
	#define SET_LMM(__lmm_name) \
		__set_bit(ETHTOOL_LINK_MODE_ ## __lmm_name ## _BIT, \
			  link_mode_mask)

	#define FW_CAPS_TO_LMM(__fw_name, __lmm_name) \
		do { \
			if (fw_caps & FW_PORT_CAP32_ ## __fw_name) \
				SET_LMM(__lmm_name); \
		} while (0)

	switch (port_type) {
	case FW_PORT_TYPE_BT_SGMII:
	case FW_PORT_TYPE_BT_XFI:
	case FW_PORT_TYPE_BT_XAUI:
		SET_LMM(TP);
		FW_CAPS_TO_LMM(SPEED_100M, 100baseT_Full);
		FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
		FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
		break;

	case FW_PORT_TYPE_KX4:
	case FW_PORT_TYPE_KX:
		SET_LMM(Backplane);
		FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
		FW_CAPS_TO_LMM(SPEED_10G, 10000baseKX4_Full);
		break;

	case FW_PORT_TYPE_KR:
		SET_LMM(Backplane);
		SET_LMM(10000baseKR_Full);
		break;

	case FW_PORT_TYPE_BP_AP:
		SET_LMM(Backplane);
		SET_LMM(10000baseR_FEC);
		SET_LMM(10000baseKR_Full);
		SET_LMM(1000baseKX_Full);
		break;

	case FW_PORT_TYPE_BP4_AP:
		SET_LMM(Backplane);
		SET_LMM(10000baseR_FEC);
		SET_LMM(10000baseKR_Full);
		SET_LMM(1000baseKX_Full);
		SET_LMM(10000baseKX4_Full);
		break;

	case FW_PORT_TYPE_FIBER_XFI:
	case FW_PORT_TYPE_FIBER_XAUI:
	case FW_PORT_TYPE_SFP:
	case FW_PORT_TYPE_QSFP_10G:
	case FW_PORT_TYPE_QSA:
		SET_LMM(FIBRE);
		FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
		FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
		break;

	case FW_PORT_TYPE_BP40_BA:
	case FW_PORT_TYPE_QSFP:
		SET_LMM(FIBRE);
		SET_LMM(40000baseSR4_Full);
		break;

	case FW_PORT_TYPE_CR_QSFP:
	case FW_PORT_TYPE_SFP28:
		SET_LMM(FIBRE);
		SET_LMM(25000baseCR_Full);
		break;

	case FW_PORT_TYPE_KR_SFP28:
		SET_LMM(Backplane);
		SET_LMM(25000baseKR_Full);
		break;

	case FW_PORT_TYPE_CR2_QSFP:
		SET_LMM(FIBRE);
		SET_LMM(50000baseSR2_Full);
		break;

	case FW_PORT_TYPE_KR4_100G:
	case FW_PORT_TYPE_CR4_QSFP:
		SET_LMM(FIBRE);
		SET_LMM(100000baseCR4_Full);
		break;

	default:
		break;
	}

	FW_CAPS_TO_LMM(ANEG, Autoneg);
	FW_CAPS_TO_LMM(802_3_PAUSE, Pause);
	FW_CAPS_TO_LMM(802_3_ASM_DIR, Asym_Pause);

	#undef FW_CAPS_TO_LMM
	#undef SET_LMM
}

static int cxgb4vf_get_link_ksettings(struct net_device *dev,
				  struct ethtool_link_ksettings *link_ksettings)
{
	struct port_info *pi = netdev_priv(dev);
	struct ethtool_link_settings *base = &link_ksettings->base;

	/* For now, the Firmware doesn't send up Port State changes
	 * when the Virtual Interface attached to the Port is down.  So
	 * if it's down, let's grab any changes.
	 */
	if (!netif_running(dev))
		(void)t4vf_update_port_info(pi);

	ethtool_link_ksettings_zero_link_mode(link_ksettings, supported);
	ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
	ethtool_link_ksettings_zero_link_mode(link_ksettings, lp_advertising);

	base->port = from_fw_port_mod_type(pi->port_type, pi->mod_type);

	if (pi->mdio_addr >= 0) {
		base->phy_address = pi->mdio_addr;
		base->mdio_support = (pi->port_type == FW_PORT_TYPE_BT_SGMII
				      ? ETH_MDIO_SUPPORTS_C22
				      : ETH_MDIO_SUPPORTS_C45);
	} else {
		base->phy_address = 255;
		base->mdio_support = 0;
	}

	fw_caps_to_lmm(pi->port_type, pi->link_cfg.pcaps,
		       link_ksettings->link_modes.supported);
	fw_caps_to_lmm(pi->port_type, pi->link_cfg.acaps,
		       link_ksettings->link_modes.advertising);
	fw_caps_to_lmm(pi->port_type, pi->link_cfg.lpacaps,
		       link_ksettings->link_modes.lp_advertising);

	if (netif_carrier_ok(dev)) {
		base->speed = pi->link_cfg.speed;
		base->duplex = DUPLEX_FULL;
	} else {
		base->speed = SPEED_UNKNOWN;
		base->duplex = DUPLEX_UNKNOWN;
	}

	base->autoneg = pi->link_cfg.autoneg;
	if (pi->link_cfg.pcaps & FW_PORT_CAP32_ANEG)
		ethtool_link_ksettings_add_link_mode(link_ksettings,
						     supported, Autoneg);
	if (pi->link_cfg.autoneg)
		ethtool_link_ksettings_add_link_mode(link_ksettings,
						     advertising, Autoneg);

	return 0;
}

/*
 * Return our driver information.
 */
static void cxgb4vf_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *drvinfo)
{
	struct adapter *adapter = netdev2adap(dev);

	strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
	strlcpy(drvinfo->bus_info, pci_name(to_pci_dev(dev->dev.parent)),
		sizeof(drvinfo->bus_info));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%u.%u.%u.%u, TP %u.%u.%u.%u",
		 FW_HDR_FW_VER_MAJOR_G(adapter->params.dev.fwrev),
		 FW_HDR_FW_VER_MINOR_G(adapter->params.dev.fwrev),
		 FW_HDR_FW_VER_MICRO_G(adapter->params.dev.fwrev),
		 FW_HDR_FW_VER_BUILD_G(adapter->params.dev.fwrev),
		 FW_HDR_FW_VER_MAJOR_G(adapter->params.dev.tprev),
		 FW_HDR_FW_VER_MINOR_G(adapter->params.dev.tprev),
		 FW_HDR_FW_VER_MICRO_G(adapter->params.dev.tprev),
		 FW_HDR_FW_VER_BUILD_G(adapter->params.dev.tprev));
}

/*
 * Return current adapter message level.
 */
static u32 cxgb4vf_get_msglevel(struct net_device *dev)
{
	return netdev2adap(dev)->msg_enable;
}

/*
 * Set current adapter message level.
 */
static void cxgb4vf_set_msglevel(struct net_device *dev, u32 msglevel)
{
	netdev2adap(dev)->msg_enable = msglevel;
}

/*
 * Return the device's current Queue Set ring size parameters along with the
 * allowed maximum values.  Since ethtool doesn't understand the concept of
 * multi-queue devices, we just return the current values associated with the
 * first Queue Set.
 */
static void cxgb4vf_get_ringparam(struct net_device *dev,
				  struct ethtool_ringparam *rp)
{
	const struct port_info *pi = netdev_priv(dev);
	const struct sge *s = &pi->adapter->sge;

	rp->rx_max_pending = MAX_RX_BUFFERS;
	rp->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
	rp->rx_jumbo_max_pending = 0;
	rp->tx_max_pending = MAX_TXQ_ENTRIES;

	rp->rx_pending = s->ethrxq[pi->first_qset].fl.size - MIN_FL_RESID;
	rp->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
	rp->rx_jumbo_pending = 0;
	rp->tx_pending = s->ethtxq[pi->first_qset].q.size;
}

/*
 * Set the Queue Set ring size parameters for the device.  Again, since
 * ethtool doesn't allow for the concept of multiple queues per device, we'll
 * apply these new values across all of the Queue Sets associated with the
 * device -- after vetting them of course!
 */
static int cxgb4vf_set_ringparam(struct net_device *dev,
				 struct ethtool_ringparam *rp)
{
	const struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct sge *s = &adapter->sge;
	int qs;

	if (rp->rx_pending > MAX_RX_BUFFERS ||
	    rp->rx_jumbo_pending ||
	    rp->tx_pending > MAX_TXQ_ENTRIES ||
	    rp->rx_mini_pending > MAX_RSPQ_ENTRIES ||
	    rp->rx_mini_pending < MIN_RSPQ_ENTRIES ||
	    rp->rx_pending < MIN_FL_ENTRIES ||
	    rp->tx_pending < MIN_TXQ_ENTRIES)
		return -EINVAL;

	if (adapter->flags & FULL_INIT_DONE)
		return -EBUSY;

	for (qs = pi->first_qset; qs < pi->first_qset + pi->nqsets; qs++) {
		s->ethrxq[qs].fl.size = rp->rx_pending + MIN_FL_RESID;
		s->ethrxq[qs].rspq.size = rp->rx_mini_pending;
		s->ethtxq[qs].q.size = rp->tx_pending;
	}
	return 0;
}
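
/*
 * Example (hypothetical interface name): "ethtool -G eth0 rx 1024
 * rx-mini 512 tx 1024" resizes the Free Lists, response queues and TX
 * queues of every Queue Set on the interface.  Note the -EBUSY above:
 * the rings can only be resized before the first interface on the adapter
 * is brought up, because that's when the queues get allocated.
 */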

/*
 * Return the interrupt holdoff timer and count for the first Queue Set on the
 * device.  Our extension ioctl() (the cxgbtool interface) allows the
 * interrupt holdoff timer to be read on all of the device's Queue Sets.
 */
static int cxgb4vf_get_coalesce(struct net_device *dev,
				struct ethtool_coalesce *coalesce)
{
	const struct port_info *pi = netdev_priv(dev);
	const struct adapter *adapter = pi->adapter;
	const struct sge_rspq *rspq = &adapter->sge.ethrxq[pi->first_qset].rspq;

	coalesce->rx_coalesce_usecs = qtimer_val(adapter, rspq);
	coalesce->rx_max_coalesced_frames =
		((rspq->intr_params & QINTR_CNT_EN_F)
		 ? adapter->sge.counter_val[rspq->pktcnt_idx]
		 : 0);
	return 0;
}

/*
 * Set the RX interrupt holdoff timer and count for the first Queue Set on the
 * interface.  Our extension ioctl() (the cxgbtool interface) allows us to set
 * the interrupt holdoff timer on any of the device's Queue Sets.
 */
static int cxgb4vf_set_coalesce(struct net_device *dev,
				struct ethtool_coalesce *coalesce)
{
	const struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	return set_rxq_intr_params(adapter,
				   &adapter->sge.ethrxq[pi->first_qset].rspq,
				   coalesce->rx_coalesce_usecs,
				   coalesce->rx_max_coalesced_frames);
}

/*
 * Report current port link pause parameter settings.
 */
static void cxgb4vf_get_pauseparam(struct net_device *dev,
				   struct ethtool_pauseparam *pauseparam)
{
	struct port_info *pi = netdev_priv(dev);

	pauseparam->autoneg = (pi->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
	pauseparam->rx_pause = (pi->link_cfg.fc & PAUSE_RX) != 0;
	pauseparam->tx_pause = (pi->link_cfg.fc & PAUSE_TX) != 0;
}

/*
 * Identify the port by blinking the port's LED.
 */
static int cxgb4vf_phys_id(struct net_device *dev,
			   enum ethtool_phys_id_state state)
{
	unsigned int val;
	struct port_info *pi = netdev_priv(dev);

	if (state == ETHTOOL_ID_ACTIVE)
		val = 0xffff;
	else if (state == ETHTOOL_ID_INACTIVE)
		val = 0;
	else
		return -EINVAL;

	return t4vf_identify_port(pi->adapter, pi->viid, val);
}
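
/*
 * Reached from userspace via e.g. "ethtool -p eth0 5" (hypothetical
 * interface name): the ethtool core calls in once with ETHTOOL_ID_ACTIVE
 * to start the blinking and again with ETHTOOL_ID_INACTIVE when the
 * requested duration expires.
 */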

/*
 * Port stats maintained per queue of the port.
 */
struct queue_port_stats {
	u64 tso;
	u64 tx_csum;
	u64 rx_csum;
	u64 vlan_ex;
	u64 vlan_ins;
	u64 lro_pkts;
	u64 lro_merged;
};

/*
 * Strings for the ETH_SS_STATS statistics set ("ethtool -S").  Note that
 * these need to match the order of statistics returned by
 * t4vf_get_port_stats().
 */
static const char stats_strings[][ETH_GSTRING_LEN] = {
	/*
	 * These must match the layout of the t4vf_port_stats structure.
	 */
	"TxBroadcastBytes  ",
	"TxBroadcastFrames ",
	"TxMulticastBytes  ",
	"TxMulticastFrames ",
	"TxUnicastBytes    ",
	"TxUnicastFrames   ",
	"TxDroppedFrames   ",
	"TxOffloadBytes    ",
	"TxOffloadFrames   ",
	"RxBroadcastBytes  ",
	"RxBroadcastFrames ",
	"RxMulticastBytes  ",
	"RxMulticastFrames ",
	"RxUnicastBytes    ",
	"RxUnicastFrames   ",
	"RxErrorFrames     ",

	/*
	 * These are accumulated per-queue statistics and must match the
	 * order of the fields in the queue_port_stats structure.
	 */
	"TSO               ",
	"TxCsumOffload     ",
	"RxCsumGood        ",
	"VLANextractions   ",
	"VLANinsertions    ",
	"GROPackets        ",
	"GROMerged         ",
};

/*
 * Return the number of statistics in the specified statistics set.
 */
static int cxgb4vf_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(stats_strings);
	default:
		return -EOPNOTSUPP;
	}
	/*NOTREACHED*/
}

/*
 * Return the strings for the specified statistics set.
 */
static void cxgb4vf_get_strings(struct net_device *dev,
				u32 sset,
				u8 *data)
{
	switch (sset) {
	case ETH_SS_STATS:
		memcpy(data, stats_strings, sizeof(stats_strings));
		break;
	}
}

/*
 * Small utility routine to accumulate queue statistics across the queues of
 * a "port".
 */
static void collect_sge_port_stats(const struct adapter *adapter,
				   const struct port_info *pi,
				   struct queue_port_stats *stats)
{
	const struct sge_eth_txq *txq = &adapter->sge.ethtxq[pi->first_qset];
	const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[pi->first_qset];
	int qs;

	memset(stats, 0, sizeof(*stats));
	for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
		stats->tso += txq->tso;
		stats->tx_csum += txq->tx_cso;
		stats->rx_csum += rxq->stats.rx_cso;
		stats->vlan_ex += rxq->stats.vlan_ex;
		stats->vlan_ins += txq->vlan_ins;
		stats->lro_pkts += rxq->stats.lro_pkts;
		stats->lro_merged += rxq->stats.lro_merged;
	}
}

/*
 * Return the ETH_SS_STATS statistics set.
 */
static void cxgb4vf_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats,
				      u64 *data)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adapter = pi->adapter;
	int err = t4vf_get_port_stats(adapter, pi->pidx,
				      (struct t4vf_port_stats *)data);
	if (err)
		memset(data, 0, sizeof(struct t4vf_port_stats));

	data += sizeof(struct t4vf_port_stats) / sizeof(u64);
	collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
}

/*
 * Return the size of our register map.
 */
static int cxgb4vf_get_regs_len(struct net_device *dev)
{
	return T4VF_REGMAP_SIZE;
}

/*
 * Dump a block of registers, start to end inclusive, into a buffer.
 */
static void reg_block_dump(struct adapter *adapter, void *regbuf,
			   unsigned int start, unsigned int end)
{
	u32 *bp = regbuf + start - T4VF_REGMAP_START;

	for ( ; start <= end; start += sizeof(u32)) {
		/*
		 * Avoid reading the Mailbox Control register since that
		 * can trigger a Mailbox Ownership Arbitration cycle and
		 * interfere with communication with the firmware.
		 */
		if (start == T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL)
			*bp++ = 0xffff;
		else
			*bp++ = t4_read_reg(adapter, start);
	}
}

/*
 * Copy our entire register map into the provided buffer.
 */
static void cxgb4vf_get_regs(struct net_device *dev,
			     struct ethtool_regs *regs,
			     void *regbuf)
{
	struct adapter *adapter = netdev2adap(dev);

	regs->version = mk_adap_vers(adapter);

	/*
	 * Fill in register buffer with our register map.
	 */
	memset(regbuf, 0, T4VF_REGMAP_SIZE);

	reg_block_dump(adapter, regbuf,
		       T4VF_SGE_BASE_ADDR + T4VF_MOD_MAP_SGE_FIRST,
		       T4VF_SGE_BASE_ADDR + T4VF_MOD_MAP_SGE_LAST);
	reg_block_dump(adapter, regbuf,
		       T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_FIRST,
		       T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_LAST);

	/* T5 adds new registers in the PL Register map.
	 */
	reg_block_dump(adapter, regbuf,
		       T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_FIRST,
		       T4VF_PL_BASE_ADDR + (is_t4(adapter->params.chip)
		       ? PL_VF_WHOAMI_A : PL_VF_REVISION_A));
	reg_block_dump(adapter, regbuf,
		       T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_FIRST,
		       T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_LAST);

	reg_block_dump(adapter, regbuf,
		       T4VF_MBDATA_BASE_ADDR + T4VF_MBDATA_FIRST,
		       T4VF_MBDATA_BASE_ADDR + T4VF_MBDATA_LAST);
}
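
/*
 * This dump is what "ethtool -d eth0" (hypothetical interface name)
 * returns; the regs->version stamped by mk_adap_vers() lets userspace
 * decoders distinguish a cxgb4vf register map from a PF (cxgb4) one.
 */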

/*
 * Report current Wake On LAN settings.
 */
static void cxgb4vf_get_wol(struct net_device *dev,
			    struct ethtool_wolinfo *wol)
{
	wol->supported = 0;
	wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

/*
 * TCP Segmentation Offload flags which we support.
 */
#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)

static const struct ethtool_ops cxgb4vf_ethtool_ops = {
	.get_link_ksettings	= cxgb4vf_get_link_ksettings,
	.get_drvinfo		= cxgb4vf_get_drvinfo,
	.get_msglevel		= cxgb4vf_get_msglevel,
	.set_msglevel		= cxgb4vf_set_msglevel,
	.get_ringparam		= cxgb4vf_get_ringparam,
	.set_ringparam		= cxgb4vf_set_ringparam,
	.get_coalesce		= cxgb4vf_get_coalesce,
	.set_coalesce		= cxgb4vf_set_coalesce,
	.get_pauseparam		= cxgb4vf_get_pauseparam,
	.get_link		= ethtool_op_get_link,
	.get_strings		= cxgb4vf_get_strings,
	.set_phys_id		= cxgb4vf_phys_id,
	.get_sset_count		= cxgb4vf_get_sset_count,
	.get_ethtool_stats	= cxgb4vf_get_ethtool_stats,
	.get_regs_len		= cxgb4vf_get_regs_len,
	.get_regs		= cxgb4vf_get_regs,
	.get_wol		= cxgb4vf_get_wol,
};

/*
 * /sys/kernel/debug/cxgb4vf support code and data.
 * ================================================
 */

/*
 * Show Firmware Mailbox Command/Reply Log
 *
 * Note that we don't do any locking when dumping the Firmware Mailbox Log so
 * it's possible that we can catch things during a log update and therefore
 * see partially corrupted log entries.  But it's probably Good Enough(tm).
 * If we ever decide that we want to make sure that we're dumping a coherent
 * log, we'd need to perform locking in the mailbox logging and in
 * mboxlog_open() where we'd need to grab the entire mailbox log in one go
 * like we do for the Firmware Device Log.  But as stated above, meh ...
 */
1811 static int mboxlog_show(struct seq_file *seq, void *v)
1812 {
1813 	struct adapter *adapter = seq->private;
1814 	struct mbox_cmd_log *log = adapter->mbox_log;
1815 	struct mbox_cmd *entry;
1816 	int entry_idx, i;
1817 
1818 	if (v == SEQ_START_TOKEN) {
1819 		seq_printf(seq,
1820 			   "%10s  %15s  %5s  %5s  %s\n",
1821 			   "Seq#", "Tstamp", "Atime", "Etime",
1822 			   "Command/Reply");
1823 		return 0;
1824 	}
1825 
1826 	entry_idx = log->cursor + ((uintptr_t)v - 2);
1827 	if (entry_idx >= log->size)
1828 		entry_idx -= log->size;
1829 	entry = mbox_cmd_log_entry(log, entry_idx);
1830 
1831 	/* skip over unused entries */
1832 	if (entry->timestamp == 0)
1833 		return 0;
1834 
1835 	seq_printf(seq, "%10u  %15llu  %5d  %5d",
1836 		   entry->seqno, entry->timestamp,
1837 		   entry->access, entry->execute);
1838 	for (i = 0; i < MBOX_LEN / 8; i++) {
1839 		u64 flit = entry->cmd[i];
1840 		u32 hi = (u32)(flit >> 32);
1841 		u32 lo = (u32)flit;
1842 
1843 		seq_printf(seq, "  %08x %08x", hi, lo);
1844 	}
1845 	seq_puts(seq, "\n");
1846 	return 0;
1847 }
1848 
1849 static inline void *mboxlog_get_idx(struct seq_file *seq, loff_t pos)
1850 {
1851 	struct adapter *adapter = seq->private;
1852 	struct mbox_cmd_log *log = adapter->mbox_log;
1853 
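	/* Iterator values encode "position + 1" so that a valid position is
	 * never returned as NULL, the end-of-sequence marker; position 0
	 * itself is handed back as SEQ_START_TOKEN by mboxlog_start().
	 * mboxlog_show() undoes both offsets by subtracting 2 to recover
	 * the entry index relative to the log cursor.
	 */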
1854 	return ((pos <= log->size) ? (void *)(uintptr_t)(pos + 1) : NULL);
1855 }
1856 
1857 static void *mboxlog_start(struct seq_file *seq, loff_t *pos)
1858 {
1859 	return *pos ? mboxlog_get_idx(seq, *pos) : SEQ_START_TOKEN;
1860 }
1861 
1862 static void *mboxlog_next(struct seq_file *seq, void *v, loff_t *pos)
1863 {
1864 	++*pos;
1865 	return mboxlog_get_idx(seq, *pos);
1866 }
1867 
1868 static void mboxlog_stop(struct seq_file *seq, void *v)
1869 {
1870 }
1871 
1872 static const struct seq_operations mboxlog_seq_ops = {
1873 	.start = mboxlog_start,
1874 	.next  = mboxlog_next,
1875 	.stop  = mboxlog_stop,
1876 	.show  = mboxlog_show
1877 };
1878 
1879 static int mboxlog_open(struct inode *inode, struct file *file)
1880 {
1881 	int res = seq_open(file, &mboxlog_seq_ops);
1882 
1883 	if (!res) {
1884 		struct seq_file *seq = file->private_data;
1885 
1886 		seq->private = inode->i_private;
1887 	}
1888 	return res;
1889 }
1890 
1891 static const struct file_operations mboxlog_fops = {
1892 	.owner   = THIS_MODULE,
1893 	.open    = mboxlog_open,
1894 	.read    = seq_read,
1895 	.llseek  = seq_lseek,
1896 	.release = seq_release,
1897 };
1898 
1899 /*
 * Show SGE Queue Set information.  We display QPL Queue Sets per line.
1901  */
1902 #define QPL	4
1903 
1904 static int sge_qinfo_show(struct seq_file *seq, void *v)
1905 {
1906 	struct adapter *adapter = seq->private;
1907 	int eth_entries = DIV_ROUND_UP(adapter->sge.ethqsets, QPL);
1908 	int qs, r = (uintptr_t)v - 1;
1909 
1910 	if (r)
1911 		seq_putc(seq, '\n');
1912 
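	/* Helper macros: each one emits a single labelled row with one
	 * column per Queue Set in the current group.  S() prints a string
	 * value, T() indexes the local TX queue array and R() the local RX
	 * queue array; all of them rely on the local variables "n" (group
	 * size) and "qs" (column index) being in scope where they're used.
	 */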
1913 	#define S3(fmt_spec, s, v) \
1914 		do {\
1915 			seq_printf(seq, "%-12s", s); \
1916 			for (qs = 0; qs < n; ++qs) \
1917 				seq_printf(seq, " %16" fmt_spec, v); \
1918 			seq_putc(seq, '\n'); \
1919 		} while (0)
1920 	#define S(s, v)		S3("s", s, v)
1921 	#define T(s, v)		S3("u", s, txq[qs].v)
1922 	#define R(s, v)		S3("u", s, rxq[qs].v)
1923 
1924 	if (r < eth_entries) {
1925 		const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[r * QPL];
1926 		const struct sge_eth_txq *txq = &adapter->sge.ethtxq[r * QPL];
1927 		int n = min(QPL, adapter->sge.ethqsets - QPL * r);
1928 
1929 		S("QType:", "Ethernet");
1930 		S("Interface:",
1931 		  (rxq[qs].rspq.netdev
1932 		   ? rxq[qs].rspq.netdev->name
1933 		   : "N/A"));
1934 		S3("d", "Port:",
1935 		   (rxq[qs].rspq.netdev
1936 		    ? ((struct port_info *)
1937 		       netdev_priv(rxq[qs].rspq.netdev))->port_id
1938 		    : -1));
1939 		T("TxQ ID:", q.abs_id);
1940 		T("TxQ size:", q.size);
1941 		T("TxQ inuse:", q.in_use);
1942 		T("TxQ PIdx:", q.pidx);
1943 		T("TxQ CIdx:", q.cidx);
1944 		R("RspQ ID:", rspq.abs_id);
1945 		R("RspQ size:", rspq.size);
1946 		R("RspQE size:", rspq.iqe_len);
1947 		S3("u", "Intr delay:", qtimer_val(adapter, &rxq[qs].rspq));
1948 		S3("u", "Intr pktcnt:",
1949 		   adapter->sge.counter_val[rxq[qs].rspq.pktcnt_idx]);
1950 		R("RspQ CIdx:", rspq.cidx);
1951 		R("RspQ Gen:", rspq.gen);
1952 		R("FL ID:", fl.abs_id);
1953 		R("FL size:", fl.size - MIN_FL_RESID);
1954 		R("FL avail:", fl.avail);
1955 		R("FL PIdx:", fl.pidx);
1956 		R("FL CIdx:", fl.cidx);
1957 		return 0;
1958 	}
1959 
1960 	r -= eth_entries;
1961 	if (r == 0) {
1962 		const struct sge_rspq *evtq = &adapter->sge.fw_evtq;
1963 
1964 		seq_printf(seq, "%-12s %16s\n", "QType:", "FW event queue");
1965 		seq_printf(seq, "%-12s %16u\n", "RspQ ID:", evtq->abs_id);
1966 		seq_printf(seq, "%-12s %16u\n", "Intr delay:",
1967 			   qtimer_val(adapter, evtq));
1968 		seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:",
1969 			   adapter->sge.counter_val[evtq->pktcnt_idx]);
1970 		seq_printf(seq, "%-12s %16u\n", "RspQ Cidx:", evtq->cidx);
1971 		seq_printf(seq, "%-12s %16u\n", "RspQ Gen:", evtq->gen);
1972 	} else if (r == 1) {
1973 		const struct sge_rspq *intrq = &adapter->sge.intrq;
1974 
1975 		seq_printf(seq, "%-12s %16s\n", "QType:", "Interrupt Queue");
1976 		seq_printf(seq, "%-12s %16u\n", "RspQ ID:", intrq->abs_id);
1977 		seq_printf(seq, "%-12s %16u\n", "Intr delay:",
1978 			   qtimer_val(adapter, intrq));
1979 		seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:",
1980 			   adapter->sge.counter_val[intrq->pktcnt_idx]);
1981 		seq_printf(seq, "%-12s %16u\n", "RspQ Cidx:", intrq->cidx);
1982 		seq_printf(seq, "%-12s %16u\n", "RspQ Gen:", intrq->gen);
1983 	}
1984 
1985 	#undef R
1986 	#undef T
1987 	#undef S
1988 	#undef S3
1989 
1990 	return 0;
1991 }
1992 
1993 /*
1994  * Return the number of "entries" in our "file".  We group the multi-Queue
1995  * sections with QPL Queue Sets per "entry".  The sections of the output are:
1996  *
1997  *     Ethernet RX/TX Queue Sets
1998  *     Firmware Event Queue
1999  *     Forwarded Interrupt Queue (if in MSI mode)
2000  */
2001 static int sge_queue_entries(const struct adapter *adapter)
2002 {
2003 	return DIV_ROUND_UP(adapter->sge.ethqsets, QPL) + 1 +
2004 		((adapter->flags & USING_MSI) != 0);
2005 }
2006 
2007 static void *sge_queue_start(struct seq_file *seq, loff_t *pos)
2008 {
2009 	int entries = sge_queue_entries(seq->private);
2010 
2011 	return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
2012 }
2013 
2014 static void sge_queue_stop(struct seq_file *seq, void *v)
2015 {
2016 }
2017 
2018 static void *sge_queue_next(struct seq_file *seq, void *v, loff_t *pos)
2019 {
2020 	int entries = sge_queue_entries(seq->private);
2021 
2022 	++*pos;
2023 	return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
2024 }
2025 
2026 static const struct seq_operations sge_qinfo_seq_ops = {
2027 	.start = sge_queue_start,
2028 	.next  = sge_queue_next,
2029 	.stop  = sge_queue_stop,
2030 	.show  = sge_qinfo_show
2031 };
2032 
2033 static int sge_qinfo_open(struct inode *inode, struct file *file)
2034 {
2035 	int res = seq_open(file, &sge_qinfo_seq_ops);
2036 
2037 	if (!res) {
		struct seq_file *seq = file->private_data;

		seq->private = inode->i_private;
2040 	}
2041 	return res;
2042 }
2043 
2044 static const struct file_operations sge_qinfo_debugfs_fops = {
2045 	.owner   = THIS_MODULE,
2046 	.open    = sge_qinfo_open,
2047 	.read    = seq_read,
2048 	.llseek  = seq_lseek,
2049 	.release = seq_release,
2050 };
2051 
2052 /*
 * Show SGE Queue Set statistics.  We display QPL Queue Sets per line.
2054  */
2055 #define QPL	4
2056 
2057 static int sge_qstats_show(struct seq_file *seq, void *v)
2058 {
2059 	struct adapter *adapter = seq->private;
2060 	int eth_entries = DIV_ROUND_UP(adapter->sge.ethqsets, QPL);
2061 	int qs, r = (uintptr_t)v - 1;
2062 
2063 	if (r)
2064 		seq_putc(seq, '\n');
2065 
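	/* Same row-emitting helpers as in sge_qinfo_show() above; the
	 * T3()/R3() variants take an explicit format specifier for the few
	 * statistics which aren't "unsigned long".
	 */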
2066 	#define S3(fmt, s, v) \
2067 		do { \
2068 			seq_printf(seq, "%-16s", s); \
2069 			for (qs = 0; qs < n; ++qs) \
2070 				seq_printf(seq, " %8" fmt, v); \
2071 			seq_putc(seq, '\n'); \
2072 		} while (0)
2073 	#define S(s, v)		S3("s", s, v)
2074 
2075 	#define T3(fmt, s, v)	S3(fmt, s, txq[qs].v)
2076 	#define T(s, v)		T3("lu", s, v)
2077 
2078 	#define R3(fmt, s, v)	S3(fmt, s, rxq[qs].v)
2079 	#define R(s, v)		R3("lu", s, v)
2080 
2081 	if (r < eth_entries) {
2082 		const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[r * QPL];
2083 		const struct sge_eth_txq *txq = &adapter->sge.ethtxq[r * QPL];
2084 		int n = min(QPL, adapter->sge.ethqsets - QPL * r);
2085 
2086 		S("QType:", "Ethernet");
2087 		S("Interface:",
2088 		  (rxq[qs].rspq.netdev
2089 		   ? rxq[qs].rspq.netdev->name
2090 		   : "N/A"));
2091 		R3("u", "RspQNullInts:", rspq.unhandled_irqs);
2092 		R("RxPackets:", stats.pkts);
2093 		R("RxCSO:", stats.rx_cso);
2094 		R("VLANxtract:", stats.vlan_ex);
2095 		R("LROmerged:", stats.lro_merged);
2096 		R("LROpackets:", stats.lro_pkts);
2097 		R("RxDrops:", stats.rx_drops);
2098 		T("TSO:", tso);
2099 		T("TxCSO:", tx_cso);
2100 		T("VLANins:", vlan_ins);
2101 		T("TxQFull:", q.stops);
2102 		T("TxQRestarts:", q.restarts);
2103 		T("TxMapErr:", mapping_err);
2104 		R("FLAllocErr:", fl.alloc_failed);
2105 		R("FLLrgAlcErr:", fl.large_alloc_failed);
2106 		R("FLStarving:", fl.starving);
2107 		return 0;
2108 	}
2109 
2110 	r -= eth_entries;
2111 	if (r == 0) {
2112 		const struct sge_rspq *evtq = &adapter->sge.fw_evtq;
2113 
2114 		seq_printf(seq, "%-8s %16s\n", "QType:", "FW event queue");
2115 		seq_printf(seq, "%-16s %8u\n", "RspQNullInts:",
2116 			   evtq->unhandled_irqs);
2117 		seq_printf(seq, "%-16s %8u\n", "RspQ CIdx:", evtq->cidx);
2118 		seq_printf(seq, "%-16s %8u\n", "RspQ Gen:", evtq->gen);
2119 	} else if (r == 1) {
2120 		const struct sge_rspq *intrq = &adapter->sge.intrq;
2121 
2122 		seq_printf(seq, "%-8s %16s\n", "QType:", "Interrupt Queue");
2123 		seq_printf(seq, "%-16s %8u\n", "RspQNullInts:",
2124 			   intrq->unhandled_irqs);
2125 		seq_printf(seq, "%-16s %8u\n", "RspQ CIdx:", intrq->cidx);
2126 		seq_printf(seq, "%-16s %8u\n", "RspQ Gen:", intrq->gen);
2127 	}
2128 
2129 	#undef R
2130 	#undef T
2131 	#undef S
2132 	#undef R3
2133 	#undef T3
2134 	#undef S3
2135 
2136 	return 0;
2137 }
2138 
2139 /*
2140  * Return the number of "entries" in our "file".  We group the multi-Queue
2141  * sections with QPL Queue Sets per "entry".  The sections of the output are:
2142  *
2143  *     Ethernet RX/TX Queue Sets
2144  *     Firmware Event Queue
2145  *     Forwarded Interrupt Queue (if in MSI mode)
2146  */
2147 static int sge_qstats_entries(const struct adapter *adapter)
2148 {
2149 	return DIV_ROUND_UP(adapter->sge.ethqsets, QPL) + 1 +
2150 		((adapter->flags & USING_MSI) != 0);
2151 }
2152 
2153 static void *sge_qstats_start(struct seq_file *seq, loff_t *pos)
2154 {
2155 	int entries = sge_qstats_entries(seq->private);
2156 
2157 	return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
2158 }
2159 
2160 static void sge_qstats_stop(struct seq_file *seq, void *v)
2161 {
2162 }
2163 
2164 static void *sge_qstats_next(struct seq_file *seq, void *v, loff_t *pos)
2165 {
2166 	int entries = sge_qstats_entries(seq->private);
2167 
2168 	(*pos)++;
2169 	return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
2170 }
2171 
2172 static const struct seq_operations sge_qstats_seq_ops = {
2173 	.start = sge_qstats_start,
2174 	.next  = sge_qstats_next,
2175 	.stop  = sge_qstats_stop,
2176 	.show  = sge_qstats_show
2177 };
2178 
2179 static int sge_qstats_open(struct inode *inode, struct file *file)
2180 {
2181 	int res = seq_open(file, &sge_qstats_seq_ops);
2182 
2183 	if (res == 0) {
		struct seq_file *seq = file->private_data;

		seq->private = inode->i_private;
2186 	}
2187 	return res;
2188 }
2189 
2190 static const struct file_operations sge_qstats_proc_fops = {
2191 	.owner   = THIS_MODULE,
2192 	.open    = sge_qstats_open,
2193 	.read    = seq_read,
2194 	.llseek  = seq_lseek,
2195 	.release = seq_release,
2196 };
2197 
2198 /*
2199  * Show PCI-E SR-IOV Virtual Function Resource Limits.
2200  */
2201 static int resources_show(struct seq_file *seq, void *v)
2202 {
2203 	struct adapter *adapter = seq->private;
2204 	struct vf_resources *vfres = &adapter->params.vfres;
2205 
2206 	#define S(desc, fmt, var) \
2207 		seq_printf(seq, "%-60s " fmt "\n", \
2208 			   desc " (" #var "):", vfres->var)
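	/* For example, S("Virtual Interfaces", "%d", nvi) emits a line of
	 * the form "Virtual Interfaces (nvi):  <value>" with the label
	 * left-justified in a 60-character field.
	 */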
2209 
2210 	S("Virtual Interfaces", "%d", nvi);
2211 	S("Egress Queues", "%d", neq);
2212 	S("Ethernet Control", "%d", nethctrl);
2213 	S("Ingress Queues/w Free Lists/Interrupts", "%d", niqflint);
2214 	S("Ingress Queues", "%d", niq);
2215 	S("Traffic Class", "%d", tc);
2216 	S("Port Access Rights Mask", "%#x", pmask);
2217 	S("MAC Address Filters", "%d", nexactf);
2218 	S("Firmware Command Read Capabilities", "%#x", r_caps);
2219 	S("Firmware Command Write/Execute Capabilities", "%#x", wx_caps);
2220 
2221 	#undef S
2222 
2223 	return 0;
2224 }
2225 
2226 static int resources_open(struct inode *inode, struct file *file)
2227 {
2228 	return single_open(file, resources_show, inode->i_private);
2229 }
2230 
2231 static const struct file_operations resources_proc_fops = {
2232 	.owner   = THIS_MODULE,
2233 	.open    = resources_open,
2234 	.read    = seq_read,
2235 	.llseek  = seq_lseek,
2236 	.release = single_release,
2237 };
2238 
2239 /*
2240  * Show Virtual Interfaces.
2241  */
2242 static int interfaces_show(struct seq_file *seq, void *v)
2243 {
2244 	if (v == SEQ_START_TOKEN) {
2245 		seq_puts(seq, "Interface  Port   VIID\n");
2246 	} else {
2247 		struct adapter *adapter = seq->private;
2248 		int pidx = (uintptr_t)v - 2;
2249 		struct net_device *dev = adapter->port[pidx];
2250 		struct port_info *pi = netdev_priv(dev);
2251 
2252 		seq_printf(seq, "%9s  %4d  %#5x\n",
2253 			   dev->name, pi->port_id, pi->viid);
2254 	}
2255 	return 0;
2256 }
2257 
2258 static inline void *interfaces_get_idx(struct adapter *adapter, loff_t pos)
2259 {
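	/* As with mboxlog_get_idx() above, encode "position + 1" so that a
	 * valid position is never returned as NULL; interfaces_show()
	 * subtracts 2 to recover the port index.
	 */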
2260 	return pos <= adapter->params.nports
2261 		? (void *)(uintptr_t)(pos + 1)
2262 		: NULL;
2263 }
2264 
2265 static void *interfaces_start(struct seq_file *seq, loff_t *pos)
2266 {
2267 	return *pos
2268 		? interfaces_get_idx(seq->private, *pos)
2269 		: SEQ_START_TOKEN;
2270 }
2271 
2272 static void *interfaces_next(struct seq_file *seq, void *v, loff_t *pos)
2273 {
2274 	(*pos)++;
2275 	return interfaces_get_idx(seq->private, *pos);
2276 }
2277 
2278 static void interfaces_stop(struct seq_file *seq, void *v)
2279 {
2280 }
2281 
2282 static const struct seq_operations interfaces_seq_ops = {
2283 	.start = interfaces_start,
2284 	.next  = interfaces_next,
2285 	.stop  = interfaces_stop,
2286 	.show  = interfaces_show
2287 };
2288 
2289 static int interfaces_open(struct inode *inode, struct file *file)
2290 {
2291 	int res = seq_open(file, &interfaces_seq_ops);
2292 
2293 	if (res == 0) {
		struct seq_file *seq = file->private_data;

		seq->private = inode->i_private;
2296 	}
2297 	return res;
2298 }
2299 
2300 static const struct file_operations interfaces_proc_fops = {
2301 	.owner   = THIS_MODULE,
2302 	.open    = interfaces_open,
2303 	.read    = seq_read,
2304 	.llseek  = seq_lseek,
2305 	.release = seq_release,
2306 };
2307 
2308 /*
 * /sys/kernel/debug/cxgb4vf/ files list.
2310  */
2311 struct cxgb4vf_debugfs_entry {
2312 	const char *name;		/* name of debugfs node */
2313 	umode_t mode;			/* file system mode */
2314 	const struct file_operations *fops;
2315 };
2316 
2317 static struct cxgb4vf_debugfs_entry debugfs_files[] = {
2318 	{ "mboxlog",    S_IRUGO, &mboxlog_fops },
2319 	{ "sge_qinfo",  S_IRUGO, &sge_qinfo_debugfs_fops },
2320 	{ "sge_qstats", S_IRUGO, &sge_qstats_proc_fops },
2321 	{ "resources",  S_IRUGO, &resources_proc_fops },
2322 	{ "interfaces", S_IRUGO, &interfaces_proc_fops },
2323 };
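
/*
 * With debugfs mounted in the usual place these nodes can simply be read
 * from user space.  For example (assuming a hypothetical VF at PCI address
 * 0000:06:00.4):
 *
 *     cat /sys/kernel/debug/cxgb4vf/0000:06:00.4/resources
 *     cat /sys/kernel/debug/cxgb4vf/0000:06:00.4/mboxlog
 */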
2324 
2325 /*
2326  * Module and device initialization and cleanup code.
2327  * ==================================================
2328  */
2329 
2330 /*
 * Set up our /sys/kernel/debug/cxgb4vf sub-nodes.  We assume that the
2332  * directory (debugfs_root) has already been set up.
2333  */
2334 static int setup_debugfs(struct adapter *adapter)
2335 {
2336 	int i;
2337 
2338 	BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
2339 
2340 	/*
2341 	 * Debugfs support is best effort.
2342 	 */
2343 	for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
2344 		(void)debugfs_create_file(debugfs_files[i].name,
2345 				  debugfs_files[i].mode,
2346 				  adapter->debugfs_root,
2347 				  (void *)adapter,
2348 				  debugfs_files[i].fops);
2349 
2350 	return 0;
2351 }
2352 
2353 /*
2354  * Tear down the /sys/kernel/debug/cxgb4vf sub-nodes created above.  We leave
2355  * it to our caller to tear down the directory (debugfs_root).
2356  */
2357 static void cleanup_debugfs(struct adapter *adapter)
2358 {
2359 	BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
2360 
2361 	/*
2362 	 * Unlike our sister routine cleanup_proc(), we don't need to remove
2363 	 * individual entries because a call will be made to
2364 	 * debugfs_remove_recursive().  We just need to clean up any ancillary
2365 	 * persistent state.
2366 	 */
2367 	/* nothing to do */
2368 }
2369 
2370 /* Figure out how many Ports and Queue Sets we can support.  This depends on
2371  * knowing our Virtual Function Resources and may be called a second time if
2372  * we fall back from MSI-X to MSI Interrupt Mode.
2373  */
2374 static void size_nports_qsets(struct adapter *adapter)
2375 {
2376 	struct vf_resources *vfres = &adapter->params.vfres;
2377 	unsigned int ethqsets, pmask_nports;
2378 
2379 	/* The number of "ports" which we support is equal to the number of
2380 	 * Virtual Interfaces with which we've been provisioned.
2381 	 */
2382 	adapter->params.nports = vfres->nvi;
2383 	if (adapter->params.nports > MAX_NPORTS) {
		dev_warn(adapter->pdev_dev, "only using %d of %d provisioned"
			 " virtual interfaces; limited by driver maximum\n",
			 MAX_NPORTS, adapter->params.nports);
2387 		adapter->params.nports = MAX_NPORTS;
2388 	}
2389 
2390 	/* We may have been provisioned with more VIs than the number of
2391 	 * ports we're allowed to access (our Port Access Rights Mask).
2392 	 * This is obviously a configuration conflict but we don't want to
2393 	 * crash the kernel or anything silly just because of that.
2394 	 */
2395 	pmask_nports = hweight32(adapter->params.vfres.pmask);
2396 	if (pmask_nports < adapter->params.nports) {
2397 		dev_warn(adapter->pdev_dev, "only using %d of %d provisioned"
2398 			 " virtual interfaces; limited by Port Access Rights"
2399 			 " mask %#x\n", pmask_nports, adapter->params.nports,
2400 			 adapter->params.vfres.pmask);
2401 		adapter->params.nports = pmask_nports;
2402 	}
2403 
2404 	/* We need to reserve an Ingress Queue for the Asynchronous Firmware
2405 	 * Event Queue.  And if we're using MSI Interrupts, we'll also need to
	 * reserve an Ingress Queue for Forwarded Interrupts.
2407 	 *
2408 	 * The rest of the FL/Intr-capable ingress queues will be matched up
2409 	 * one-for-one with Ethernet/Control egress queues in order to form
	 * "Queue Sets" which will be apportioned among the "ports".  For
2411 	 * each Queue Set, we'll need the ability to allocate two Egress
2412 	 * Contexts -- one for the Ingress Queue Free List and one for the TX
2413 	 * Ethernet Queue.
2414 	 *
2415 	 * Note that even if we're currently configured to use MSI-X
2416 	 * Interrupts (module variable msi == MSI_MSIX) we may get downgraded
2417 	 * to MSI Interrupts if we can't get enough MSI-X Interrupts.  If that
2418 	 * happens we'll need to adjust things later.
2419 	 */
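	/* As a purely illustrative example (not provisioned defaults): a VF
	 * given niqflint = 8, nethctrl = 8 and neq = 16 ends up with
	 * 8 - 1 = 7 Queue Sets in MSI-X mode and 8 - 1 - 1 = 6 in MSI mode,
	 * comfortably within the Egress Context budget of neq/2 = 8.
	 */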
2420 	ethqsets = vfres->niqflint - 1 - (msi == MSI_MSI);
2421 	if (vfres->nethctrl != ethqsets)
2422 		ethqsets = min(vfres->nethctrl, ethqsets);
	if (vfres->neq < ethqsets * 2)
		ethqsets = vfres->neq / 2;
2425 	if (ethqsets > MAX_ETH_QSETS)
2426 		ethqsets = MAX_ETH_QSETS;
2427 	adapter->sge.max_ethqsets = ethqsets;
2428 
2429 	if (adapter->sge.max_ethqsets < adapter->params.nports) {
2430 		dev_warn(adapter->pdev_dev, "only using %d of %d available"
2431 			 " virtual interfaces (too few Queue Sets)\n",
2432 			 adapter->sge.max_ethqsets, adapter->params.nports);
2433 		adapter->params.nports = adapter->sge.max_ethqsets;
2434 	}
2435 }
2436 
2437 /*
2438  * Perform early "adapter" initialization.  This is where we discover what
2439  * adapter parameters we're going to be using and initialize basic adapter
2440  * hardware support.
2441  */
2442 static int adap_init0(struct adapter *adapter)
2443 {
2444 	struct sge_params *sge_params = &adapter->params.sge;
2445 	struct sge *s = &adapter->sge;
2446 	int err;
2447 	u32 param, val = 0;
2448 
2449 	/*
2450 	 * Some environments do not properly handle PCIE FLRs -- e.g. in Linux
2451 	 * 2.6.31 and later we can't call pci_reset_function() in order to
	 * issue an FLR because of a self-deadlock on the device semaphore.
2453 	 * Meanwhile, the OS infrastructure doesn't issue FLRs in all the
2454 	 * cases where they're needed -- for instance, some versions of KVM
2455 	 * fail to reset "Assigned Devices" when the VM reboots.  Therefore we
2456 	 * use the firmware based reset in order to reset any per function
2457 	 * state.
2458 	 */
2459 	err = t4vf_fw_reset(adapter);
2460 	if (err < 0) {
2461 		dev_err(adapter->pdev_dev, "FW reset failed: err=%d\n", err);
2462 		return err;
2463 	}
2464 
2465 	/*
2466 	 * Grab basic operational parameters.  These will predominantly have
2467 	 * been set up by the Physical Function Driver or will be hard coded
2468 	 * into the adapter.  We just have to live with them ...  Note that
2469 	 * we _must_ get our VPD parameters before our SGE parameters because
2470 	 * we need to know the adapter's core clock from the VPD in order to
2471 	 * properly decode the SGE Timer Values.
2472 	 */
2473 	err = t4vf_get_dev_params(adapter);
2474 	if (err) {
2475 		dev_err(adapter->pdev_dev, "unable to retrieve adapter"
2476 			" device parameters: err=%d\n", err);
2477 		return err;
2478 	}
2479 	err = t4vf_get_vpd_params(adapter);
2480 	if (err) {
2481 		dev_err(adapter->pdev_dev, "unable to retrieve adapter"
2482 			" VPD parameters: err=%d\n", err);
2483 		return err;
2484 	}
2485 	err = t4vf_get_sge_params(adapter);
2486 	if (err) {
2487 		dev_err(adapter->pdev_dev, "unable to retrieve adapter"
2488 			" SGE parameters: err=%d\n", err);
2489 		return err;
2490 	}
2491 	err = t4vf_get_rss_glb_config(adapter);
2492 	if (err) {
2493 		dev_err(adapter->pdev_dev, "unable to retrieve adapter"
2494 			" RSS parameters: err=%d\n", err);
2495 		return err;
2496 	}
2497 	if (adapter->params.rss.mode !=
2498 	    FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
2499 		dev_err(adapter->pdev_dev, "unable to operate with global RSS"
2500 			" mode %d\n", adapter->params.rss.mode);
2501 		return -EINVAL;
2502 	}
2503 	err = t4vf_sge_init(adapter);
2504 	if (err) {
2505 		dev_err(adapter->pdev_dev, "unable to use adapter parameters:"
2506 			" err=%d\n", err);
2507 		return err;
2508 	}
2509 
2510 	/* If we're running on newer firmware, let it know that we're
2511 	 * prepared to deal with encapsulated CPL messages.  Older
2512 	 * firmware won't understand this and we'll just get
2513 	 * unencapsulated messages ...
2514 	 */
2515 	param = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
2516 		FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP);
2517 	val = 1;
2518 	(void) t4vf_set_params(adapter, 1, &param, &val);
2519 
2520 	/*
2521 	 * Retrieve our RX interrupt holdoff timer values and counter
2522 	 * threshold values from the SGE parameters.
2523 	 */
2524 	s->timer_val[0] = core_ticks_to_us(adapter,
2525 		TIMERVALUE0_G(sge_params->sge_timer_value_0_and_1));
2526 	s->timer_val[1] = core_ticks_to_us(adapter,
2527 		TIMERVALUE1_G(sge_params->sge_timer_value_0_and_1));
2528 	s->timer_val[2] = core_ticks_to_us(adapter,
2529 		TIMERVALUE0_G(sge_params->sge_timer_value_2_and_3));
2530 	s->timer_val[3] = core_ticks_to_us(adapter,
2531 		TIMERVALUE1_G(sge_params->sge_timer_value_2_and_3));
2532 	s->timer_val[4] = core_ticks_to_us(adapter,
2533 		TIMERVALUE0_G(sge_params->sge_timer_value_4_and_5));
2534 	s->timer_val[5] = core_ticks_to_us(adapter,
2535 		TIMERVALUE1_G(sge_params->sge_timer_value_4_and_5));
2536 
2537 	s->counter_val[0] = THRESHOLD_0_G(sge_params->sge_ingress_rx_threshold);
2538 	s->counter_val[1] = THRESHOLD_1_G(sge_params->sge_ingress_rx_threshold);
2539 	s->counter_val[2] = THRESHOLD_2_G(sge_params->sge_ingress_rx_threshold);
2540 	s->counter_val[3] = THRESHOLD_3_G(sge_params->sge_ingress_rx_threshold);
2541 
2542 	/*
2543 	 * Grab our Virtual Interface resource allocation, extract the
2544 	 * features that we're interested in and do a bit of sanity testing on
2545 	 * what we discover.
2546 	 */
2547 	err = t4vf_get_vfres(adapter);
2548 	if (err) {
2549 		dev_err(adapter->pdev_dev, "unable to get virtual interface"
2550 			" resources: err=%d\n", err);
2551 		return err;
2552 	}
2553 
2554 	/* Check for various parameter sanity issues */
2555 	if (adapter->params.vfres.pmask == 0) {
		dev_err(adapter->pdev_dev, "no port access configured/"
			"usable!\n");
2558 		return -EINVAL;
2559 	}
2560 	if (adapter->params.vfres.nvi == 0) {
2561 		dev_err(adapter->pdev_dev, "no virtual interfaces configured/"
2562 			"usable!\n");
2563 		return -EINVAL;
2564 	}
2565 
2566 	/* Initialize nports and max_ethqsets now that we have our Virtual
2567 	 * Function Resources.
2568 	 */
2569 	size_nports_qsets(adapter);
2570 
2571 	return 0;
2572 }
2573 
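/*
 * Initialize the generic fields of a Response Queue: the interrupt holdoff
 * timer index, an optional interrupt packet count threshold (enabled only
 * when pkt_cnt_idx is a valid SGE counter index), the Ingress Queue Entry
 * size and the queue size.
 */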
2574 static inline void init_rspq(struct sge_rspq *rspq, u8 timer_idx,
2575 			     u8 pkt_cnt_idx, unsigned int size,
2576 			     unsigned int iqe_size)
2577 {
2578 	rspq->intr_params = (QINTR_TIMER_IDX_V(timer_idx) |
2579 			     (pkt_cnt_idx < SGE_NCOUNTERS ?
2580 			      QINTR_CNT_EN_F : 0));
2581 	rspq->pktcnt_idx = (pkt_cnt_idx < SGE_NCOUNTERS
2582 			    ? pkt_cnt_idx
2583 			    : 0);
2584 	rspq->iqe_len = iqe_size;
2585 	rspq->size = size;
2586 }
2587 
2588 /*
2589  * Perform default configuration of DMA queues depending on the number and
2590  * type of ports we found and the number of available CPUs.  Most settings can
2591  * be modified by the admin via ethtool and cxgbtool prior to the adapter
2592  * being brought up for the first time.
2593  */
2594 static void cfg_queues(struct adapter *adapter)
2595 {
2596 	struct sge *s = &adapter->sge;
2597 	int q10g, n10g, qidx, pidx, qs;
2598 	size_t iqe_size;
2599 
2600 	/*
2601 	 * We should not be called till we know how many Queue Sets we can
2602 	 * support.  In particular, this means that we need to know what kind
2603 	 * of interrupts we'll be using ...
2604 	 */
2605 	BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0);
2606 
2607 	/*
2608 	 * Count the number of 10GbE Virtual Interfaces that we have.
2609 	 */
2610 	n10g = 0;
2611 	for_each_port(adapter, pidx)
2612 		n10g += is_x_10g_port(&adap2pinfo(adapter, pidx)->link_cfg);
2613 
2614 	/*
2615 	 * We default to 1 queue per non-10G port and up to # of cores queues
2616 	 * per 10G port.
2617 	 */
	if (n10g == 0) {
		q10g = 0;
	} else {
		int n1g = (adapter->params.nports - n10g);

		q10g = (adapter->sge.max_ethqsets - n1g) / n10g;
		if (q10g > num_online_cpus())
			q10g = num_online_cpus();
	}
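
	/* For example, on an adapter with one 10G and one 1G port and
	 * max_ethqsets = 16 (illustrative numbers only), the 1G port is
	 * assigned a single Queue Set and the 10G port gets
	 * min(15, num_online_cpus()) of them.
	 */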
2626 
2627 	/*
2628 	 * Allocate the "Queue Sets" to the various Virtual Interfaces.
2629 	 * The layout will be established in setup_sge_queues() when the
	 * adapter is brought up for the first time.
2631 	 */
2632 	qidx = 0;
2633 	for_each_port(adapter, pidx) {
2634 		struct port_info *pi = adap2pinfo(adapter, pidx);
2635 
2636 		pi->first_qset = qidx;
2637 		pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
2638 		qidx += pi->nqsets;
2639 	}
2640 	s->ethqsets = qidx;
2641 
2642 	/*
2643 	 * The Ingress Queue Entry Size for our various Response Queues needs
2644 	 * to be big enough to accommodate the largest message we can receive
2645 	 * from the chip/firmware; which is 64 bytes ...
2646 	 */
2647 	iqe_size = 64;
2648 
2649 	/*
2650 	 * Set up default Queue Set parameters ...  Start off with the
2651 	 * shortest interrupt holdoff timer.
2652 	 */
2653 	for (qs = 0; qs < s->max_ethqsets; qs++) {
2654 		struct sge_eth_rxq *rxq = &s->ethrxq[qs];
2655 		struct sge_eth_txq *txq = &s->ethtxq[qs];
2656 
2657 		init_rspq(&rxq->rspq, 0, 0, 1024, iqe_size);
2658 		rxq->fl.size = 72;
2659 		txq->q.size = 1024;
2660 	}
2661 
2662 	/*
2663 	 * The firmware event queue is used for link state changes and
2664 	 * notifications of TX DMA completions.
2665 	 */
2666 	init_rspq(&s->fw_evtq, SGE_TIMER_RSTRT_CNTR, 0, 512, iqe_size);
2667 
2668 	/*
2669 	 * The forwarded interrupt queue is used when we're in MSI interrupt
2670 	 * mode.  In this mode all interrupts associated with RX queues will
2671 	 * be forwarded to a single queue which we'll associate with our MSI
2672 	 * interrupt vector.  The messages dropped in the forwarded interrupt
2673 	 * queue will indicate which ingress queue needs servicing ...  This
2674 	 * queue needs to be large enough to accommodate all of the ingress
2675 	 * queues which are forwarding their interrupt (+1 to prevent the PIDX
2676 	 * from equalling the CIDX if every ingress queue has an outstanding
2677 	 * interrupt).  The queue doesn't need to be any larger because no
2678 	 * ingress queue will ever have more than one outstanding interrupt at
2679 	 * any time ...
2680 	 */
2681 	init_rspq(&s->intrq, SGE_TIMER_RSTRT_CNTR, 0, MSIX_ENTRIES + 1,
2682 		  iqe_size);
2683 }
2684 
2685 /*
2686  * Reduce the number of Ethernet queues across all ports to at most n.
2687  * n provides at least one queue per port.
2688  */
2689 static void reduce_ethqs(struct adapter *adapter, int n)
2690 {
2691 	int i;
2692 	struct port_info *pi;
2693 
2694 	/*
	 * While we have too many active Ethernet Queue Sets, iterate across
2696 	 * "ports" and reduce their individual Queue Set allocations.
2697 	 */
2698 	BUG_ON(n < adapter->params.nports);
2699 	while (n < adapter->sge.ethqsets)
2700 		for_each_port(adapter, i) {
2701 			pi = adap2pinfo(adapter, i);
2702 			if (pi->nqsets > 1) {
2703 				pi->nqsets--;
2704 				adapter->sge.ethqsets--;
2705 				if (adapter->sge.ethqsets <= n)
2706 					break;
2707 			}
2708 		}
2709 
2710 	/*
2711 	 * Reassign the starting Queue Sets for each of the "ports" ...
2712 	 */
2713 	n = 0;
2714 	for_each_port(adapter, i) {
2715 		pi = adap2pinfo(adapter, i);
2716 		pi->first_qset = n;
2717 		n += pi->nqsets;
2718 	}
2719 }
2720 
2721 /*
2722  * We need to grab enough MSI-X vectors to cover our interrupt needs.  Ideally
2723  * we get a separate MSI-X vector for every "Queue Set" plus any extras we
2724  * need.  Minimally we need one for every Virtual Interface plus those needed
2725  * for our "extras".  Note that this process may lower the maximum number of
2726  * allowed Queue Sets ...
2727  */
2728 static int enable_msix(struct adapter *adapter)
2729 {
2730 	int i, want, need, nqsets;
2731 	struct msix_entry entries[MSIX_ENTRIES];
2732 	struct sge *s = &adapter->sge;
2733 
2734 	for (i = 0; i < MSIX_ENTRIES; ++i)
2735 		entries[i].entry = i;
2736 
2737 	/*
2738 	 * We _want_ enough MSI-X interrupts to cover all of our "Queue Sets"
2739 	 * plus those needed for our "extras" (for example, the firmware
2740 	 * message queue).  We _need_ at least one "Queue Set" per Virtual
2741 	 * Interface plus those needed for our "extras".  So now we get to see
2742 	 * if the song is right ...
2743 	 */
2744 	want = s->max_ethqsets + MSIX_EXTRAS;
2745 	need = adapter->params.nports + MSIX_EXTRAS;
2746 
2747 	want = pci_enable_msix_range(adapter->pdev, entries, need, want);
2748 	if (want < 0)
2749 		return want;
2750 
2751 	nqsets = want - MSIX_EXTRAS;
2752 	if (nqsets < s->max_ethqsets) {
2753 		dev_warn(adapter->pdev_dev, "only enough MSI-X vectors"
2754 			 " for %d Queue Sets\n", nqsets);
2755 		s->max_ethqsets = nqsets;
2756 		if (nqsets < s->ethqsets)
2757 			reduce_ethqs(adapter, nqsets);
2758 	}
2759 	for (i = 0; i < want; ++i)
2760 		adapter->msix_info[i].vec = entries[i].vector;
2761 
2762 	return 0;
2763 }
2764 
2765 static const struct net_device_ops cxgb4vf_netdev_ops	= {
2766 	.ndo_open		= cxgb4vf_open,
2767 	.ndo_stop		= cxgb4vf_stop,
2768 	.ndo_start_xmit		= t4vf_eth_xmit,
2769 	.ndo_get_stats		= cxgb4vf_get_stats,
2770 	.ndo_set_rx_mode	= cxgb4vf_set_rxmode,
2771 	.ndo_set_mac_address	= cxgb4vf_set_mac_addr,
2772 	.ndo_validate_addr	= eth_validate_addr,
2773 	.ndo_do_ioctl		= cxgb4vf_do_ioctl,
2774 	.ndo_change_mtu		= cxgb4vf_change_mtu,
2775 	.ndo_fix_features	= cxgb4vf_fix_features,
2776 	.ndo_set_features	= cxgb4vf_set_features,
2777 #ifdef CONFIG_NET_POLL_CONTROLLER
2778 	.ndo_poll_controller	= cxgb4vf_poll_controller,
2779 #endif
2780 };
2781 
2782 /*
2783  * "Probe" a device: initialize a device and construct all kernel and driver
2784  * state needed to manage the device.  This routine is called "init_one" in
2785  * the PF Driver ...
2786  */
2787 static int cxgb4vf_pci_probe(struct pci_dev *pdev,
2788 			     const struct pci_device_id *ent)
2789 {
2790 	int pci_using_dac;
2791 	int err, pidx;
2792 	unsigned int pmask;
2793 	struct adapter *adapter;
2794 	struct port_info *pi;
2795 	struct net_device *netdev;
2796 	unsigned int pf;
2797 
2798 	/*
2799 	 * Print our driver banner the first time we're called to initialize a
2800 	 * device.
2801 	 */
2802 	pr_info_once("%s - version %s\n", DRV_DESC, DRV_VERSION);
2803 
2804 	/*
2805 	 * Initialize generic PCI device state.
2806 	 */
2807 	err = pci_enable_device(pdev);
2808 	if (err) {
2809 		dev_err(&pdev->dev, "cannot enable PCI device\n");
2810 		return err;
2811 	}
2812 
2813 	/*
2814 	 * Reserve PCI resources for the device.  If we can't get them some
2815 	 * other driver may have already claimed the device ...
2816 	 */
2817 	err = pci_request_regions(pdev, KBUILD_MODNAME);
2818 	if (err) {
2819 		dev_err(&pdev->dev, "cannot obtain PCI resources\n");
2820 		goto err_disable_device;
2821 	}
2822 
2823 	/*
2824 	 * Set up our DMA mask: try for 64-bit address masking first and
2825 	 * fall back to 32-bit if we can't get 64 bits ...
2826 	 */
2827 	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
2828 	if (err == 0) {
2829 		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
2830 		if (err) {
2831 			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for"
2832 				" coherent allocations\n");
2833 			goto err_release_regions;
2834 		}
2835 		pci_using_dac = 1;
2836 	} else {
2837 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2838 		if (err != 0) {
2839 			dev_err(&pdev->dev, "no usable DMA configuration\n");
2840 			goto err_release_regions;
2841 		}
2842 		pci_using_dac = 0;
2843 	}
2844 
2845 	/*
2846 	 * Enable bus mastering for the device ...
2847 	 */
2848 	pci_set_master(pdev);
2849 
2850 	/*
2851 	 * Allocate our adapter data structure and attach it to the device.
2852 	 */
2853 	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
2854 	if (!adapter) {
2855 		err = -ENOMEM;
2856 		goto err_release_regions;
2857 	}
2858 	pci_set_drvdata(pdev, adapter);
2859 	adapter->pdev = pdev;
2860 	adapter->pdev_dev = &pdev->dev;
2861 
2862 	adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
2863 				    (sizeof(struct mbox_cmd) *
2864 				     T4VF_OS_LOG_MBOX_CMDS),
2865 				    GFP_KERNEL);
2866 	if (!adapter->mbox_log) {
2867 		err = -ENOMEM;
2868 		goto err_free_adapter;
2869 	}
2870 	adapter->mbox_log->size = T4VF_OS_LOG_MBOX_CMDS;
2871 
2872 	/*
2873 	 * Initialize SMP data synchronization resources.
2874 	 */
2875 	spin_lock_init(&adapter->stats_lock);
2876 	spin_lock_init(&adapter->mbox_lock);
2877 	INIT_LIST_HEAD(&adapter->mlist.list);
2878 
2879 	/*
2880 	 * Map our I/O registers in BAR0.
2881 	 */
2882 	adapter->regs = pci_ioremap_bar(pdev, 0);
2883 	if (!adapter->regs) {
2884 		dev_err(&pdev->dev, "cannot map device registers\n");
2885 		err = -ENOMEM;
2886 		goto err_free_adapter;
2887 	}
2888 
2889 	/* Wait for the device to become ready before proceeding ...
2890 	 */
2891 	err = t4vf_prep_adapter(adapter);
2892 	if (err) {
2893 		dev_err(adapter->pdev_dev, "device didn't become ready:"
2894 			" err=%d\n", err);
2895 		goto err_unmap_bar0;
2896 	}
2897 
2898 	/* For T5 and later we want to use the new BAR-based User Doorbells,
2899 	 * so we need to map BAR2 here ...
2900 	 */
2901 	if (!is_t4(adapter->params.chip)) {
2902 		adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
2903 					   pci_resource_len(pdev, 2));
2904 		if (!adapter->bar2) {
2905 			dev_err(adapter->pdev_dev, "cannot map BAR2 doorbells\n");
2906 			err = -ENOMEM;
2907 			goto err_unmap_bar0;
2908 		}
2909 	}
2910 	/*
2911 	 * Initialize adapter level features.
2912 	 */
2913 	adapter->name = pci_name(pdev);
2914 	adapter->msg_enable = DFLT_MSG_ENABLE;
2915 
	/* If possible, we use the PCIe Relaxed Ordering Attribute to deliver
	 * Ingress Packet Data to Free List Buffers in order to allow for
	 * chipset performance optimizations between the Root Complex and
	 * Memory Controllers.  (Messages to the associated Ingress Queue
	 * notifying new Packet Placement in the Free List Buffers will be
	 * sent without the Relaxed Ordering Attribute, thus guaranteeing
	 * that all preceding PCIe Transaction Layer Packets will be
	 * processed first.)  But some Root Complexes have various issues
	 * with Upstream Transaction Layer Packets with the Relaxed Ordering
	 * Attribute set.  For PCIe devices below such Root Complexes the
	 * Relaxed Ordering bit is cleared in the configuration space, so we
	 * check our PCIe configuration space to see if it's flagged with
	 * advice against using Relaxed Ordering.
	 */
2930 	if (!pcie_relaxed_ordering_enabled(pdev))
2931 		adapter->flags |= ROOT_NO_RELAXED_ORDERING;
2932 
2933 	err = adap_init0(adapter);
2934 	if (err)
2935 		goto err_unmap_bar;
2936 
2937 	/*
2938 	 * Allocate our "adapter ports" and stitch everything together.
2939 	 */
2940 	pmask = adapter->params.vfres.pmask;
2941 	pf = t4vf_get_pf_from_vf(adapter);
2942 	for_each_port(adapter, pidx) {
2943 		int port_id, viid;
2944 		u8 mac[ETH_ALEN];
2945 		unsigned int naddr = 1;
2946 
2947 		/*
2948 		 * We simplistically allocate our virtual interfaces
2949 		 * sequentially across the port numbers to which we have
2950 		 * access rights.  This should be configurable in some manner
2951 		 * ...
2952 		 */
2953 		if (pmask == 0)
2954 			break;
2955 		port_id = ffs(pmask) - 1;
2956 		pmask &= ~(1 << port_id);
2957 		viid = t4vf_alloc_vi(adapter, port_id);
2958 		if (viid < 0) {
2959 			dev_err(&pdev->dev, "cannot allocate VI for port %d:"
2960 				" err=%d\n", port_id, viid);
2961 			err = viid;
2962 			goto err_free_dev;
2963 		}
2964 
2965 		/*
2966 		 * Allocate our network device and stitch things together.
2967 		 */
2968 		netdev = alloc_etherdev_mq(sizeof(struct port_info),
2969 					   MAX_PORT_QSETS);
2970 		if (netdev == NULL) {
2971 			t4vf_free_vi(adapter, viid);
2972 			err = -ENOMEM;
2973 			goto err_free_dev;
2974 		}
2975 		adapter->port[pidx] = netdev;
2976 		SET_NETDEV_DEV(netdev, &pdev->dev);
2977 		pi = netdev_priv(netdev);
2978 		pi->adapter = adapter;
2979 		pi->pidx = pidx;
2980 		pi->port_id = port_id;
2981 		pi->viid = viid;
2982 
2983 		/*
2984 		 * Initialize the starting state of our "port" and register
2985 		 * it.
2986 		 */
2987 		pi->xact_addr_filt = -1;
2988 		netif_carrier_off(netdev);
2989 		netdev->irq = pdev->irq;
2990 
2991 		netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
2992 			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2993 			NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_RXCSUM;
2994 		netdev->vlan_features = NETIF_F_SG | TSO_FLAGS |
2995 			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2996 			NETIF_F_HIGHDMA;
2997 		netdev->features = netdev->hw_features |
2998 				   NETIF_F_HW_VLAN_CTAG_TX;
2999 		if (pci_using_dac)
3000 			netdev->features |= NETIF_F_HIGHDMA;
3001 
3002 		netdev->priv_flags |= IFF_UNICAST_FLT;
3003 		netdev->min_mtu = 81;
3004 		netdev->max_mtu = ETH_MAX_MTU;
3005 
3006 		netdev->netdev_ops = &cxgb4vf_netdev_ops;
3007 		netdev->ethtool_ops = &cxgb4vf_ethtool_ops;
3008 		netdev->dev_port = pi->port_id;
3009 
3010 		/*
3011 		 * Initialize the hardware/software state for the port.
3012 		 */
3013 		err = t4vf_port_init(adapter, pidx);
3014 		if (err) {
3015 			dev_err(&pdev->dev, "cannot initialize port %d\n",
3016 				pidx);
3017 			goto err_free_dev;
3018 		}
3019 
3020 		err = t4vf_get_vf_mac_acl(adapter, pf, &naddr, mac);
3021 		if (err) {
3022 			dev_err(&pdev->dev,
3023 				"unable to determine MAC ACL address, "
3024 				"continuing anyway.. (status %d)\n", err);
3025 		} else if (naddr && adapter->params.vfres.nvi == 1) {
3026 			struct sockaddr addr;
3027 
3028 			ether_addr_copy(addr.sa_data, mac);
3029 			err = cxgb4vf_set_mac_addr(netdev, &addr);
3030 			if (err) {
3031 				dev_err(&pdev->dev,
3032 					"unable to set MAC address %pM\n",
3033 					mac);
3034 				goto err_free_dev;
3035 			}
3036 			dev_info(&pdev->dev,
3037 				 "Using assigned MAC ACL: %pM\n", mac);
3038 		}
3039 	}
3040 
3041 	/* See what interrupts we'll be using.  If we've been configured to
3042 	 * use MSI-X interrupts, try to enable them but fall back to using
3043 	 * MSI interrupts if we can't enable MSI-X interrupts.  If we can't
3044 	 * get MSI interrupts we bail with the error.
3045 	 */
	if (msi == MSI_MSIX && enable_msix(adapter) == 0) {
		adapter->flags |= USING_MSIX;
	} else {
3049 		if (msi == MSI_MSIX) {
3050 			dev_info(adapter->pdev_dev,
3051 				 "Unable to use MSI-X Interrupts; falling "
3052 				 "back to MSI Interrupts\n");
3053 
3054 			/* We're going to need a Forwarded Interrupt Queue so
3055 			 * that may cut into how many Queue Sets we can
3056 			 * support.
3057 			 */
3058 			msi = MSI_MSI;
3059 			size_nports_qsets(adapter);
3060 		}
3061 		err = pci_enable_msi(pdev);
3062 		if (err) {
3063 			dev_err(&pdev->dev, "Unable to allocate MSI Interrupts;"
3064 				" err=%d\n", err);
3065 			goto err_free_dev;
3066 		}
3067 		adapter->flags |= USING_MSI;
3068 	}
3069 
3070 	/* Now that we know how many "ports" we have and what interrupt
3071 	 * mechanism we're going to use, we can configure our queue resources.
3072 	 */
3073 	cfg_queues(adapter);
3074 
3075 	/*
3076 	 * The "card" is now ready to go.  If any errors occur during device
3077 	 * registration we do not fail the whole "card" but rather proceed
3078 	 * only with the ports we manage to register successfully.  However we
3079 	 * must register at least one net device.
3080 	 */
3081 	for_each_port(adapter, pidx) {
		struct port_info *pi = netdev_priv(adapter->port[pidx]);

		netdev = adapter->port[pidx];
3084 		if (netdev == NULL)
3085 			continue;
3086 
3087 		netif_set_real_num_tx_queues(netdev, pi->nqsets);
3088 		netif_set_real_num_rx_queues(netdev, pi->nqsets);
3089 
3090 		err = register_netdev(netdev);
3091 		if (err) {
3092 			dev_warn(&pdev->dev, "cannot register net device %s,"
3093 				 " skipping\n", netdev->name);
3094 			continue;
3095 		}
3096 
3097 		set_bit(pidx, &adapter->registered_device_map);
3098 	}
3099 	if (adapter->registered_device_map == 0) {
3100 		dev_err(&pdev->dev, "could not register any net devices\n");
3101 		goto err_disable_interrupts;
3102 	}
3103 
3104 	/*
3105 	 * Set up our debugfs entries.
3106 	 */
3107 	if (!IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) {
3108 		adapter->debugfs_root =
3109 			debugfs_create_dir(pci_name(pdev),
3110 					   cxgb4vf_debugfs_root);
3111 		if (IS_ERR_OR_NULL(adapter->debugfs_root))
			dev_warn(&pdev->dev, "could not create debugfs"
				 " directory\n");
3114 		else
3115 			setup_debugfs(adapter);
3116 	}
3117 
3118 	/*
3119 	 * Print a short notice on the existence and configuration of the new
3120 	 * VF network device ...
3121 	 */
3122 	for_each_port(adapter, pidx) {
3123 		dev_info(adapter->pdev_dev, "%s: Chelsio VF NIC PCIe %s\n",
3124 			 adapter->port[pidx]->name,
3125 			 (adapter->flags & USING_MSIX) ? "MSI-X" :
3126 			 (adapter->flags & USING_MSI)  ? "MSI" : "");
3127 	}
3128 
3129 	/*
3130 	 * Return success!
3131 	 */
3132 	return 0;
3133 
3134 	/*
3135 	 * Error recovery and exit code.  Unwind state that's been created
3136 	 * so far and return the error.
3137 	 */
3138 err_disable_interrupts:
3139 	if (adapter->flags & USING_MSIX) {
3140 		pci_disable_msix(adapter->pdev);
3141 		adapter->flags &= ~USING_MSIX;
3142 	} else if (adapter->flags & USING_MSI) {
3143 		pci_disable_msi(adapter->pdev);
3144 		adapter->flags &= ~USING_MSI;
3145 	}
3146 
3147 err_free_dev:
3148 	for_each_port(adapter, pidx) {
3149 		netdev = adapter->port[pidx];
3150 		if (netdev == NULL)
3151 			continue;
3152 		pi = netdev_priv(netdev);
3153 		t4vf_free_vi(adapter, pi->viid);
3154 		if (test_bit(pidx, &adapter->registered_device_map))
3155 			unregister_netdev(netdev);
3156 		free_netdev(netdev);
3157 	}
3158 
3159 err_unmap_bar:
3160 	if (!is_t4(adapter->params.chip))
3161 		iounmap(adapter->bar2);
3162 
3163 err_unmap_bar0:
3164 	iounmap(adapter->regs);
3165 
3166 err_free_adapter:
3167 	kfree(adapter->mbox_log);
3168 	kfree(adapter);
3169 
3170 err_release_regions:
3171 	pci_release_regions(pdev);
3172 	pci_clear_master(pdev);
3173 
3174 err_disable_device:
3175 	pci_disable_device(pdev);
3176 
3177 	return err;
3178 }
3179 
3180 /*
3181  * "Remove" a device: tear down all kernel and driver state created in the
3182  * "probe" routine and quiesce the device (disable interrupts, etc.).  (Note
3183  * that this is called "remove_one" in the PF Driver.)
3184  */
3185 static void cxgb4vf_pci_remove(struct pci_dev *pdev)
3186 {
3187 	struct adapter *adapter = pci_get_drvdata(pdev);
3188 
3189 	/*
3190 	 * Tear down driver state associated with device.
3191 	 */
3192 	if (adapter) {
3193 		int pidx;
3194 
3195 		/*
3196 		 * Stop all of our activity.  Unregister network port,
3197 		 * disable interrupts, etc.
3198 		 */
3199 		for_each_port(adapter, pidx)
3200 			if (test_bit(pidx, &adapter->registered_device_map))
3201 				unregister_netdev(adapter->port[pidx]);
3202 		t4vf_sge_stop(adapter);
3203 		if (adapter->flags & USING_MSIX) {
3204 			pci_disable_msix(adapter->pdev);
3205 			adapter->flags &= ~USING_MSIX;
3206 		} else if (adapter->flags & USING_MSI) {
3207 			pci_disable_msi(adapter->pdev);
3208 			adapter->flags &= ~USING_MSI;
3209 		}
3210 
3211 		/*
3212 		 * Tear down our debugfs entries.
3213 		 */
3214 		if (!IS_ERR_OR_NULL(adapter->debugfs_root)) {
3215 			cleanup_debugfs(adapter);
3216 			debugfs_remove_recursive(adapter->debugfs_root);
3217 		}
3218 
3219 		/*
3220 		 * Free all of the various resources which we've acquired ...
3221 		 */
3222 		t4vf_free_sge_resources(adapter);
3223 		for_each_port(adapter, pidx) {
3224 			struct net_device *netdev = adapter->port[pidx];
3225 			struct port_info *pi;
3226 
3227 			if (netdev == NULL)
3228 				continue;
3229 
3230 			pi = netdev_priv(netdev);
3231 			t4vf_free_vi(adapter, pi->viid);
3232 			free_netdev(netdev);
3233 		}
3234 		iounmap(adapter->regs);
3235 		if (!is_t4(adapter->params.chip))
3236 			iounmap(adapter->bar2);
3237 		kfree(adapter->mbox_log);
3238 		kfree(adapter);
3239 	}
3240 
3241 	/*
3242 	 * Disable the device and release its PCI resources.
3243 	 */
3244 	pci_disable_device(pdev);
3245 	pci_clear_master(pdev);
3246 	pci_release_regions(pdev);
3247 }
3248 
3249 /*
 * "Shutdown" the device: quiesce it, stopping Ingress Packet and Interrupt
 * delivery.
3252  */
3253 static void cxgb4vf_pci_shutdown(struct pci_dev *pdev)
3254 {
3255 	struct adapter *adapter;
3256 	int pidx;
3257 
3258 	adapter = pci_get_drvdata(pdev);
3259 	if (!adapter)
3260 		return;
3261 
3262 	/* Disable all Virtual Interfaces.  This will shut down the
3263 	 * delivery of all ingress packets into the chip for these
3264 	 * Virtual Interfaces.
3265 	 */
3266 	for_each_port(adapter, pidx)
3267 		if (test_bit(pidx, &adapter->registered_device_map))
3268 			unregister_netdev(adapter->port[pidx]);
3269 
	/* Stop all SGE activity and tear down our interrupt vectors.  This
	 * prevents further ingress DMA and Interrupt delivery while the
	 * driver's internal pathways drain.
	 */
3273 	t4vf_sge_stop(adapter);
3274 	if (adapter->flags & USING_MSIX) {
3275 		pci_disable_msix(adapter->pdev);
3276 		adapter->flags &= ~USING_MSIX;
3277 	} else if (adapter->flags & USING_MSI) {
3278 		pci_disable_msi(adapter->pdev);
3279 		adapter->flags &= ~USING_MSI;
3280 	}
3281 
3282 	/*
3283 	 * Free up all Queues which will prevent further DMA and
3284 	 * Interrupts allowing various internal pathways to drain.
3285 	 */
3286 	t4vf_free_sge_resources(adapter);
3287 	pci_set_drvdata(pdev, NULL);
3288 }
3289 
3290 /* Macros needed to support the PCI Device ID Table ...
3291  */
3292 #define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
3293 	static const struct pci_device_id cxgb4vf_pci_tbl[] = {
3294 #define CH_PCI_DEVICE_ID_FUNCTION	0x8
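
/* The shared ID table in "../cxgb4/t4_pci_id_tbl.h" folds this function
 * number into every Device ID it emits, so the same header produces the PF
 * table for cxgb4 and the VF (function 8) table used here.
 */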
3295 
3296 #define CH_PCI_ID_TABLE_ENTRY(devid) \
3297 		{ PCI_VDEVICE(CHELSIO, (devid)), 0 }
3298 
3299 #define CH_PCI_DEVICE_ID_TABLE_DEFINE_END { 0, } }
3300 
3301 #include "../cxgb4/t4_pci_id_tbl.h"
3302 
3303 MODULE_DESCRIPTION(DRV_DESC);
3304 MODULE_AUTHOR("Chelsio Communications");
3305 MODULE_LICENSE("Dual BSD/GPL");
3306 MODULE_VERSION(DRV_VERSION);
3307 MODULE_DEVICE_TABLE(pci, cxgb4vf_pci_tbl);
3308 
3309 static struct pci_driver cxgb4vf_driver = {
3310 	.name		= KBUILD_MODNAME,
3311 	.id_table	= cxgb4vf_pci_tbl,
3312 	.probe		= cxgb4vf_pci_probe,
3313 	.remove		= cxgb4vf_pci_remove,
3314 	.shutdown	= cxgb4vf_pci_shutdown,
3315 };
3316 
3317 /*
3318  * Initialize global driver state.
3319  */
3320 static int __init cxgb4vf_module_init(void)
3321 {
3322 	int ret;
3323 
3324 	/*
3325 	 * Vet our module parameters.
3326 	 */
3327 	if (msi != MSI_MSIX && msi != MSI_MSI) {
3328 		pr_warn("bad module parameter msi=%d; must be %d (MSI-X or MSI) or %d (MSI)\n",
3329 			msi, MSI_MSIX, MSI_MSI);
3330 		return -EINVAL;
3331 	}
3332 
3333 	/* Debugfs support is optional, just warn if this fails */
3334 	cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
3335 	if (IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
3336 		pr_warn("could not create debugfs entry, continuing\n");
3337 
3338 	ret = pci_register_driver(&cxgb4vf_driver);
3339 	if (ret < 0 && !IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
3340 		debugfs_remove(cxgb4vf_debugfs_root);
3341 	return ret;
3342 }
3343 
3344 /*
3345  * Tear down global driver state.
3346  */
3347 static void __exit cxgb4vf_module_exit(void)
3348 {
3349 	pci_unregister_driver(&cxgb4vf_driver);
3350 	debugfs_remove(cxgb4vf_debugfs_root);
3351 }
3352 
3353 module_init(cxgb4vf_module_init);
3354 module_exit(cxgb4vf_module_exit);
3355