1 /*
2  * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
3  * driver for Linux.
4  *
5  * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
6  *
7  * This software is available to you under a choice of one of two
8  * licenses.  You may choose to be licensed under the terms of the GNU
9  * General Public License (GPL) Version 2, available from the file
10  * COPYING in the main directory of this source tree, or the
11  * OpenIB.org BSD license below:
12  *
13  *     Redistribution and use in source and binary forms, with or
14  *     without modification, are permitted provided that the following
15  *     conditions are met:
16  *
17  *      - Redistributions of source code must retain the above
18  *        copyright notice, this list of conditions and the following
19  *        disclaimer.
20  *
21  *      - Redistributions in binary form must reproduce the above
22  *        copyright notice, this list of conditions and the following
23  *        disclaimer in the documentation and/or other materials
24  *        provided with the distribution.
25  *
26  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33  * SOFTWARE.
34  */
35 
36 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
37 
38 #include <linux/module.h>
39 #include <linux/moduleparam.h>
40 #include <linux/init.h>
41 #include <linux/pci.h>
42 #include <linux/dma-mapping.h>
43 #include <linux/netdevice.h>
44 #include <linux/etherdevice.h>
45 #include <linux/debugfs.h>
46 #include <linux/ethtool.h>
47 #include <linux/mdio.h>
48 
49 #include "t4vf_common.h"
50 #include "t4vf_defs.h"
51 
52 #include "../cxgb4/t4_regs.h"
53 #include "../cxgb4/t4_msg.h"
54 
55 /*
56  * Generic information about the driver.
57  */
58 #define DRV_VERSION "2.0.0-ko"
59 #define DRV_DESC "Chelsio T4/T5/T6 Virtual Function (VF) Network Driver"
60 
61 /*
62  * Module Parameters.
63  * ==================
64  */
65 
66 /*
67  * Default ethtool "message level" for adapters.
68  */
69 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
70 			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
71 			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
72 
73 /*
74  * The driver uses the best interrupt scheme available on a platform in the
75  * order MSI-X then MSI.  This parameter determines which of these schemes the
76  * driver may consider as follows:
77  *
78  *     msi = 2: choose from among MSI-X and MSI
79  *     msi = 1: only consider MSI interrupts
80  *
81  * Note that unlike the Physical Function driver, this Virtual Function driver
82  * does _not_ support legacy INTx interrupts (this limitation is mandated by
83  * the PCI-E SR-IOV standard).
84  */
85 #define MSI_MSIX	2
86 #define MSI_MSI		1
87 #define MSI_DEFAULT	MSI_MSIX
88 
89 static int msi = MSI_DEFAULT;
90 
91 module_param(msi, int, 0644);
92 MODULE_PARM_DESC(msi, "whether to use MSI-X or MSI");
93 
94 /*
95  * Fundamental constants.
96  * ======================
97  */
98 
99 enum {
100 	MAX_TXQ_ENTRIES		= 16384,
101 	MAX_RSPQ_ENTRIES	= 16384,
102 	MAX_RX_BUFFERS		= 16384,
103 
104 	MIN_TXQ_ENTRIES		= 32,
105 	MIN_RSPQ_ENTRIES	= 128,
106 	MIN_FL_ENTRIES		= 16,
107 
108 	/*
109 	 * For purposes of manipulating the Free List size we need to
110 	 * recognize that Free Lists are actually Egress Queues (the host
111 	 * produces free buffers which the hardware consumes), Egress Queues
112 	 * indices are all in units of Egress Context Units bytes, and free
113 	 * list entries are 64-bit PCI DMA addresses.  And since the state of
114 	 * the Producer Index == the Consumer Index implies an EMPTY list, we
115 	 * always have at least one Egress Unit's worth of Free List entries
116 	 * unused.  See sge.c for more details ...
117 	 */
118 	EQ_UNIT = SGE_EQ_IDXSIZE,
119 	FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
120 	MIN_FL_RESID = FL_PER_EQ_UNIT,
121 };
122 
123 /*
124  * Global driver state.
125  * ====================
126  */
127 
128 static struct dentry *cxgb4vf_debugfs_root;
129 
130 /*
131  * OS "Callback" functions.
132  * ========================
133  */
134 
135 /*
136  * The link status has changed on the indicated "port" (Virtual Interface).
137  */
138 void t4vf_os_link_changed(struct adapter *adapter, int pidx, int link_ok)
139 {
140 	struct net_device *dev = adapter->port[pidx];
141 
142 	/*
143 	 * If the port is disabled or the current recorded "link up"
144 	 * status matches the new status, just return.
145 	 */
146 	if (!netif_running(dev) || link_ok == netif_carrier_ok(dev))
147 		return;
148 
149 	/*
150 	 * Tell the OS that the link status has changed and print a short
151 	 * informative message on the console about the event.
152 	 */
153 	if (link_ok) {
154 		const char *s;
155 		const char *fc;
156 		const struct port_info *pi = netdev_priv(dev);
157 
158 		netif_carrier_on(dev);
159 
160 		switch (pi->link_cfg.speed) {
161 		case 100:
162 			s = "100Mbps";
163 			break;
164 		case 1000:
165 			s = "1Gbps";
166 			break;
167 		case 10000:
168 			s = "10Gbps";
169 			break;
170 		case 25000:
171 			s = "25Gbps";
172 			break;
173 		case 40000:
174 			s = "40Gbps";
175 			break;
176 		case 100000:
177 			s = "100Gbps";
178 			break;
179 
180 		default:
181 			s = "unknown";
182 			break;
183 		}
184 
185 		switch ((int)pi->link_cfg.fc) {
186 		case PAUSE_RX:
187 			fc = "RX";
188 			break;
189 
190 		case PAUSE_TX:
191 			fc = "TX";
192 			break;
193 
194 		case PAUSE_RX | PAUSE_TX:
195 			fc = "RX/TX";
196 			break;
197 
198 		default:
199 			fc = "no";
200 			break;
201 		}
202 
203 		netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s, fc);
204 	} else {
205 		netif_carrier_off(dev);
206 		netdev_info(dev, "link down\n");
207 	}
208 }
209 
210 /*
211  * THe port module type has changed on the indicated "port" (Virtual
212  * Interface).
213  */
214 void t4vf_os_portmod_changed(struct adapter *adapter, int pidx)
215 {
216 	static const char * const mod_str[] = {
217 		NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
218 	};
219 	const struct net_device *dev = adapter->port[pidx];
220 	const struct port_info *pi = netdev_priv(dev);
221 
222 	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
223 		dev_info(adapter->pdev_dev, "%s: port module unplugged\n",
224 			 dev->name);
225 	else if (pi->mod_type < ARRAY_SIZE(mod_str))
226 		dev_info(adapter->pdev_dev, "%s: %s port module inserted\n",
227 			 dev->name, mod_str[pi->mod_type]);
228 	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
229 		dev_info(adapter->pdev_dev, "%s: unsupported optical port "
230 			 "module inserted\n", dev->name);
231 	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
232 		dev_info(adapter->pdev_dev, "%s: unknown port module inserted,"
233 			 "forcing TWINAX\n", dev->name);
234 	else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR)
235 		dev_info(adapter->pdev_dev, "%s: transceiver module error\n",
236 			 dev->name);
237 	else
238 		dev_info(adapter->pdev_dev, "%s: unknown module type %d "
239 			 "inserted\n", dev->name, pi->mod_type);
240 }
241 
242 /*
243  * Net device operations.
244  * ======================
245  */
246 
247 
248 
249 
250 /*
251  * Perform the MAC and PHY actions needed to enable a "port" (Virtual
252  * Interface).
253  */
254 static int link_start(struct net_device *dev)
255 {
256 	int ret;
257 	struct port_info *pi = netdev_priv(dev);
258 
259 	/*
260 	 * We do not set address filters and promiscuity here, the stack does
261 	 * that step explicitly. Enable vlan accel.
262 	 */
263 	ret = t4vf_set_rxmode(pi->adapter, pi->viid, dev->mtu, -1, -1, -1, 1,
264 			      true);
265 	if (ret == 0) {
266 		ret = t4vf_change_mac(pi->adapter, pi->viid,
267 				      pi->xact_addr_filt, dev->dev_addr, true);
268 		if (ret >= 0) {
269 			pi->xact_addr_filt = ret;
270 			ret = 0;
271 		}
272 	}
273 
274 	/*
275 	 * We don't need to actually "start the link" itself since the
276 	 * firmware will do that for us when the first Virtual Interface
277 	 * is enabled on a port.
278 	 */
279 	if (ret == 0)
280 		ret = t4vf_enable_vi(pi->adapter, pi->viid, true, true);
281 	return ret;
282 }
283 
284 /*
285  * Name the MSI-X interrupts.
286  */
287 static void name_msix_vecs(struct adapter *adapter)
288 {
289 	int namelen = sizeof(adapter->msix_info[0].desc) - 1;
290 	int pidx;
291 
292 	/*
293 	 * Firmware events.
294 	 */
295 	snprintf(adapter->msix_info[MSIX_FW].desc, namelen,
296 		 "%s-FWeventq", adapter->name);
297 	adapter->msix_info[MSIX_FW].desc[namelen] = 0;
298 
299 	/*
300 	 * Ethernet queues.
301 	 */
302 	for_each_port(adapter, pidx) {
303 		struct net_device *dev = adapter->port[pidx];
304 		const struct port_info *pi = netdev_priv(dev);
305 		int qs, msi;
306 
307 		for (qs = 0, msi = MSIX_IQFLINT; qs < pi->nqsets; qs++, msi++) {
308 			snprintf(adapter->msix_info[msi].desc, namelen,
309 				 "%s-%d", dev->name, qs);
310 			adapter->msix_info[msi].desc[namelen] = 0;
311 		}
312 	}
313 }
314 
315 /*
316  * Request all of our MSI-X resources.
317  */
318 static int request_msix_queue_irqs(struct adapter *adapter)
319 {
320 	struct sge *s = &adapter->sge;
321 	int rxq, msi, err;
322 
323 	/*
324 	 * Firmware events.
325 	 */
326 	err = request_irq(adapter->msix_info[MSIX_FW].vec, t4vf_sge_intr_msix,
327 			  0, adapter->msix_info[MSIX_FW].desc, &s->fw_evtq);
328 	if (err)
329 		return err;
330 
331 	/*
332 	 * Ethernet queues.
333 	 */
334 	msi = MSIX_IQFLINT;
335 	for_each_ethrxq(s, rxq) {
336 		err = request_irq(adapter->msix_info[msi].vec,
337 				  t4vf_sge_intr_msix, 0,
338 				  adapter->msix_info[msi].desc,
339 				  &s->ethrxq[rxq].rspq);
340 		if (err)
341 			goto err_free_irqs;
342 		msi++;
343 	}
344 	return 0;
345 
346 err_free_irqs:
347 	while (--rxq >= 0)
348 		free_irq(adapter->msix_info[--msi].vec, &s->ethrxq[rxq].rspq);
349 	free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq);
350 	return err;
351 }
352 
353 /*
354  * Free our MSI-X resources.
355  */
356 static void free_msix_queue_irqs(struct adapter *adapter)
357 {
358 	struct sge *s = &adapter->sge;
359 	int rxq, msi;
360 
361 	free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq);
362 	msi = MSIX_IQFLINT;
363 	for_each_ethrxq(s, rxq)
364 		free_irq(adapter->msix_info[msi++].vec,
365 			 &s->ethrxq[rxq].rspq);
366 }
367 
368 /*
369  * Turn on NAPI and start up interrupts on a response queue.
370  */
371 static void qenable(struct sge_rspq *rspq)
372 {
373 	napi_enable(&rspq->napi);
374 
375 	/*
376 	 * 0-increment the Going To Sleep register to start the timer and
377 	 * enable interrupts.
378 	 */
379 	t4_write_reg(rspq->adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
380 		     CIDXINC_V(0) |
381 		     SEINTARM_V(rspq->intr_params) |
382 		     INGRESSQID_V(rspq->cntxt_id));
383 }
384 
385 /*
386  * Enable NAPI scheduling and interrupt generation for all Receive Queues.
387  */
388 static void enable_rx(struct adapter *adapter)
389 {
390 	int rxq;
391 	struct sge *s = &adapter->sge;
392 
393 	for_each_ethrxq(s, rxq)
394 		qenable(&s->ethrxq[rxq].rspq);
395 	qenable(&s->fw_evtq);
396 
397 	/*
398 	 * The interrupt queue doesn't use NAPI so we do the 0-increment of
399 	 * its Going To Sleep register here to get it started.
400 	 */
401 	if (adapter->flags & USING_MSI)
402 		t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
403 			     CIDXINC_V(0) |
404 			     SEINTARM_V(s->intrq.intr_params) |
405 			     INGRESSQID_V(s->intrq.cntxt_id));
406 
407 }
408 
409 /*
410  * Wait until all NAPI handlers are descheduled.
411  */
412 static void quiesce_rx(struct adapter *adapter)
413 {
414 	struct sge *s = &adapter->sge;
415 	int rxq;
416 
417 	for_each_ethrxq(s, rxq)
418 		napi_disable(&s->ethrxq[rxq].rspq.napi);
419 	napi_disable(&s->fw_evtq.napi);
420 }
421 
422 /*
423  * Response queue handler for the firmware event queue.
424  */
425 static int fwevtq_handler(struct sge_rspq *rspq, const __be64 *rsp,
426 			  const struct pkt_gl *gl)
427 {
428 	/*
429 	 * Extract response opcode and get pointer to CPL message body.
430 	 */
431 	struct adapter *adapter = rspq->adapter;
432 	u8 opcode = ((const struct rss_header *)rsp)->opcode;
433 	void *cpl = (void *)(rsp + 1);
434 
435 	switch (opcode) {
436 	case CPL_FW6_MSG: {
437 		/*
438 		 * We've received an asynchronous message from the firmware.
439 		 */
440 		const struct cpl_fw6_msg *fw_msg = cpl;
441 		if (fw_msg->type == FW6_TYPE_CMD_RPL)
442 			t4vf_handle_fw_rpl(adapter, fw_msg->data);
443 		break;
444 	}
445 
446 	case CPL_FW4_MSG: {
447 		/* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
448 		 */
449 		const struct cpl_sge_egr_update *p = (void *)(rsp + 3);
450 		opcode = CPL_OPCODE_G(ntohl(p->opcode_qid));
451 		if (opcode != CPL_SGE_EGR_UPDATE) {
452 			dev_err(adapter->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n"
453 				, opcode);
454 			break;
455 		}
456 		cpl = (void *)p;
457 		/*FALLTHROUGH*/
458 	}
459 
460 	case CPL_SGE_EGR_UPDATE: {
461 		/*
462 		 * We've received an Egress Queue Status Update message.  We
463 		 * get these, if the SGE is configured to send these when the
464 		 * firmware passes certain points in processing our TX
465 		 * Ethernet Queue or if we make an explicit request for one.
466 		 * We use these updates to determine when we may need to
467 		 * restart a TX Ethernet Queue which was stopped for lack of
468 		 * free TX Queue Descriptors ...
469 		 */
470 		const struct cpl_sge_egr_update *p = cpl;
471 		unsigned int qid = EGR_QID_G(be32_to_cpu(p->opcode_qid));
472 		struct sge *s = &adapter->sge;
473 		struct sge_txq *tq;
474 		struct sge_eth_txq *txq;
475 		unsigned int eq_idx;
476 
477 		/*
478 		 * Perform sanity checking on the Queue ID to make sure it
479 		 * really refers to one of our TX Ethernet Egress Queues which
480 		 * is active and matches the queue's ID.  None of these error
481 		 * conditions should ever happen so we may want to either make
482 		 * them fatal and/or conditionalized under DEBUG.
483 		 */
484 		eq_idx = EQ_IDX(s, qid);
485 		if (unlikely(eq_idx >= MAX_EGRQ)) {
486 			dev_err(adapter->pdev_dev,
487 				"Egress Update QID %d out of range\n", qid);
488 			break;
489 		}
490 		tq = s->egr_map[eq_idx];
491 		if (unlikely(tq == NULL)) {
492 			dev_err(adapter->pdev_dev,
493 				"Egress Update QID %d TXQ=NULL\n", qid);
494 			break;
495 		}
496 		txq = container_of(tq, struct sge_eth_txq, q);
497 		if (unlikely(tq->abs_id != qid)) {
498 			dev_err(adapter->pdev_dev,
499 				"Egress Update QID %d refers to TXQ %d\n",
500 				qid, tq->abs_id);
501 			break;
502 		}
503 
504 		/*
505 		 * Restart a stopped TX Queue which has less than half of its
506 		 * TX ring in use ...
507 		 */
508 		txq->q.restarts++;
509 		netif_tx_wake_queue(txq->txq);
510 		break;
511 	}
512 
513 	default:
514 		dev_err(adapter->pdev_dev,
515 			"unexpected CPL %#x on FW event queue\n", opcode);
516 	}
517 
518 	return 0;
519 }
520 
521 /*
522  * Allocate SGE TX/RX response queues.  Determine how many sets of SGE queues
523  * to use and initializes them.  We support multiple "Queue Sets" per port if
524  * we have MSI-X, otherwise just one queue set per port.
525  */
526 static int setup_sge_queues(struct adapter *adapter)
527 {
528 	struct sge *s = &adapter->sge;
529 	int err, pidx, msix;
530 
531 	/*
532 	 * Clear "Queue Set" Free List Starving and TX Queue Mapping Error
533 	 * state.
534 	 */
535 	bitmap_zero(s->starving_fl, MAX_EGRQ);
536 
537 	/*
538 	 * If we're using MSI interrupt mode we need to set up a "forwarded
539 	 * interrupt" queue which we'll set up with our MSI vector.  The rest
540 	 * of the ingress queues will be set up to forward their interrupts to
541 	 * this queue ...  This must be first since t4vf_sge_alloc_rxq() uses
542 	 * the intrq's queue ID as the interrupt forwarding queue for the
543 	 * subsequent calls ...
544 	 */
545 	if (adapter->flags & USING_MSI) {
546 		err = t4vf_sge_alloc_rxq(adapter, &s->intrq, false,
547 					 adapter->port[0], 0, NULL, NULL);
548 		if (err)
549 			goto err_free_queues;
550 	}
551 
552 	/*
553 	 * Allocate our ingress queue for asynchronous firmware messages.
554 	 */
555 	err = t4vf_sge_alloc_rxq(adapter, &s->fw_evtq, true, adapter->port[0],
556 				 MSIX_FW, NULL, fwevtq_handler);
557 	if (err)
558 		goto err_free_queues;
559 
560 	/*
561 	 * Allocate each "port"'s initial Queue Sets.  These can be changed
562 	 * later on ... up to the point where any interface on the adapter is
563 	 * brought up at which point lots of things get nailed down
564 	 * permanently ...
565 	 */
566 	msix = MSIX_IQFLINT;
567 	for_each_port(adapter, pidx) {
568 		struct net_device *dev = adapter->port[pidx];
569 		struct port_info *pi = netdev_priv(dev);
570 		struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset];
571 		struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset];
572 		int qs;
573 
574 		for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
575 			err = t4vf_sge_alloc_rxq(adapter, &rxq->rspq, false,
576 						 dev, msix++,
577 						 &rxq->fl, t4vf_ethrx_handler);
578 			if (err)
579 				goto err_free_queues;
580 
581 			err = t4vf_sge_alloc_eth_txq(adapter, txq, dev,
582 					     netdev_get_tx_queue(dev, qs),
583 					     s->fw_evtq.cntxt_id);
584 			if (err)
585 				goto err_free_queues;
586 
587 			rxq->rspq.idx = qs;
588 			memset(&rxq->stats, 0, sizeof(rxq->stats));
589 		}
590 	}
591 
592 	/*
593 	 * Create the reverse mappings for the queues.
594 	 */
595 	s->egr_base = s->ethtxq[0].q.abs_id - s->ethtxq[0].q.cntxt_id;
596 	s->ingr_base = s->ethrxq[0].rspq.abs_id - s->ethrxq[0].rspq.cntxt_id;
597 	IQ_MAP(s, s->fw_evtq.abs_id) = &s->fw_evtq;
598 	for_each_port(adapter, pidx) {
599 		struct net_device *dev = adapter->port[pidx];
600 		struct port_info *pi = netdev_priv(dev);
601 		struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset];
602 		struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset];
603 		int qs;
604 
605 		for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
606 			IQ_MAP(s, rxq->rspq.abs_id) = &rxq->rspq;
607 			EQ_MAP(s, txq->q.abs_id) = &txq->q;
608 
609 			/*
610 			 * The FW_IQ_CMD doesn't return the Absolute Queue IDs
611 			 * for Free Lists but since all of the Egress Queues
612 			 * (including Free Lists) have Relative Queue IDs
613 			 * which are computed as Absolute - Base Queue ID, we
614 			 * can synthesize the Absolute Queue IDs for the Free
615 			 * Lists.  This is useful for debugging purposes when
616 			 * we want to dump Queue Contexts via the PF Driver.
617 			 */
618 			rxq->fl.abs_id = rxq->fl.cntxt_id + s->egr_base;
619 			EQ_MAP(s, rxq->fl.abs_id) = &rxq->fl;
620 		}
621 	}
622 	return 0;
623 
624 err_free_queues:
625 	t4vf_free_sge_resources(adapter);
626 	return err;
627 }
628 
629 /*
630  * Set up Receive Side Scaling (RSS) to distribute packets to multiple receive
631  * queues.  We configure the RSS CPU lookup table to distribute to the number
632  * of HW receive queues, and the response queue lookup table to narrow that
633  * down to the response queues actually configured for each "port" (Virtual
634  * Interface).  We always configure the RSS mapping for all ports since the
635  * mapping table has plenty of entries.
636  */
637 static int setup_rss(struct adapter *adapter)
638 {
639 	int pidx;
640 
641 	for_each_port(adapter, pidx) {
642 		struct port_info *pi = adap2pinfo(adapter, pidx);
643 		struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[pi->first_qset];
644 		u16 rss[MAX_PORT_QSETS];
645 		int qs, err;
646 
647 		for (qs = 0; qs < pi->nqsets; qs++)
648 			rss[qs] = rxq[qs].rspq.abs_id;
649 
650 		err = t4vf_config_rss_range(adapter, pi->viid,
651 					    0, pi->rss_size, rss, pi->nqsets);
652 		if (err)
653 			return err;
654 
655 		/*
656 		 * Perform Global RSS Mode-specific initialization.
657 		 */
658 		switch (adapter->params.rss.mode) {
659 		case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL:
660 			/*
661 			 * If Tunnel All Lookup isn't specified in the global
662 			 * RSS Configuration, then we need to specify a
663 			 * default Ingress Queue for any ingress packets which
664 			 * aren't hashed.  We'll use our first ingress queue
665 			 * ...
666 			 */
667 			if (!adapter->params.rss.u.basicvirtual.tnlalllookup) {
668 				union rss_vi_config config;
669 				err = t4vf_read_rss_vi_config(adapter,
670 							      pi->viid,
671 							      &config);
672 				if (err)
673 					return err;
674 				config.basicvirtual.defaultq =
675 					rxq[0].rspq.abs_id;
676 				err = t4vf_write_rss_vi_config(adapter,
677 							       pi->viid,
678 							       &config);
679 				if (err)
680 					return err;
681 			}
682 			break;
683 		}
684 	}
685 
686 	return 0;
687 }
688 
689 /*
690  * Bring the adapter up.  Called whenever we go from no "ports" open to having
691  * one open.  This function performs the actions necessary to make an adapter
692  * operational, such as completing the initialization of HW modules, and
693  * enabling interrupts.  Must be called with the rtnl lock held.  (Note that
694  * this is called "cxgb_up" in the PF Driver.)
695  */
696 static int adapter_up(struct adapter *adapter)
697 {
698 	int err;
699 
700 	/*
701 	 * If this is the first time we've been called, perform basic
702 	 * adapter setup.  Once we've done this, many of our adapter
703 	 * parameters can no longer be changed ...
704 	 */
705 	if ((adapter->flags & FULL_INIT_DONE) == 0) {
706 		err = setup_sge_queues(adapter);
707 		if (err)
708 			return err;
709 		err = setup_rss(adapter);
710 		if (err) {
711 			t4vf_free_sge_resources(adapter);
712 			return err;
713 		}
714 
715 		if (adapter->flags & USING_MSIX)
716 			name_msix_vecs(adapter);
717 		adapter->flags |= FULL_INIT_DONE;
718 	}
719 
720 	/*
721 	 * Acquire our interrupt resources.  We only support MSI-X and MSI.
722 	 */
723 	BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0);
724 	if (adapter->flags & USING_MSIX)
725 		err = request_msix_queue_irqs(adapter);
726 	else
727 		err = request_irq(adapter->pdev->irq,
728 				  t4vf_intr_handler(adapter), 0,
729 				  adapter->name, adapter);
730 	if (err) {
731 		dev_err(adapter->pdev_dev, "request_irq failed, err %d\n",
732 			err);
733 		return err;
734 	}
735 
736 	/*
737 	 * Enable NAPI ingress processing and return success.
738 	 */
739 	enable_rx(adapter);
740 	t4vf_sge_start(adapter);
741 
742 	/* Initialize hash mac addr list*/
743 	INIT_LIST_HEAD(&adapter->mac_hlist);
744 	return 0;
745 }
746 
747 /*
748  * Bring the adapter down.  Called whenever the last "port" (Virtual
749  * Interface) closed.  (Note that this routine is called "cxgb_down" in the PF
750  * Driver.)
751  */
752 static void adapter_down(struct adapter *adapter)
753 {
754 	/*
755 	 * Free interrupt resources.
756 	 */
757 	if (adapter->flags & USING_MSIX)
758 		free_msix_queue_irqs(adapter);
759 	else
760 		free_irq(adapter->pdev->irq, adapter);
761 
762 	/*
763 	 * Wait for NAPI handlers to finish.
764 	 */
765 	quiesce_rx(adapter);
766 }
767 
768 /*
769  * Start up a net device.
770  */
771 static int cxgb4vf_open(struct net_device *dev)
772 {
773 	int err;
774 	struct port_info *pi = netdev_priv(dev);
775 	struct adapter *adapter = pi->adapter;
776 
777 	/*
778 	 * If this is the first interface that we're opening on the "adapter",
779 	 * bring the "adapter" up now.
780 	 */
781 	if (adapter->open_device_map == 0) {
782 		err = adapter_up(adapter);
783 		if (err)
784 			return err;
785 	}
786 
787 	/*
788 	 * Note that this interface is up and start everything up ...
789 	 */
790 	err = link_start(dev);
791 	if (err)
792 		goto err_unwind;
793 
794 	pi->vlan_id = t4vf_get_vf_vlan_acl(adapter);
795 
796 	netif_tx_start_all_queues(dev);
797 	set_bit(pi->port_id, &adapter->open_device_map);
798 	return 0;
799 
800 err_unwind:
801 	if (adapter->open_device_map == 0)
802 		adapter_down(adapter);
803 	return err;
804 }
805 
806 /*
807  * Shut down a net device.  This routine is called "cxgb_close" in the PF
808  * Driver ...
809  */
810 static int cxgb4vf_stop(struct net_device *dev)
811 {
812 	struct port_info *pi = netdev_priv(dev);
813 	struct adapter *adapter = pi->adapter;
814 
815 	netif_tx_stop_all_queues(dev);
816 	netif_carrier_off(dev);
817 	t4vf_enable_vi(adapter, pi->viid, false, false);
818 	pi->link_cfg.link_ok = 0;
819 
820 	clear_bit(pi->port_id, &adapter->open_device_map);
821 	if (adapter->open_device_map == 0)
822 		adapter_down(adapter);
823 	return 0;
824 }
825 
826 /*
827  * Translate our basic statistics into the standard "ifconfig" statistics.
828  */
829 static struct net_device_stats *cxgb4vf_get_stats(struct net_device *dev)
830 {
831 	struct t4vf_port_stats stats;
832 	struct port_info *pi = netdev2pinfo(dev);
833 	struct adapter *adapter = pi->adapter;
834 	struct net_device_stats *ns = &dev->stats;
835 	int err;
836 
837 	spin_lock(&adapter->stats_lock);
838 	err = t4vf_get_port_stats(adapter, pi->pidx, &stats);
839 	spin_unlock(&adapter->stats_lock);
840 
841 	memset(ns, 0, sizeof(*ns));
842 	if (err)
843 		return ns;
844 
845 	ns->tx_bytes = (stats.tx_bcast_bytes + stats.tx_mcast_bytes +
846 			stats.tx_ucast_bytes + stats.tx_offload_bytes);
847 	ns->tx_packets = (stats.tx_bcast_frames + stats.tx_mcast_frames +
848 			  stats.tx_ucast_frames + stats.tx_offload_frames);
849 	ns->rx_bytes = (stats.rx_bcast_bytes + stats.rx_mcast_bytes +
850 			stats.rx_ucast_bytes);
851 	ns->rx_packets = (stats.rx_bcast_frames + stats.rx_mcast_frames +
852 			  stats.rx_ucast_frames);
853 	ns->multicast = stats.rx_mcast_frames;
854 	ns->tx_errors = stats.tx_drop_frames;
855 	ns->rx_errors = stats.rx_err_frames;
856 
857 	return ns;
858 }
859 
860 static inline int cxgb4vf_set_addr_hash(struct port_info *pi)
861 {
862 	struct adapter *adapter = pi->adapter;
863 	u64 vec = 0;
864 	bool ucast = false;
865 	struct hash_mac_addr *entry;
866 
867 	/* Calculate the hash vector for the updated list and program it */
868 	list_for_each_entry(entry, &adapter->mac_hlist, list) {
869 		ucast |= is_unicast_ether_addr(entry->addr);
870 		vec |= (1ULL << hash_mac_addr(entry->addr));
871 	}
872 	return t4vf_set_addr_hash(adapter, pi->viid, ucast, vec, false);
873 }
874 
875 static int cxgb4vf_mac_sync(struct net_device *netdev, const u8 *mac_addr)
876 {
877 	struct port_info *pi = netdev_priv(netdev);
878 	struct adapter *adapter = pi->adapter;
879 	int ret;
880 	u64 mhash = 0;
881 	u64 uhash = 0;
882 	bool free = false;
883 	bool ucast = is_unicast_ether_addr(mac_addr);
884 	const u8 *maclist[1] = {mac_addr};
885 	struct hash_mac_addr *new_entry;
886 
887 	ret = t4vf_alloc_mac_filt(adapter, pi->viid, free, 1, maclist,
888 				  NULL, ucast ? &uhash : &mhash, false);
889 	if (ret < 0)
890 		goto out;
891 	/* if hash != 0, then add the addr to hash addr list
892 	 * so on the end we will calculate the hash for the
893 	 * list and program it
894 	 */
895 	if (uhash || mhash) {
896 		new_entry = kzalloc(sizeof(*new_entry), GFP_ATOMIC);
897 		if (!new_entry)
898 			return -ENOMEM;
899 		ether_addr_copy(new_entry->addr, mac_addr);
900 		list_add_tail(&new_entry->list, &adapter->mac_hlist);
901 		ret = cxgb4vf_set_addr_hash(pi);
902 	}
903 out:
904 	return ret < 0 ? ret : 0;
905 }
906 
907 static int cxgb4vf_mac_unsync(struct net_device *netdev, const u8 *mac_addr)
908 {
909 	struct port_info *pi = netdev_priv(netdev);
910 	struct adapter *adapter = pi->adapter;
911 	int ret;
912 	const u8 *maclist[1] = {mac_addr};
913 	struct hash_mac_addr *entry, *tmp;
914 
915 	/* If the MAC address to be removed is in the hash addr
916 	 * list, delete it from the list and update hash vector
917 	 */
918 	list_for_each_entry_safe(entry, tmp, &adapter->mac_hlist, list) {
919 		if (ether_addr_equal(entry->addr, mac_addr)) {
920 			list_del(&entry->list);
921 			kfree(entry);
922 			return cxgb4vf_set_addr_hash(pi);
923 		}
924 	}
925 
926 	ret = t4vf_free_mac_filt(adapter, pi->viid, 1, maclist, false);
927 	return ret < 0 ? -EINVAL : 0;
928 }
929 
930 /*
931  * Set RX properties of a port, such as promiscruity, address filters, and MTU.
932  * If @mtu is -1 it is left unchanged.
933  */
934 static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
935 {
936 	struct port_info *pi = netdev_priv(dev);
937 
938 	__dev_uc_sync(dev, cxgb4vf_mac_sync, cxgb4vf_mac_unsync);
939 	__dev_mc_sync(dev, cxgb4vf_mac_sync, cxgb4vf_mac_unsync);
940 	return t4vf_set_rxmode(pi->adapter, pi->viid, -1,
941 			       (dev->flags & IFF_PROMISC) != 0,
942 			       (dev->flags & IFF_ALLMULTI) != 0,
943 			       1, -1, sleep_ok);
944 }
945 
946 /*
947  * Set the current receive modes on the device.
948  */
949 static void cxgb4vf_set_rxmode(struct net_device *dev)
950 {
951 	/* unfortunately we can't return errors to the stack */
952 	set_rxmode(dev, -1, false);
953 }
954 
955 /*
956  * Find the entry in the interrupt holdoff timer value array which comes
957  * closest to the specified interrupt holdoff value.
958  */
959 static int closest_timer(const struct sge *s, int us)
960 {
961 	int i, timer_idx = 0, min_delta = INT_MAX;
962 
963 	for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
964 		int delta = us - s->timer_val[i];
965 		if (delta < 0)
966 			delta = -delta;
967 		if (delta < min_delta) {
968 			min_delta = delta;
969 			timer_idx = i;
970 		}
971 	}
972 	return timer_idx;
973 }
974 
975 static int closest_thres(const struct sge *s, int thres)
976 {
977 	int i, delta, pktcnt_idx = 0, min_delta = INT_MAX;
978 
979 	for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
980 		delta = thres - s->counter_val[i];
981 		if (delta < 0)
982 			delta = -delta;
983 		if (delta < min_delta) {
984 			min_delta = delta;
985 			pktcnt_idx = i;
986 		}
987 	}
988 	return pktcnt_idx;
989 }
990 
991 /*
992  * Return a queue's interrupt hold-off time in us.  0 means no timer.
993  */
994 static unsigned int qtimer_val(const struct adapter *adapter,
995 			       const struct sge_rspq *rspq)
996 {
997 	unsigned int timer_idx = QINTR_TIMER_IDX_G(rspq->intr_params);
998 
999 	return timer_idx < SGE_NTIMERS
1000 		? adapter->sge.timer_val[timer_idx]
1001 		: 0;
1002 }
1003 
1004 /**
1005  *	set_rxq_intr_params - set a queue's interrupt holdoff parameters
1006  *	@adapter: the adapter
1007  *	@rspq: the RX response queue
1008  *	@us: the hold-off time in us, or 0 to disable timer
1009  *	@cnt: the hold-off packet count, or 0 to disable counter
1010  *
1011  *	Sets an RX response queue's interrupt hold-off time and packet count.
1012  *	At least one of the two needs to be enabled for the queue to generate
1013  *	interrupts.
1014  */
1015 static int set_rxq_intr_params(struct adapter *adapter, struct sge_rspq *rspq,
1016 			       unsigned int us, unsigned int cnt)
1017 {
1018 	unsigned int timer_idx;
1019 
1020 	/*
1021 	 * If both the interrupt holdoff timer and count are specified as
1022 	 * zero, default to a holdoff count of 1 ...
1023 	 */
1024 	if ((us | cnt) == 0)
1025 		cnt = 1;
1026 
1027 	/*
1028 	 * If an interrupt holdoff count has been specified, then find the
1029 	 * closest configured holdoff count and use that.  If the response
1030 	 * queue has already been created, then update its queue context
1031 	 * parameters ...
1032 	 */
1033 	if (cnt) {
1034 		int err;
1035 		u32 v, pktcnt_idx;
1036 
1037 		pktcnt_idx = closest_thres(&adapter->sge, cnt);
1038 		if (rspq->desc && rspq->pktcnt_idx != pktcnt_idx) {
1039 			v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
1040 			    FW_PARAMS_PARAM_X_V(
1041 					FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
1042 			    FW_PARAMS_PARAM_YZ_V(rspq->cntxt_id);
1043 			err = t4vf_set_params(adapter, 1, &v, &pktcnt_idx);
1044 			if (err)
1045 				return err;
1046 		}
1047 		rspq->pktcnt_idx = pktcnt_idx;
1048 	}
1049 
1050 	/*
1051 	 * Compute the closest holdoff timer index from the supplied holdoff
1052 	 * timer value.
1053 	 */
1054 	timer_idx = (us == 0
1055 		     ? SGE_TIMER_RSTRT_CNTR
1056 		     : closest_timer(&adapter->sge, us));
1057 
1058 	/*
1059 	 * Update the response queue's interrupt coalescing parameters and
1060 	 * return success.
1061 	 */
1062 	rspq->intr_params = (QINTR_TIMER_IDX_V(timer_idx) |
1063 			     QINTR_CNT_EN_V(cnt > 0));
1064 	return 0;
1065 }
1066 
1067 /*
1068  * Return a version number to identify the type of adapter.  The scheme is:
1069  * - bits 0..9: chip version
1070  * - bits 10..15: chip revision
1071  */
1072 static inline unsigned int mk_adap_vers(const struct adapter *adapter)
1073 {
1074 	/*
1075 	 * Chip version 4, revision 0x3f (cxgb4vf).
1076 	 */
1077 	return CHELSIO_CHIP_VERSION(adapter->params.chip) | (0x3f << 10);
1078 }
1079 
1080 /*
1081  * Execute the specified ioctl command.
1082  */
1083 static int cxgb4vf_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1084 {
1085 	int ret = 0;
1086 
1087 	switch (cmd) {
1088 	    /*
1089 	     * The VF Driver doesn't have access to any of the other
1090 	     * common Ethernet device ioctl()'s (like reading/writing
1091 	     * PHY registers, etc.
1092 	     */
1093 
1094 	default:
1095 		ret = -EOPNOTSUPP;
1096 		break;
1097 	}
1098 	return ret;
1099 }
1100 
1101 /*
1102  * Change the device's MTU.
1103  */
1104 static int cxgb4vf_change_mtu(struct net_device *dev, int new_mtu)
1105 {
1106 	int ret;
1107 	struct port_info *pi = netdev_priv(dev);
1108 
1109 	ret = t4vf_set_rxmode(pi->adapter, pi->viid, new_mtu,
1110 			      -1, -1, -1, -1, true);
1111 	if (!ret)
1112 		dev->mtu = new_mtu;
1113 	return ret;
1114 }
1115 
1116 static netdev_features_t cxgb4vf_fix_features(struct net_device *dev,
1117 	netdev_features_t features)
1118 {
1119 	/*
1120 	 * Since there is no support for separate rx/tx vlan accel
1121 	 * enable/disable make sure tx flag is always in same state as rx.
1122 	 */
1123 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
1124 		features |= NETIF_F_HW_VLAN_CTAG_TX;
1125 	else
1126 		features &= ~NETIF_F_HW_VLAN_CTAG_TX;
1127 
1128 	return features;
1129 }
1130 
1131 static int cxgb4vf_set_features(struct net_device *dev,
1132 	netdev_features_t features)
1133 {
1134 	struct port_info *pi = netdev_priv(dev);
1135 	netdev_features_t changed = dev->features ^ features;
1136 
1137 	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
1138 		t4vf_set_rxmode(pi->adapter, pi->viid, -1, -1, -1, -1,
1139 				features & NETIF_F_HW_VLAN_CTAG_TX, 0);
1140 
1141 	return 0;
1142 }
1143 
1144 /*
1145  * Change the devices MAC address.
1146  */
1147 static int cxgb4vf_set_mac_addr(struct net_device *dev, void *_addr)
1148 {
1149 	int ret;
1150 	struct sockaddr *addr = _addr;
1151 	struct port_info *pi = netdev_priv(dev);
1152 
1153 	if (!is_valid_ether_addr(addr->sa_data))
1154 		return -EADDRNOTAVAIL;
1155 
1156 	ret = t4vf_change_mac(pi->adapter, pi->viid, pi->xact_addr_filt,
1157 			      addr->sa_data, true);
1158 	if (ret < 0)
1159 		return ret;
1160 
1161 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1162 	pi->xact_addr_filt = ret;
1163 	return 0;
1164 }
1165 
1166 #ifdef CONFIG_NET_POLL_CONTROLLER
1167 /*
1168  * Poll all of our receive queues.  This is called outside of normal interrupt
1169  * context.
1170  */
1171 static void cxgb4vf_poll_controller(struct net_device *dev)
1172 {
1173 	struct port_info *pi = netdev_priv(dev);
1174 	struct adapter *adapter = pi->adapter;
1175 
1176 	if (adapter->flags & USING_MSIX) {
1177 		struct sge_eth_rxq *rxq;
1178 		int nqsets;
1179 
1180 		rxq = &adapter->sge.ethrxq[pi->first_qset];
1181 		for (nqsets = pi->nqsets; nqsets; nqsets--) {
1182 			t4vf_sge_intr_msix(0, &rxq->rspq);
1183 			rxq++;
1184 		}
1185 	} else
1186 		t4vf_intr_handler(adapter)(0, adapter);
1187 }
1188 #endif
1189 
1190 /*
1191  * Ethtool operations.
1192  * ===================
1193  *
1194  * Note that we don't support any ethtool operations which change the physical
1195  * state of the port to which we're linked.
1196  */
1197 
1198 /**
1199  *	from_fw_port_mod_type - translate Firmware Port/Module type to Ethtool
1200  *	@port_type: Firmware Port Type
1201  *	@mod_type: Firmware Module Type
1202  *
1203  *	Translate Firmware Port/Module type to Ethtool Port Type.
1204  */
1205 static int from_fw_port_mod_type(enum fw_port_type port_type,
1206 				 enum fw_port_module_type mod_type)
1207 {
1208 	if (port_type == FW_PORT_TYPE_BT_SGMII ||
1209 	    port_type == FW_PORT_TYPE_BT_XFI ||
1210 	    port_type == FW_PORT_TYPE_BT_XAUI) {
1211 		return PORT_TP;
1212 	} else if (port_type == FW_PORT_TYPE_FIBER_XFI ||
1213 		   port_type == FW_PORT_TYPE_FIBER_XAUI) {
1214 		return PORT_FIBRE;
1215 	} else if (port_type == FW_PORT_TYPE_SFP ||
1216 		   port_type == FW_PORT_TYPE_QSFP_10G ||
1217 		   port_type == FW_PORT_TYPE_QSA ||
1218 		   port_type == FW_PORT_TYPE_QSFP ||
1219 		   port_type == FW_PORT_TYPE_CR4_QSFP ||
1220 		   port_type == FW_PORT_TYPE_CR_QSFP ||
1221 		   port_type == FW_PORT_TYPE_CR2_QSFP ||
1222 		   port_type == FW_PORT_TYPE_SFP28) {
1223 		if (mod_type == FW_PORT_MOD_TYPE_LR ||
1224 		    mod_type == FW_PORT_MOD_TYPE_SR ||
1225 		    mod_type == FW_PORT_MOD_TYPE_ER ||
1226 		    mod_type == FW_PORT_MOD_TYPE_LRM)
1227 			return PORT_FIBRE;
1228 		else if (mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
1229 			 mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
1230 			return PORT_DA;
1231 		else
1232 			return PORT_OTHER;
1233 	} else if (port_type == FW_PORT_TYPE_KR4_100G ||
1234 		   port_type == FW_PORT_TYPE_KR_SFP28 ||
1235 		   port_type == FW_PORT_TYPE_KR_XLAUI) {
1236 		return PORT_NONE;
1237 	}
1238 
1239 	return PORT_OTHER;
1240 }
1241 
1242 /**
1243  *	fw_caps_to_lmm - translate Firmware to ethtool Link Mode Mask
1244  *	@port_type: Firmware Port Type
1245  *	@fw_caps: Firmware Port Capabilities
1246  *	@link_mode_mask: ethtool Link Mode Mask
1247  *
1248  *	Translate a Firmware Port Capabilities specification to an ethtool
1249  *	Link Mode Mask.
1250  */
1251 static void fw_caps_to_lmm(enum fw_port_type port_type,
1252 			   unsigned int fw_caps,
1253 			   unsigned long *link_mode_mask)
1254 {
1255 	#define SET_LMM(__lmm_name) \
1256 		__set_bit(ETHTOOL_LINK_MODE_ ## __lmm_name ## _BIT, \
1257 			  link_mode_mask)
1258 
1259 	#define FW_CAPS_TO_LMM(__fw_name, __lmm_name) \
1260 		do { \
1261 			if (fw_caps & FW_PORT_CAP32_ ## __fw_name) \
1262 				SET_LMM(__lmm_name); \
1263 		} while (0)
1264 
1265 	switch (port_type) {
1266 	case FW_PORT_TYPE_BT_SGMII:
1267 	case FW_PORT_TYPE_BT_XFI:
1268 	case FW_PORT_TYPE_BT_XAUI:
1269 		SET_LMM(TP);
1270 		FW_CAPS_TO_LMM(SPEED_100M, 100baseT_Full);
1271 		FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
1272 		FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
1273 		break;
1274 
1275 	case FW_PORT_TYPE_KX4:
1276 	case FW_PORT_TYPE_KX:
1277 		SET_LMM(Backplane);
1278 		FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
1279 		FW_CAPS_TO_LMM(SPEED_10G, 10000baseKX4_Full);
1280 		break;
1281 
1282 	case FW_PORT_TYPE_KR:
1283 		SET_LMM(Backplane);
1284 		SET_LMM(10000baseKR_Full);
1285 		break;
1286 
1287 	case FW_PORT_TYPE_BP_AP:
1288 		SET_LMM(Backplane);
1289 		SET_LMM(10000baseR_FEC);
1290 		SET_LMM(10000baseKR_Full);
1291 		SET_LMM(1000baseKX_Full);
1292 		break;
1293 
1294 	case FW_PORT_TYPE_BP4_AP:
1295 		SET_LMM(Backplane);
1296 		SET_LMM(10000baseR_FEC);
1297 		SET_LMM(10000baseKR_Full);
1298 		SET_LMM(1000baseKX_Full);
1299 		SET_LMM(10000baseKX4_Full);
1300 		break;
1301 
1302 	case FW_PORT_TYPE_FIBER_XFI:
1303 	case FW_PORT_TYPE_FIBER_XAUI:
1304 	case FW_PORT_TYPE_SFP:
1305 	case FW_PORT_TYPE_QSFP_10G:
1306 	case FW_PORT_TYPE_QSA:
1307 		SET_LMM(FIBRE);
1308 		FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
1309 		FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
1310 		break;
1311 
1312 	case FW_PORT_TYPE_BP40_BA:
1313 	case FW_PORT_TYPE_QSFP:
1314 		SET_LMM(FIBRE);
1315 		SET_LMM(40000baseSR4_Full);
1316 		break;
1317 
1318 	case FW_PORT_TYPE_CR_QSFP:
1319 	case FW_PORT_TYPE_SFP28:
1320 		SET_LMM(FIBRE);
1321 		SET_LMM(25000baseCR_Full);
1322 		break;
1323 
1324 	case FW_PORT_TYPE_KR_SFP28:
1325 		SET_LMM(Backplane);
1326 		SET_LMM(25000baseKR_Full);
1327 		break;
1328 
1329 	case FW_PORT_TYPE_KR_XLAUI:
1330 		SET_LMM(Backplane);
1331 		FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
1332 		FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
1333 		FW_CAPS_TO_LMM(SPEED_40G, 40000baseKR4_Full);
1334 		break;
1335 
1336 	case FW_PORT_TYPE_CR2_QSFP:
1337 		SET_LMM(FIBRE);
1338 		SET_LMM(50000baseSR2_Full);
1339 		break;
1340 
1341 	case FW_PORT_TYPE_KR4_100G:
1342 	case FW_PORT_TYPE_CR4_QSFP:
1343 		SET_LMM(FIBRE);
1344 		SET_LMM(100000baseCR4_Full);
1345 		break;
1346 
1347 	default:
1348 		break;
1349 	}
1350 
1351 	FW_CAPS_TO_LMM(ANEG, Autoneg);
1352 	FW_CAPS_TO_LMM(802_3_PAUSE, Pause);
1353 	FW_CAPS_TO_LMM(802_3_ASM_DIR, Asym_Pause);
1354 
1355 	#undef FW_CAPS_TO_LMM
1356 	#undef SET_LMM
1357 }
1358 
1359 static int cxgb4vf_get_link_ksettings(struct net_device *dev,
1360 				  struct ethtool_link_ksettings *link_ksettings)
1361 {
1362 	struct port_info *pi = netdev_priv(dev);
1363 	struct ethtool_link_settings *base = &link_ksettings->base;
1364 
1365 	/* For the nonce, the Firmware doesn't send up Port State changes
1366 	 * when the Virtual Interface attached to the Port is down.  So
1367 	 * if it's down, let's grab any changes.
1368 	 */
1369 	if (!netif_running(dev))
1370 		(void)t4vf_update_port_info(pi);
1371 
1372 	ethtool_link_ksettings_zero_link_mode(link_ksettings, supported);
1373 	ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
1374 	ethtool_link_ksettings_zero_link_mode(link_ksettings, lp_advertising);
1375 
1376 	base->port = from_fw_port_mod_type(pi->port_type, pi->mod_type);
1377 
1378 	if (pi->mdio_addr >= 0) {
1379 		base->phy_address = pi->mdio_addr;
1380 		base->mdio_support = (pi->port_type == FW_PORT_TYPE_BT_SGMII
1381 				      ? ETH_MDIO_SUPPORTS_C22
1382 				      : ETH_MDIO_SUPPORTS_C45);
1383 	} else {
1384 		base->phy_address = 255;
1385 		base->mdio_support = 0;
1386 	}
1387 
1388 	fw_caps_to_lmm(pi->port_type, pi->link_cfg.pcaps,
1389 		       link_ksettings->link_modes.supported);
1390 	fw_caps_to_lmm(pi->port_type, pi->link_cfg.acaps,
1391 		       link_ksettings->link_modes.advertising);
1392 	fw_caps_to_lmm(pi->port_type, pi->link_cfg.lpacaps,
1393 		       link_ksettings->link_modes.lp_advertising);
1394 
1395 	if (netif_carrier_ok(dev)) {
1396 		base->speed = pi->link_cfg.speed;
1397 		base->duplex = DUPLEX_FULL;
1398 	} else {
1399 		base->speed = SPEED_UNKNOWN;
1400 		base->duplex = DUPLEX_UNKNOWN;
1401 	}
1402 
1403 	base->autoneg = pi->link_cfg.autoneg;
1404 	if (pi->link_cfg.pcaps & FW_PORT_CAP32_ANEG)
1405 		ethtool_link_ksettings_add_link_mode(link_ksettings,
1406 						     supported, Autoneg);
1407 	if (pi->link_cfg.autoneg)
1408 		ethtool_link_ksettings_add_link_mode(link_ksettings,
1409 						     advertising, Autoneg);
1410 
1411 	return 0;
1412 }
1413 
1414 /* Translate the Firmware FEC value into the ethtool value. */
1415 static inline unsigned int fwcap_to_eth_fec(unsigned int fw_fec)
1416 {
1417 	unsigned int eth_fec = 0;
1418 
1419 	if (fw_fec & FW_PORT_CAP32_FEC_RS)
1420 		eth_fec |= ETHTOOL_FEC_RS;
1421 	if (fw_fec & FW_PORT_CAP32_FEC_BASER_RS)
1422 		eth_fec |= ETHTOOL_FEC_BASER;
1423 
1424 	/* if nothing is set, then FEC is off */
1425 	if (!eth_fec)
1426 		eth_fec = ETHTOOL_FEC_OFF;
1427 
1428 	return eth_fec;
1429 }
1430 
1431 /* Translate Common Code FEC value into ethtool value. */
1432 static inline unsigned int cc_to_eth_fec(unsigned int cc_fec)
1433 {
1434 	unsigned int eth_fec = 0;
1435 
1436 	if (cc_fec & FEC_AUTO)
1437 		eth_fec |= ETHTOOL_FEC_AUTO;
1438 	if (cc_fec & FEC_RS)
1439 		eth_fec |= ETHTOOL_FEC_RS;
1440 	if (cc_fec & FEC_BASER_RS)
1441 		eth_fec |= ETHTOOL_FEC_BASER;
1442 
1443 	/* if nothing is set, then FEC is off */
1444 	if (!eth_fec)
1445 		eth_fec = ETHTOOL_FEC_OFF;
1446 
1447 	return eth_fec;
1448 }
1449 
1450 static int cxgb4vf_get_fecparam(struct net_device *dev,
1451 				struct ethtool_fecparam *fec)
1452 {
1453 	const struct port_info *pi = netdev_priv(dev);
1454 	const struct link_config *lc = &pi->link_cfg;
1455 
1456 	/* Translate the Firmware FEC Support into the ethtool value.  We
1457 	 * always support IEEE 802.3 "automatic" selection of Link FEC type if
1458 	 * any FEC is supported.
1459 	 */
1460 	fec->fec = fwcap_to_eth_fec(lc->pcaps);
1461 	if (fec->fec != ETHTOOL_FEC_OFF)
1462 		fec->fec |= ETHTOOL_FEC_AUTO;
1463 
1464 	/* Translate the current internal FEC parameters into the
1465 	 * ethtool values.
1466 	 */
1467 	fec->active_fec = cc_to_eth_fec(lc->fec);
1468 	return 0;
1469 }
1470 
1471 /*
1472  * Return our driver information.
1473  */
1474 static void cxgb4vf_get_drvinfo(struct net_device *dev,
1475 				struct ethtool_drvinfo *drvinfo)
1476 {
1477 	struct adapter *adapter = netdev2adap(dev);
1478 
1479 	strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
1480 	strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
1481 	strlcpy(drvinfo->bus_info, pci_name(to_pci_dev(dev->dev.parent)),
1482 		sizeof(drvinfo->bus_info));
1483 	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
1484 		 "%u.%u.%u.%u, TP %u.%u.%u.%u",
1485 		 FW_HDR_FW_VER_MAJOR_G(adapter->params.dev.fwrev),
1486 		 FW_HDR_FW_VER_MINOR_G(adapter->params.dev.fwrev),
1487 		 FW_HDR_FW_VER_MICRO_G(adapter->params.dev.fwrev),
1488 		 FW_HDR_FW_VER_BUILD_G(adapter->params.dev.fwrev),
1489 		 FW_HDR_FW_VER_MAJOR_G(adapter->params.dev.tprev),
1490 		 FW_HDR_FW_VER_MINOR_G(adapter->params.dev.tprev),
1491 		 FW_HDR_FW_VER_MICRO_G(adapter->params.dev.tprev),
1492 		 FW_HDR_FW_VER_BUILD_G(adapter->params.dev.tprev));
1493 }
1494 
1495 /*
1496  * Return current adapter message level.
1497  */
1498 static u32 cxgb4vf_get_msglevel(struct net_device *dev)
1499 {
1500 	return netdev2adap(dev)->msg_enable;
1501 }
1502 
1503 /*
1504  * Set current adapter message level.
1505  */
1506 static void cxgb4vf_set_msglevel(struct net_device *dev, u32 msglevel)
1507 {
1508 	netdev2adap(dev)->msg_enable = msglevel;
1509 }
1510 
1511 /*
1512  * Return the device's current Queue Set ring size parameters along with the
1513  * allowed maximum values.  Since ethtool doesn't understand the concept of
1514  * multi-queue devices, we just return the current values associated with the
1515  * first Queue Set.
1516  */
1517 static void cxgb4vf_get_ringparam(struct net_device *dev,
1518 				  struct ethtool_ringparam *rp)
1519 {
1520 	const struct port_info *pi = netdev_priv(dev);
1521 	const struct sge *s = &pi->adapter->sge;
1522 
1523 	rp->rx_max_pending = MAX_RX_BUFFERS;
1524 	rp->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
1525 	rp->rx_jumbo_max_pending = 0;
1526 	rp->tx_max_pending = MAX_TXQ_ENTRIES;
1527 
1528 	rp->rx_pending = s->ethrxq[pi->first_qset].fl.size - MIN_FL_RESID;
1529 	rp->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
1530 	rp->rx_jumbo_pending = 0;
1531 	rp->tx_pending = s->ethtxq[pi->first_qset].q.size;
1532 }
1533 
1534 /*
1535  * Set the Queue Set ring size parameters for the device.  Again, since
1536  * ethtool doesn't allow for the concept of multiple queues per device, we'll
1537  * apply these new values across all of the Queue Sets associated with the
1538  * device -- after vetting them of course!
1539  */
1540 static int cxgb4vf_set_ringparam(struct net_device *dev,
1541 				 struct ethtool_ringparam *rp)
1542 {
1543 	const struct port_info *pi = netdev_priv(dev);
1544 	struct adapter *adapter = pi->adapter;
1545 	struct sge *s = &adapter->sge;
1546 	int qs;
1547 
1548 	if (rp->rx_pending > MAX_RX_BUFFERS ||
1549 	    rp->rx_jumbo_pending ||
1550 	    rp->tx_pending > MAX_TXQ_ENTRIES ||
1551 	    rp->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1552 	    rp->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1553 	    rp->rx_pending < MIN_FL_ENTRIES ||
1554 	    rp->tx_pending < MIN_TXQ_ENTRIES)
1555 		return -EINVAL;
1556 
1557 	if (adapter->flags & FULL_INIT_DONE)
1558 		return -EBUSY;
1559 
1560 	for (qs = pi->first_qset; qs < pi->first_qset + pi->nqsets; qs++) {
1561 		s->ethrxq[qs].fl.size = rp->rx_pending + MIN_FL_RESID;
1562 		s->ethrxq[qs].rspq.size = rp->rx_mini_pending;
1563 		s->ethtxq[qs].q.size = rp->tx_pending;
1564 	}
1565 	return 0;
1566 }
1567 
1568 /*
1569  * Return the interrupt holdoff timer and count for the first Queue Set on the
1570  * device.  Our extension ioctl() (the cxgbtool interface) allows the
1571  * interrupt holdoff timer to be read on all of the device's Queue Sets.
1572  */
1573 static int cxgb4vf_get_coalesce(struct net_device *dev,
1574 				struct ethtool_coalesce *coalesce)
1575 {
1576 	const struct port_info *pi = netdev_priv(dev);
1577 	const struct adapter *adapter = pi->adapter;
1578 	const struct sge_rspq *rspq = &adapter->sge.ethrxq[pi->first_qset].rspq;
1579 
1580 	coalesce->rx_coalesce_usecs = qtimer_val(adapter, rspq);
1581 	coalesce->rx_max_coalesced_frames =
1582 		((rspq->intr_params & QINTR_CNT_EN_F)
1583 		 ? adapter->sge.counter_val[rspq->pktcnt_idx]
1584 		 : 0);
1585 	return 0;
1586 }
1587 
1588 /*
1589  * Set the RX interrupt holdoff timer and count for the first Queue Set on the
1590  * interface.  Our extension ioctl() (the cxgbtool interface) allows us to set
1591  * the interrupt holdoff timer on any of the device's Queue Sets.
1592  */
1593 static int cxgb4vf_set_coalesce(struct net_device *dev,
1594 				struct ethtool_coalesce *coalesce)
1595 {
1596 	const struct port_info *pi = netdev_priv(dev);
1597 	struct adapter *adapter = pi->adapter;
1598 
1599 	return set_rxq_intr_params(adapter,
1600 				   &adapter->sge.ethrxq[pi->first_qset].rspq,
1601 				   coalesce->rx_coalesce_usecs,
1602 				   coalesce->rx_max_coalesced_frames);
1603 }
1604 
1605 /*
1606  * Report current port link pause parameter settings.
1607  */
1608 static void cxgb4vf_get_pauseparam(struct net_device *dev,
1609 				   struct ethtool_pauseparam *pauseparam)
1610 {
1611 	struct port_info *pi = netdev_priv(dev);
1612 
1613 	pauseparam->autoneg = (pi->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
1614 	pauseparam->rx_pause = (pi->link_cfg.fc & PAUSE_RX) != 0;
1615 	pauseparam->tx_pause = (pi->link_cfg.fc & PAUSE_TX) != 0;
1616 }
1617 
1618 /*
1619  * Identify the port by blinking the port's LED.
1620  */
1621 static int cxgb4vf_phys_id(struct net_device *dev,
1622 			   enum ethtool_phys_id_state state)
1623 {
1624 	unsigned int val;
1625 	struct port_info *pi = netdev_priv(dev);
1626 
1627 	if (state == ETHTOOL_ID_ACTIVE)
1628 		val = 0xffff;
1629 	else if (state == ETHTOOL_ID_INACTIVE)
1630 		val = 0;
1631 	else
1632 		return -EINVAL;
1633 
1634 	return t4vf_identify_port(pi->adapter, pi->viid, val);
1635 }
1636 
1637 /*
1638  * Port stats maintained per queue of the port.
1639  */
1640 struct queue_port_stats {
1641 	u64 tso;
1642 	u64 tx_csum;
1643 	u64 rx_csum;
1644 	u64 vlan_ex;
1645 	u64 vlan_ins;
1646 	u64 lro_pkts;
1647 	u64 lro_merged;
1648 };
1649 
1650 /*
1651  * Strings for the ETH_SS_STATS statistics set ("ethtool -S").  Note that
1652  * these need to match the order of statistics returned by
1653  * t4vf_get_port_stats().
1654  */
1655 static const char stats_strings[][ETH_GSTRING_LEN] = {
1656 	/*
1657 	 * These must match the layout of the t4vf_port_stats structure.
1658 	 */
1659 	"TxBroadcastBytes  ",
1660 	"TxBroadcastFrames ",
1661 	"TxMulticastBytes  ",
1662 	"TxMulticastFrames ",
1663 	"TxUnicastBytes    ",
1664 	"TxUnicastFrames   ",
1665 	"TxDroppedFrames   ",
1666 	"TxOffloadBytes    ",
1667 	"TxOffloadFrames   ",
1668 	"RxBroadcastBytes  ",
1669 	"RxBroadcastFrames ",
1670 	"RxMulticastBytes  ",
1671 	"RxMulticastFrames ",
1672 	"RxUnicastBytes    ",
1673 	"RxUnicastFrames   ",
1674 	"RxErrorFrames     ",
1675 
1676 	/*
1677 	 * These are accumulated per-queue statistics and must match the
1678 	 * order of the fields in the queue_port_stats structure.
1679 	 */
1680 	"TSO               ",
1681 	"TxCsumOffload     ",
1682 	"RxCsumGood        ",
1683 	"VLANextractions   ",
1684 	"VLANinsertions    ",
1685 	"GROPackets        ",
1686 	"GROMerged         ",
1687 };
1688 
1689 /*
1690  * Return the number of statistics in the specified statistics set.
1691  */
1692 static int cxgb4vf_get_sset_count(struct net_device *dev, int sset)
1693 {
1694 	switch (sset) {
1695 	case ETH_SS_STATS:
1696 		return ARRAY_SIZE(stats_strings);
1697 	default:
1698 		return -EOPNOTSUPP;
1699 	}
1700 	/*NOTREACHED*/
1701 }
1702 
1703 /*
1704  * Return the strings for the specified statistics set.
1705  */
1706 static void cxgb4vf_get_strings(struct net_device *dev,
1707 				u32 sset,
1708 				u8 *data)
1709 {
1710 	switch (sset) {
1711 	case ETH_SS_STATS:
1712 		memcpy(data, stats_strings, sizeof(stats_strings));
1713 		break;
1714 	}
1715 }
1716 
1717 /*
1718  * Small utility routine to accumulate queue statistics across the queues of
1719  * a "port".
1720  */
1721 static void collect_sge_port_stats(const struct adapter *adapter,
1722 				   const struct port_info *pi,
1723 				   struct queue_port_stats *stats)
1724 {
1725 	const struct sge_eth_txq *txq = &adapter->sge.ethtxq[pi->first_qset];
1726 	const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[pi->first_qset];
1727 	int qs;
1728 
1729 	memset(stats, 0, sizeof(*stats));
1730 	for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
1731 		stats->tso += txq->tso;
1732 		stats->tx_csum += txq->tx_cso;
1733 		stats->rx_csum += rxq->stats.rx_cso;
1734 		stats->vlan_ex += rxq->stats.vlan_ex;
1735 		stats->vlan_ins += txq->vlan_ins;
1736 		stats->lro_pkts += rxq->stats.lro_pkts;
1737 		stats->lro_merged += rxq->stats.lro_merged;
1738 	}
1739 }
1740 
1741 /*
1742  * Return the ETH_SS_STATS statistics set.
1743  */
1744 static void cxgb4vf_get_ethtool_stats(struct net_device *dev,
1745 				      struct ethtool_stats *stats,
1746 				      u64 *data)
1747 {
1748 	struct port_info *pi = netdev2pinfo(dev);
1749 	struct adapter *adapter = pi->adapter;
1750 	int err = t4vf_get_port_stats(adapter, pi->pidx,
1751 				      (struct t4vf_port_stats *)data);
1752 	if (err)
1753 		memset(data, 0, sizeof(struct t4vf_port_stats));
1754 
1755 	data += sizeof(struct t4vf_port_stats) / sizeof(u64);
1756 	collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
1757 }
1758 
1759 /*
1760  * Return the size of our register map.
1761  */
1762 static int cxgb4vf_get_regs_len(struct net_device *dev)
1763 {
1764 	return T4VF_REGMAP_SIZE;
1765 }
1766 
1767 /*
1768  * Dump a block of registers, start to end inclusive, into a buffer.
1769  */
1770 static void reg_block_dump(struct adapter *adapter, void *regbuf,
1771 			   unsigned int start, unsigned int end)
1772 {
1773 	u32 *bp = regbuf + start - T4VF_REGMAP_START;
1774 
1775 	for ( ; start <= end; start += sizeof(u32)) {
1776 		/*
1777 		 * Avoid reading the Mailbox Control register since that
1778 		 * can trigger a Mailbox Ownership Arbitration cycle and
1779 		 * interfere with communication with the firmware.
1780 		 */
1781 		if (start == T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL)
1782 			*bp++ = 0xffff;
1783 		else
1784 			*bp++ = t4_read_reg(adapter, start);
1785 	}
1786 }
1787 
1788 /*
1789  * Copy our entire register map into the provided buffer.
1790  */
1791 static void cxgb4vf_get_regs(struct net_device *dev,
1792 			     struct ethtool_regs *regs,
1793 			     void *regbuf)
1794 {
1795 	struct adapter *adapter = netdev2adap(dev);
1796 
1797 	regs->version = mk_adap_vers(adapter);
1798 
1799 	/*
1800 	 * Fill in register buffer with our register map.
1801 	 */
1802 	memset(regbuf, 0, T4VF_REGMAP_SIZE);
1803 
1804 	reg_block_dump(adapter, regbuf,
1805 		       T4VF_SGE_BASE_ADDR + T4VF_MOD_MAP_SGE_FIRST,
1806 		       T4VF_SGE_BASE_ADDR + T4VF_MOD_MAP_SGE_LAST);
1807 	reg_block_dump(adapter, regbuf,
1808 		       T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_FIRST,
1809 		       T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_LAST);
1810 
1811 	/* T5 adds new registers in the PL Register map.
1812 	 */
1813 	reg_block_dump(adapter, regbuf,
1814 		       T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_FIRST,
1815 		       T4VF_PL_BASE_ADDR + (is_t4(adapter->params.chip)
1816 		       ? PL_VF_WHOAMI_A : PL_VF_REVISION_A));
1817 	reg_block_dump(adapter, regbuf,
1818 		       T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_FIRST,
1819 		       T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_LAST);
1820 
1821 	reg_block_dump(adapter, regbuf,
1822 		       T4VF_MBDATA_BASE_ADDR + T4VF_MBDATA_FIRST,
1823 		       T4VF_MBDATA_BASE_ADDR + T4VF_MBDATA_LAST);
1824 }
1825 
1826 /*
1827  * Report current Wake On LAN settings.
1828  */
1829 static void cxgb4vf_get_wol(struct net_device *dev,
1830 			    struct ethtool_wolinfo *wol)
1831 {
1832 	wol->supported = 0;
1833 	wol->wolopts = 0;
1834 	memset(&wol->sopass, 0, sizeof(wol->sopass));
1835 }
1836 
1837 /*
1838  * TCP Segmentation Offload flags which we support.
1839  */
1840 #define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
1841 
1842 static const struct ethtool_ops cxgb4vf_ethtool_ops = {
1843 	.get_link_ksettings	= cxgb4vf_get_link_ksettings,
1844 	.get_fecparam		= cxgb4vf_get_fecparam,
1845 	.get_drvinfo		= cxgb4vf_get_drvinfo,
1846 	.get_msglevel		= cxgb4vf_get_msglevel,
1847 	.set_msglevel		= cxgb4vf_set_msglevel,
1848 	.get_ringparam		= cxgb4vf_get_ringparam,
1849 	.set_ringparam		= cxgb4vf_set_ringparam,
1850 	.get_coalesce		= cxgb4vf_get_coalesce,
1851 	.set_coalesce		= cxgb4vf_set_coalesce,
1852 	.get_pauseparam		= cxgb4vf_get_pauseparam,
1853 	.get_link		= ethtool_op_get_link,
1854 	.get_strings		= cxgb4vf_get_strings,
1855 	.set_phys_id		= cxgb4vf_phys_id,
1856 	.get_sset_count		= cxgb4vf_get_sset_count,
1857 	.get_ethtool_stats	= cxgb4vf_get_ethtool_stats,
1858 	.get_regs_len		= cxgb4vf_get_regs_len,
1859 	.get_regs		= cxgb4vf_get_regs,
1860 	.get_wol		= cxgb4vf_get_wol,
1861 };
1862 
1863 /*
1864  * /sys/kernel/debug/cxgb4vf support code and data.
1865  * ================================================
1866  */
1867 
1868 /*
1869  * Show Firmware Mailbox Command/Reply Log
1870  *
1871  * Note that we don't do any locking when dumping the Firmware Mailbox Log so
1872  * it's possible that we can catch things during a log update and therefore
1873  * see partially corrupted log entries.  But i9t's probably Good Enough(tm).
1874  * If we ever decide that we want to make sure that we're dumping a coherent
1875  * log, we'd need to perform locking in the mailbox logging and in
1876  * mboxlog_open() where we'd need to grab the entire mailbox log in one go
1877  * like we do for the Firmware Device Log.  But as stated above, meh ...
1878  */
1879 static int mboxlog_show(struct seq_file *seq, void *v)
1880 {
1881 	struct adapter *adapter = seq->private;
1882 	struct mbox_cmd_log *log = adapter->mbox_log;
1883 	struct mbox_cmd *entry;
1884 	int entry_idx, i;
1885 
1886 	if (v == SEQ_START_TOKEN) {
1887 		seq_printf(seq,
1888 			   "%10s  %15s  %5s  %5s  %s\n",
1889 			   "Seq#", "Tstamp", "Atime", "Etime",
1890 			   "Command/Reply");
1891 		return 0;
1892 	}
1893 
1894 	entry_idx = log->cursor + ((uintptr_t)v - 2);
1895 	if (entry_idx >= log->size)
1896 		entry_idx -= log->size;
1897 	entry = mbox_cmd_log_entry(log, entry_idx);
1898 
1899 	/* skip over unused entries */
1900 	if (entry->timestamp == 0)
1901 		return 0;
1902 
1903 	seq_printf(seq, "%10u  %15llu  %5d  %5d",
1904 		   entry->seqno, entry->timestamp,
1905 		   entry->access, entry->execute);
1906 	for (i = 0; i < MBOX_LEN / 8; i++) {
1907 		u64 flit = entry->cmd[i];
1908 		u32 hi = (u32)(flit >> 32);
1909 		u32 lo = (u32)flit;
1910 
1911 		seq_printf(seq, "  %08x %08x", hi, lo);
1912 	}
1913 	seq_puts(seq, "\n");
1914 	return 0;
1915 }
1916 
1917 static inline void *mboxlog_get_idx(struct seq_file *seq, loff_t pos)
1918 {
1919 	struct adapter *adapter = seq->private;
1920 	struct mbox_cmd_log *log = adapter->mbox_log;
1921 
1922 	return ((pos <= log->size) ? (void *)(uintptr_t)(pos + 1) : NULL);
1923 }
1924 
1925 static void *mboxlog_start(struct seq_file *seq, loff_t *pos)
1926 {
1927 	return *pos ? mboxlog_get_idx(seq, *pos) : SEQ_START_TOKEN;
1928 }
1929 
1930 static void *mboxlog_next(struct seq_file *seq, void *v, loff_t *pos)
1931 {
1932 	++*pos;
1933 	return mboxlog_get_idx(seq, *pos);
1934 }
1935 
1936 static void mboxlog_stop(struct seq_file *seq, void *v)
1937 {
1938 }
1939 
1940 static const struct seq_operations mboxlog_seq_ops = {
1941 	.start = mboxlog_start,
1942 	.next  = mboxlog_next,
1943 	.stop  = mboxlog_stop,
1944 	.show  = mboxlog_show
1945 };
1946 
1947 static int mboxlog_open(struct inode *inode, struct file *file)
1948 {
1949 	int res = seq_open(file, &mboxlog_seq_ops);
1950 
1951 	if (!res) {
1952 		struct seq_file *seq = file->private_data;
1953 
1954 		seq->private = inode->i_private;
1955 	}
1956 	return res;
1957 }
1958 
1959 static const struct file_operations mboxlog_fops = {
1960 	.owner   = THIS_MODULE,
1961 	.open    = mboxlog_open,
1962 	.read    = seq_read,
1963 	.llseek  = seq_lseek,
1964 	.release = seq_release,
1965 };
1966 
1967 /*
1968  * Show SGE Queue Set information.  We display QPL Queues Sets per line.
1969  */
1970 #define QPL	4
1971 
1972 static int sge_qinfo_show(struct seq_file *seq, void *v)
1973 {
1974 	struct adapter *adapter = seq->private;
1975 	int eth_entries = DIV_ROUND_UP(adapter->sge.ethqsets, QPL);
1976 	int qs, r = (uintptr_t)v - 1;
1977 
1978 	if (r)
1979 		seq_putc(seq, '\n');
1980 
1981 	#define S3(fmt_spec, s, v) \
1982 		do {\
1983 			seq_printf(seq, "%-12s", s); \
1984 			for (qs = 0; qs < n; ++qs) \
1985 				seq_printf(seq, " %16" fmt_spec, v); \
1986 			seq_putc(seq, '\n'); \
1987 		} while (0)
1988 	#define S(s, v)		S3("s", s, v)
1989 	#define T(s, v)		S3("u", s, txq[qs].v)
1990 	#define R(s, v)		S3("u", s, rxq[qs].v)
1991 
1992 	if (r < eth_entries) {
1993 		const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[r * QPL];
1994 		const struct sge_eth_txq *txq = &adapter->sge.ethtxq[r * QPL];
1995 		int n = min(QPL, adapter->sge.ethqsets - QPL * r);
1996 
1997 		S("QType:", "Ethernet");
1998 		S("Interface:",
1999 		  (rxq[qs].rspq.netdev
2000 		   ? rxq[qs].rspq.netdev->name
2001 		   : "N/A"));
2002 		S3("d", "Port:",
2003 		   (rxq[qs].rspq.netdev
2004 		    ? ((struct port_info *)
2005 		       netdev_priv(rxq[qs].rspq.netdev))->port_id
2006 		    : -1));
2007 		T("TxQ ID:", q.abs_id);
2008 		T("TxQ size:", q.size);
2009 		T("TxQ inuse:", q.in_use);
2010 		T("TxQ PIdx:", q.pidx);
2011 		T("TxQ CIdx:", q.cidx);
2012 		R("RspQ ID:", rspq.abs_id);
2013 		R("RspQ size:", rspq.size);
2014 		R("RspQE size:", rspq.iqe_len);
2015 		S3("u", "Intr delay:", qtimer_val(adapter, &rxq[qs].rspq));
2016 		S3("u", "Intr pktcnt:",
2017 		   adapter->sge.counter_val[rxq[qs].rspq.pktcnt_idx]);
2018 		R("RspQ CIdx:", rspq.cidx);
2019 		R("RspQ Gen:", rspq.gen);
2020 		R("FL ID:", fl.abs_id);
2021 		R("FL size:", fl.size - MIN_FL_RESID);
2022 		R("FL avail:", fl.avail);
2023 		R("FL PIdx:", fl.pidx);
2024 		R("FL CIdx:", fl.cidx);
2025 		return 0;
2026 	}
2027 
2028 	r -= eth_entries;
2029 	if (r == 0) {
2030 		const struct sge_rspq *evtq = &adapter->sge.fw_evtq;
2031 
2032 		seq_printf(seq, "%-12s %16s\n", "QType:", "FW event queue");
2033 		seq_printf(seq, "%-12s %16u\n", "RspQ ID:", evtq->abs_id);
2034 		seq_printf(seq, "%-12s %16u\n", "Intr delay:",
2035 			   qtimer_val(adapter, evtq));
2036 		seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:",
2037 			   adapter->sge.counter_val[evtq->pktcnt_idx]);
2038 		seq_printf(seq, "%-12s %16u\n", "RspQ Cidx:", evtq->cidx);
2039 		seq_printf(seq, "%-12s %16u\n", "RspQ Gen:", evtq->gen);
2040 	} else if (r == 1) {
2041 		const struct sge_rspq *intrq = &adapter->sge.intrq;
2042 
2043 		seq_printf(seq, "%-12s %16s\n", "QType:", "Interrupt Queue");
2044 		seq_printf(seq, "%-12s %16u\n", "RspQ ID:", intrq->abs_id);
2045 		seq_printf(seq, "%-12s %16u\n", "Intr delay:",
2046 			   qtimer_val(adapter, intrq));
2047 		seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:",
2048 			   adapter->sge.counter_val[intrq->pktcnt_idx]);
2049 		seq_printf(seq, "%-12s %16u\n", "RspQ Cidx:", intrq->cidx);
2050 		seq_printf(seq, "%-12s %16u\n", "RspQ Gen:", intrq->gen);
2051 	}
2052 
2053 	#undef R
2054 	#undef T
2055 	#undef S
2056 	#undef S3
2057 
2058 	return 0;
2059 }
2060 
2061 /*
2062  * Return the number of "entries" in our "file".  We group the multi-Queue
2063  * sections with QPL Queue Sets per "entry".  The sections of the output are:
2064  *
2065  *     Ethernet RX/TX Queue Sets
2066  *     Firmware Event Queue
2067  *     Forwarded Interrupt Queue (if in MSI mode)
2068  */
2069 static int sge_queue_entries(const struct adapter *adapter)
2070 {
2071 	return DIV_ROUND_UP(adapter->sge.ethqsets, QPL) + 1 +
2072 		((adapter->flags & USING_MSI) != 0);
2073 }
2074 
2075 static void *sge_queue_start(struct seq_file *seq, loff_t *pos)
2076 {
2077 	int entries = sge_queue_entries(seq->private);
2078 
2079 	return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
2080 }
2081 
2082 static void sge_queue_stop(struct seq_file *seq, void *v)
2083 {
2084 }
2085 
2086 static void *sge_queue_next(struct seq_file *seq, void *v, loff_t *pos)
2087 {
2088 	int entries = sge_queue_entries(seq->private);
2089 
2090 	++*pos;
2091 	return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
2092 }
2093 
2094 static const struct seq_operations sge_qinfo_seq_ops = {
2095 	.start = sge_queue_start,
2096 	.next  = sge_queue_next,
2097 	.stop  = sge_queue_stop,
2098 	.show  = sge_qinfo_show
2099 };
2100 
2101 static int sge_qinfo_open(struct inode *inode, struct file *file)
2102 {
2103 	int res = seq_open(file, &sge_qinfo_seq_ops);
2104 
2105 	if (!res) {
2106 		struct seq_file *seq = file->private_data;
2107 		seq->private = inode->i_private;
2108 	}
2109 	return res;
2110 }
2111 
2112 static const struct file_operations sge_qinfo_debugfs_fops = {
2113 	.owner   = THIS_MODULE,
2114 	.open    = sge_qinfo_open,
2115 	.read    = seq_read,
2116 	.llseek  = seq_lseek,
2117 	.release = seq_release,
2118 };
2119 
2120 /*
2121  * Show SGE Queue Set statistics.  We display QPL Queues Sets per line.
2122  */
2123 #define QPL	4
2124 
2125 static int sge_qstats_show(struct seq_file *seq, void *v)
2126 {
2127 	struct adapter *adapter = seq->private;
2128 	int eth_entries = DIV_ROUND_UP(adapter->sge.ethqsets, QPL);
2129 	int qs, r = (uintptr_t)v - 1;
2130 
2131 	if (r)
2132 		seq_putc(seq, '\n');
2133 
2134 	#define S3(fmt, s, v) \
2135 		do { \
2136 			seq_printf(seq, "%-16s", s); \
2137 			for (qs = 0; qs < n; ++qs) \
2138 				seq_printf(seq, " %8" fmt, v); \
2139 			seq_putc(seq, '\n'); \
2140 		} while (0)
2141 	#define S(s, v)		S3("s", s, v)
2142 
2143 	#define T3(fmt, s, v)	S3(fmt, s, txq[qs].v)
2144 	#define T(s, v)		T3("lu", s, v)
2145 
2146 	#define R3(fmt, s, v)	S3(fmt, s, rxq[qs].v)
2147 	#define R(s, v)		R3("lu", s, v)
2148 
2149 	if (r < eth_entries) {
2150 		const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[r * QPL];
2151 		const struct sge_eth_txq *txq = &adapter->sge.ethtxq[r * QPL];
2152 		int n = min(QPL, adapter->sge.ethqsets - QPL * r);
2153 
2154 		S("QType:", "Ethernet");
2155 		S("Interface:",
2156 		  (rxq[qs].rspq.netdev
2157 		   ? rxq[qs].rspq.netdev->name
2158 		   : "N/A"));
2159 		R3("u", "RspQNullInts:", rspq.unhandled_irqs);
2160 		R("RxPackets:", stats.pkts);
2161 		R("RxCSO:", stats.rx_cso);
2162 		R("VLANxtract:", stats.vlan_ex);
2163 		R("LROmerged:", stats.lro_merged);
2164 		R("LROpackets:", stats.lro_pkts);
2165 		R("RxDrops:", stats.rx_drops);
2166 		T("TSO:", tso);
2167 		T("TxCSO:", tx_cso);
2168 		T("VLANins:", vlan_ins);
2169 		T("TxQFull:", q.stops);
2170 		T("TxQRestarts:", q.restarts);
2171 		T("TxMapErr:", mapping_err);
2172 		R("FLAllocErr:", fl.alloc_failed);
2173 		R("FLLrgAlcErr:", fl.large_alloc_failed);
2174 		R("FLStarving:", fl.starving);
2175 		return 0;
2176 	}
2177 
2178 	r -= eth_entries;
2179 	if (r == 0) {
2180 		const struct sge_rspq *evtq = &adapter->sge.fw_evtq;
2181 
2182 		seq_printf(seq, "%-8s %16s\n", "QType:", "FW event queue");
2183 		seq_printf(seq, "%-16s %8u\n", "RspQNullInts:",
2184 			   evtq->unhandled_irqs);
2185 		seq_printf(seq, "%-16s %8u\n", "RspQ CIdx:", evtq->cidx);
2186 		seq_printf(seq, "%-16s %8u\n", "RspQ Gen:", evtq->gen);
2187 	} else if (r == 1) {
2188 		const struct sge_rspq *intrq = &adapter->sge.intrq;
2189 
2190 		seq_printf(seq, "%-8s %16s\n", "QType:", "Interrupt Queue");
2191 		seq_printf(seq, "%-16s %8u\n", "RspQNullInts:",
2192 			   intrq->unhandled_irqs);
2193 		seq_printf(seq, "%-16s %8u\n", "RspQ CIdx:", intrq->cidx);
2194 		seq_printf(seq, "%-16s %8u\n", "RspQ Gen:", intrq->gen);
2195 	}
2196 
2197 	#undef R
2198 	#undef T
2199 	#undef S
2200 	#undef R3
2201 	#undef T3
2202 	#undef S3
2203 
2204 	return 0;
2205 }
2206 
2207 /*
2208  * Return the number of "entries" in our "file".  We group the multi-Queue
2209  * sections with QPL Queue Sets per "entry".  The sections of the output are:
2210  *
2211  *     Ethernet RX/TX Queue Sets
2212  *     Firmware Event Queue
2213  *     Forwarded Interrupt Queue (if in MSI mode)
2214  */
2215 static int sge_qstats_entries(const struct adapter *adapter)
2216 {
2217 	return DIV_ROUND_UP(adapter->sge.ethqsets, QPL) + 1 +
2218 		((adapter->flags & USING_MSI) != 0);
2219 }
2220 
2221 static void *sge_qstats_start(struct seq_file *seq, loff_t *pos)
2222 {
2223 	int entries = sge_qstats_entries(seq->private);
2224 
2225 	return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
2226 }
2227 
2228 static void sge_qstats_stop(struct seq_file *seq, void *v)
2229 {
2230 }
2231 
2232 static void *sge_qstats_next(struct seq_file *seq, void *v, loff_t *pos)
2233 {
2234 	int entries = sge_qstats_entries(seq->private);
2235 
2236 	(*pos)++;
2237 	return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
2238 }
2239 
2240 static const struct seq_operations sge_qstats_seq_ops = {
2241 	.start = sge_qstats_start,
2242 	.next  = sge_qstats_next,
2243 	.stop  = sge_qstats_stop,
2244 	.show  = sge_qstats_show
2245 };
2246 
2247 static int sge_qstats_open(struct inode *inode, struct file *file)
2248 {
2249 	int res = seq_open(file, &sge_qstats_seq_ops);
2250 
2251 	if (res == 0) {
2252 		struct seq_file *seq = file->private_data;
2253 		seq->private = inode->i_private;
2254 	}
2255 	return res;
2256 }
2257 
2258 static const struct file_operations sge_qstats_proc_fops = {
2259 	.owner   = THIS_MODULE,
2260 	.open    = sge_qstats_open,
2261 	.read    = seq_read,
2262 	.llseek  = seq_lseek,
2263 	.release = seq_release,
2264 };
2265 
2266 /*
2267  * Show PCI-E SR-IOV Virtual Function Resource Limits.
2268  */
2269 static int resources_show(struct seq_file *seq, void *v)
2270 {
2271 	struct adapter *adapter = seq->private;
2272 	struct vf_resources *vfres = &adapter->params.vfres;
2273 
2274 	#define S(desc, fmt, var) \
2275 		seq_printf(seq, "%-60s " fmt "\n", \
2276 			   desc " (" #var "):", vfres->var)
2277 
2278 	S("Virtual Interfaces", "%d", nvi);
2279 	S("Egress Queues", "%d", neq);
2280 	S("Ethernet Control", "%d", nethctrl);
2281 	S("Ingress Queues/w Free Lists/Interrupts", "%d", niqflint);
2282 	S("Ingress Queues", "%d", niq);
2283 	S("Traffic Class", "%d", tc);
2284 	S("Port Access Rights Mask", "%#x", pmask);
2285 	S("MAC Address Filters", "%d", nexactf);
2286 	S("Firmware Command Read Capabilities", "%#x", r_caps);
2287 	S("Firmware Command Write/Execute Capabilities", "%#x", wx_caps);
2288 
2289 	#undef S
2290 
2291 	return 0;
2292 }
2293 
2294 static int resources_open(struct inode *inode, struct file *file)
2295 {
2296 	return single_open(file, resources_show, inode->i_private);
2297 }
2298 
2299 static const struct file_operations resources_proc_fops = {
2300 	.owner   = THIS_MODULE,
2301 	.open    = resources_open,
2302 	.read    = seq_read,
2303 	.llseek  = seq_lseek,
2304 	.release = single_release,
2305 };
2306 
2307 /*
2308  * Show Virtual Interfaces.
2309  */
2310 static int interfaces_show(struct seq_file *seq, void *v)
2311 {
2312 	if (v == SEQ_START_TOKEN) {
2313 		seq_puts(seq, "Interface  Port   VIID\n");
2314 	} else {
2315 		struct adapter *adapter = seq->private;
2316 		int pidx = (uintptr_t)v - 2;
2317 		struct net_device *dev = adapter->port[pidx];
2318 		struct port_info *pi = netdev_priv(dev);
2319 
2320 		seq_printf(seq, "%9s  %4d  %#5x\n",
2321 			   dev->name, pi->port_id, pi->viid);
2322 	}
2323 	return 0;
2324 }
2325 
2326 static inline void *interfaces_get_idx(struct adapter *adapter, loff_t pos)
2327 {
2328 	return pos <= adapter->params.nports
2329 		? (void *)(uintptr_t)(pos + 1)
2330 		: NULL;
2331 }
2332 
2333 static void *interfaces_start(struct seq_file *seq, loff_t *pos)
2334 {
2335 	return *pos
2336 		? interfaces_get_idx(seq->private, *pos)
2337 		: SEQ_START_TOKEN;
2338 }
2339 
2340 static void *interfaces_next(struct seq_file *seq, void *v, loff_t *pos)
2341 {
2342 	(*pos)++;
2343 	return interfaces_get_idx(seq->private, *pos);
2344 }
2345 
2346 static void interfaces_stop(struct seq_file *seq, void *v)
2347 {
2348 }
2349 
2350 static const struct seq_operations interfaces_seq_ops = {
2351 	.start = interfaces_start,
2352 	.next  = interfaces_next,
2353 	.stop  = interfaces_stop,
2354 	.show  = interfaces_show
2355 };
2356 
2357 static int interfaces_open(struct inode *inode, struct file *file)
2358 {
2359 	int res = seq_open(file, &interfaces_seq_ops);
2360 
2361 	if (res == 0) {
2362 		struct seq_file *seq = file->private_data;
2363 		seq->private = inode->i_private;
2364 	}
2365 	return res;
2366 }
2367 
2368 static const struct file_operations interfaces_proc_fops = {
2369 	.owner   = THIS_MODULE,
2370 	.open    = interfaces_open,
2371 	.read    = seq_read,
2372 	.llseek  = seq_lseek,
2373 	.release = seq_release,
2374 };
2375 
2376 /*
2377  * /sys/kernel/debugfs/cxgb4vf/ files list.
2378  */
2379 struct cxgb4vf_debugfs_entry {
2380 	const char *name;		/* name of debugfs node */
2381 	umode_t mode;			/* file system mode */
2382 	const struct file_operations *fops;
2383 };
2384 
2385 static struct cxgb4vf_debugfs_entry debugfs_files[] = {
2386 	{ "mboxlog",    S_IRUGO, &mboxlog_fops },
2387 	{ "sge_qinfo",  S_IRUGO, &sge_qinfo_debugfs_fops },
2388 	{ "sge_qstats", S_IRUGO, &sge_qstats_proc_fops },
2389 	{ "resources",  S_IRUGO, &resources_proc_fops },
2390 	{ "interfaces", S_IRUGO, &interfaces_proc_fops },
2391 };
2392 
2393 /*
2394  * Module and device initialization and cleanup code.
2395  * ==================================================
2396  */
2397 
2398 /*
2399  * Set up out /sys/kernel/debug/cxgb4vf sub-nodes.  We assume that the
2400  * directory (debugfs_root) has already been set up.
2401  */
2402 static int setup_debugfs(struct adapter *adapter)
2403 {
2404 	int i;
2405 
2406 	BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
2407 
2408 	/*
2409 	 * Debugfs support is best effort.
2410 	 */
2411 	for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
2412 		(void)debugfs_create_file(debugfs_files[i].name,
2413 				  debugfs_files[i].mode,
2414 				  adapter->debugfs_root,
2415 				  (void *)adapter,
2416 				  debugfs_files[i].fops);
2417 
2418 	return 0;
2419 }
2420 
2421 /*
2422  * Tear down the /sys/kernel/debug/cxgb4vf sub-nodes created above.  We leave
2423  * it to our caller to tear down the directory (debugfs_root).
2424  */
2425 static void cleanup_debugfs(struct adapter *adapter)
2426 {
2427 	BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
2428 
2429 	/*
2430 	 * Unlike our sister routine cleanup_proc(), we don't need to remove
2431 	 * individual entries because a call will be made to
2432 	 * debugfs_remove_recursive().  We just need to clean up any ancillary
2433 	 * persistent state.
2434 	 */
2435 	/* nothing to do */
2436 }
2437 
2438 /* Figure out how many Ports and Queue Sets we can support.  This depends on
2439  * knowing our Virtual Function Resources and may be called a second time if
2440  * we fall back from MSI-X to MSI Interrupt Mode.
2441  */
2442 static void size_nports_qsets(struct adapter *adapter)
2443 {
2444 	struct vf_resources *vfres = &adapter->params.vfres;
2445 	unsigned int ethqsets, pmask_nports;
2446 
2447 	/* The number of "ports" which we support is equal to the number of
2448 	 * Virtual Interfaces with which we've been provisioned.
2449 	 */
2450 	adapter->params.nports = vfres->nvi;
2451 	if (adapter->params.nports > MAX_NPORTS) {
2452 		dev_warn(adapter->pdev_dev, "only using %d of %d maximum"
2453 			 " allowed virtual interfaces\n", MAX_NPORTS,
2454 			 adapter->params.nports);
2455 		adapter->params.nports = MAX_NPORTS;
2456 	}
2457 
2458 	/* We may have been provisioned with more VIs than the number of
2459 	 * ports we're allowed to access (our Port Access Rights Mask).
2460 	 * This is obviously a configuration conflict but we don't want to
2461 	 * crash the kernel or anything silly just because of that.
2462 	 */
2463 	pmask_nports = hweight32(adapter->params.vfres.pmask);
2464 	if (pmask_nports < adapter->params.nports) {
2465 		dev_warn(adapter->pdev_dev, "only using %d of %d provisioned"
2466 			 " virtual interfaces; limited by Port Access Rights"
2467 			 " mask %#x\n", pmask_nports, adapter->params.nports,
2468 			 adapter->params.vfres.pmask);
2469 		adapter->params.nports = pmask_nports;
2470 	}
2471 
2472 	/* We need to reserve an Ingress Queue for the Asynchronous Firmware
2473 	 * Event Queue.  And if we're using MSI Interrupts, we'll also need to
2474 	 * reserve an Ingress Queue for a Forwarded Interrupts.
2475 	 *
2476 	 * The rest of the FL/Intr-capable ingress queues will be matched up
2477 	 * one-for-one with Ethernet/Control egress queues in order to form
2478 	 * "Queue Sets" which will be aportioned between the "ports".  For
2479 	 * each Queue Set, we'll need the ability to allocate two Egress
2480 	 * Contexts -- one for the Ingress Queue Free List and one for the TX
2481 	 * Ethernet Queue.
2482 	 *
2483 	 * Note that even if we're currently configured to use MSI-X
2484 	 * Interrupts (module variable msi == MSI_MSIX) we may get downgraded
2485 	 * to MSI Interrupts if we can't get enough MSI-X Interrupts.  If that
2486 	 * happens we'll need to adjust things later.
2487 	 */
2488 	ethqsets = vfres->niqflint - 1 - (msi == MSI_MSI);
2489 	if (vfres->nethctrl != ethqsets)
2490 		ethqsets = min(vfres->nethctrl, ethqsets);
2491 	if (vfres->neq < ethqsets*2)
2492 		ethqsets = vfres->neq/2;
2493 	if (ethqsets > MAX_ETH_QSETS)
2494 		ethqsets = MAX_ETH_QSETS;
2495 	adapter->sge.max_ethqsets = ethqsets;
2496 
2497 	if (adapter->sge.max_ethqsets < adapter->params.nports) {
2498 		dev_warn(adapter->pdev_dev, "only using %d of %d available"
2499 			 " virtual interfaces (too few Queue Sets)\n",
2500 			 adapter->sge.max_ethqsets, adapter->params.nports);
2501 		adapter->params.nports = adapter->sge.max_ethqsets;
2502 	}
2503 }
2504 
2505 /*
2506  * Perform early "adapter" initialization.  This is where we discover what
2507  * adapter parameters we're going to be using and initialize basic adapter
2508  * hardware support.
2509  */
2510 static int adap_init0(struct adapter *adapter)
2511 {
2512 	struct sge_params *sge_params = &adapter->params.sge;
2513 	struct sge *s = &adapter->sge;
2514 	int err;
2515 	u32 param, val = 0;
2516 
2517 	/*
2518 	 * Some environments do not properly handle PCIE FLRs -- e.g. in Linux
2519 	 * 2.6.31 and later we can't call pci_reset_function() in order to
2520 	 * issue an FLR because of a self- deadlock on the device semaphore.
2521 	 * Meanwhile, the OS infrastructure doesn't issue FLRs in all the
2522 	 * cases where they're needed -- for instance, some versions of KVM
2523 	 * fail to reset "Assigned Devices" when the VM reboots.  Therefore we
2524 	 * use the firmware based reset in order to reset any per function
2525 	 * state.
2526 	 */
2527 	err = t4vf_fw_reset(adapter);
2528 	if (err < 0) {
2529 		dev_err(adapter->pdev_dev, "FW reset failed: err=%d\n", err);
2530 		return err;
2531 	}
2532 
2533 	/*
2534 	 * Grab basic operational parameters.  These will predominantly have
2535 	 * been set up by the Physical Function Driver or will be hard coded
2536 	 * into the adapter.  We just have to live with them ...  Note that
2537 	 * we _must_ get our VPD parameters before our SGE parameters because
2538 	 * we need to know the adapter's core clock from the VPD in order to
2539 	 * properly decode the SGE Timer Values.
2540 	 */
2541 	err = t4vf_get_dev_params(adapter);
2542 	if (err) {
2543 		dev_err(adapter->pdev_dev, "unable to retrieve adapter"
2544 			" device parameters: err=%d\n", err);
2545 		return err;
2546 	}
2547 	err = t4vf_get_vpd_params(adapter);
2548 	if (err) {
2549 		dev_err(adapter->pdev_dev, "unable to retrieve adapter"
2550 			" VPD parameters: err=%d\n", err);
2551 		return err;
2552 	}
2553 	err = t4vf_get_sge_params(adapter);
2554 	if (err) {
2555 		dev_err(adapter->pdev_dev, "unable to retrieve adapter"
2556 			" SGE parameters: err=%d\n", err);
2557 		return err;
2558 	}
2559 	err = t4vf_get_rss_glb_config(adapter);
2560 	if (err) {
2561 		dev_err(adapter->pdev_dev, "unable to retrieve adapter"
2562 			" RSS parameters: err=%d\n", err);
2563 		return err;
2564 	}
2565 	if (adapter->params.rss.mode !=
2566 	    FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
2567 		dev_err(adapter->pdev_dev, "unable to operate with global RSS"
2568 			" mode %d\n", adapter->params.rss.mode);
2569 		return -EINVAL;
2570 	}
2571 	err = t4vf_sge_init(adapter);
2572 	if (err) {
2573 		dev_err(adapter->pdev_dev, "unable to use adapter parameters:"
2574 			" err=%d\n", err);
2575 		return err;
2576 	}
2577 
2578 	/* If we're running on newer firmware, let it know that we're
2579 	 * prepared to deal with encapsulated CPL messages.  Older
2580 	 * firmware won't understand this and we'll just get
2581 	 * unencapsulated messages ...
2582 	 */
2583 	param = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
2584 		FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP);
2585 	val = 1;
2586 	(void) t4vf_set_params(adapter, 1, &param, &val);
2587 
2588 	/*
2589 	 * Retrieve our RX interrupt holdoff timer values and counter
2590 	 * threshold values from the SGE parameters.
2591 	 */
2592 	s->timer_val[0] = core_ticks_to_us(adapter,
2593 		TIMERVALUE0_G(sge_params->sge_timer_value_0_and_1));
2594 	s->timer_val[1] = core_ticks_to_us(adapter,
2595 		TIMERVALUE1_G(sge_params->sge_timer_value_0_and_1));
2596 	s->timer_val[2] = core_ticks_to_us(adapter,
2597 		TIMERVALUE0_G(sge_params->sge_timer_value_2_and_3));
2598 	s->timer_val[3] = core_ticks_to_us(adapter,
2599 		TIMERVALUE1_G(sge_params->sge_timer_value_2_and_3));
2600 	s->timer_val[4] = core_ticks_to_us(adapter,
2601 		TIMERVALUE0_G(sge_params->sge_timer_value_4_and_5));
2602 	s->timer_val[5] = core_ticks_to_us(adapter,
2603 		TIMERVALUE1_G(sge_params->sge_timer_value_4_and_5));
2604 
2605 	s->counter_val[0] = THRESHOLD_0_G(sge_params->sge_ingress_rx_threshold);
2606 	s->counter_val[1] = THRESHOLD_1_G(sge_params->sge_ingress_rx_threshold);
2607 	s->counter_val[2] = THRESHOLD_2_G(sge_params->sge_ingress_rx_threshold);
2608 	s->counter_val[3] = THRESHOLD_3_G(sge_params->sge_ingress_rx_threshold);
2609 
2610 	/*
2611 	 * Grab our Virtual Interface resource allocation, extract the
2612 	 * features that we're interested in and do a bit of sanity testing on
2613 	 * what we discover.
2614 	 */
2615 	err = t4vf_get_vfres(adapter);
2616 	if (err) {
2617 		dev_err(adapter->pdev_dev, "unable to get virtual interface"
2618 			" resources: err=%d\n", err);
2619 		return err;
2620 	}
2621 
2622 	/* Check for various parameter sanity issues */
2623 	if (adapter->params.vfres.pmask == 0) {
2624 		dev_err(adapter->pdev_dev, "no port access configured\n"
2625 			"usable!\n");
2626 		return -EINVAL;
2627 	}
2628 	if (adapter->params.vfres.nvi == 0) {
2629 		dev_err(adapter->pdev_dev, "no virtual interfaces configured/"
2630 			"usable!\n");
2631 		return -EINVAL;
2632 	}
2633 
2634 	/* Initialize nports and max_ethqsets now that we have our Virtual
2635 	 * Function Resources.
2636 	 */
2637 	size_nports_qsets(adapter);
2638 
2639 	return 0;
2640 }
2641 
2642 static inline void init_rspq(struct sge_rspq *rspq, u8 timer_idx,
2643 			     u8 pkt_cnt_idx, unsigned int size,
2644 			     unsigned int iqe_size)
2645 {
2646 	rspq->intr_params = (QINTR_TIMER_IDX_V(timer_idx) |
2647 			     (pkt_cnt_idx < SGE_NCOUNTERS ?
2648 			      QINTR_CNT_EN_F : 0));
2649 	rspq->pktcnt_idx = (pkt_cnt_idx < SGE_NCOUNTERS
2650 			    ? pkt_cnt_idx
2651 			    : 0);
2652 	rspq->iqe_len = iqe_size;
2653 	rspq->size = size;
2654 }
2655 
2656 /*
2657  * Perform default configuration of DMA queues depending on the number and
2658  * type of ports we found and the number of available CPUs.  Most settings can
2659  * be modified by the admin via ethtool and cxgbtool prior to the adapter
2660  * being brought up for the first time.
2661  */
2662 static void cfg_queues(struct adapter *adapter)
2663 {
2664 	struct sge *s = &adapter->sge;
2665 	int q10g, n10g, qidx, pidx, qs;
2666 	size_t iqe_size;
2667 
2668 	/*
2669 	 * We should not be called till we know how many Queue Sets we can
2670 	 * support.  In particular, this means that we need to know what kind
2671 	 * of interrupts we'll be using ...
2672 	 */
2673 	BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0);
2674 
2675 	/*
2676 	 * Count the number of 10GbE Virtual Interfaces that we have.
2677 	 */
2678 	n10g = 0;
2679 	for_each_port(adapter, pidx)
2680 		n10g += is_x_10g_port(&adap2pinfo(adapter, pidx)->link_cfg);
2681 
2682 	/*
2683 	 * We default to 1 queue per non-10G port and up to # of cores queues
2684 	 * per 10G port.
2685 	 */
2686 	if (n10g == 0)
2687 		q10g = 0;
2688 	else {
2689 		int n1g = (adapter->params.nports - n10g);
2690 		q10g = (adapter->sge.max_ethqsets - n1g) / n10g;
2691 		if (q10g > num_online_cpus())
2692 			q10g = num_online_cpus();
2693 	}
2694 
2695 	/*
2696 	 * Allocate the "Queue Sets" to the various Virtual Interfaces.
2697 	 * The layout will be established in setup_sge_queues() when the
2698 	 * adapter is brough up for the first time.
2699 	 */
2700 	qidx = 0;
2701 	for_each_port(adapter, pidx) {
2702 		struct port_info *pi = adap2pinfo(adapter, pidx);
2703 
2704 		pi->first_qset = qidx;
2705 		pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
2706 		qidx += pi->nqsets;
2707 	}
2708 	s->ethqsets = qidx;
2709 
2710 	/*
2711 	 * The Ingress Queue Entry Size for our various Response Queues needs
2712 	 * to be big enough to accommodate the largest message we can receive
2713 	 * from the chip/firmware; which is 64 bytes ...
2714 	 */
2715 	iqe_size = 64;
2716 
2717 	/*
2718 	 * Set up default Queue Set parameters ...  Start off with the
2719 	 * shortest interrupt holdoff timer.
2720 	 */
2721 	for (qs = 0; qs < s->max_ethqsets; qs++) {
2722 		struct sge_eth_rxq *rxq = &s->ethrxq[qs];
2723 		struct sge_eth_txq *txq = &s->ethtxq[qs];
2724 
2725 		init_rspq(&rxq->rspq, 0, 0, 1024, iqe_size);
2726 		rxq->fl.size = 72;
2727 		txq->q.size = 1024;
2728 	}
2729 
2730 	/*
2731 	 * The firmware event queue is used for link state changes and
2732 	 * notifications of TX DMA completions.
2733 	 */
2734 	init_rspq(&s->fw_evtq, SGE_TIMER_RSTRT_CNTR, 0, 512, iqe_size);
2735 
2736 	/*
2737 	 * The forwarded interrupt queue is used when we're in MSI interrupt
2738 	 * mode.  In this mode all interrupts associated with RX queues will
2739 	 * be forwarded to a single queue which we'll associate with our MSI
2740 	 * interrupt vector.  The messages dropped in the forwarded interrupt
2741 	 * queue will indicate which ingress queue needs servicing ...  This
2742 	 * queue needs to be large enough to accommodate all of the ingress
2743 	 * queues which are forwarding their interrupt (+1 to prevent the PIDX
2744 	 * from equalling the CIDX if every ingress queue has an outstanding
2745 	 * interrupt).  The queue doesn't need to be any larger because no
2746 	 * ingress queue will ever have more than one outstanding interrupt at
2747 	 * any time ...
2748 	 */
2749 	init_rspq(&s->intrq, SGE_TIMER_RSTRT_CNTR, 0, MSIX_ENTRIES + 1,
2750 		  iqe_size);
2751 }
2752 
2753 /*
2754  * Reduce the number of Ethernet queues across all ports to at most n.
2755  * n provides at least one queue per port.
2756  */
2757 static void reduce_ethqs(struct adapter *adapter, int n)
2758 {
2759 	int i;
2760 	struct port_info *pi;
2761 
2762 	/*
2763 	 * While we have too many active Ether Queue Sets, interate across the
2764 	 * "ports" and reduce their individual Queue Set allocations.
2765 	 */
2766 	BUG_ON(n < adapter->params.nports);
2767 	while (n < adapter->sge.ethqsets)
2768 		for_each_port(adapter, i) {
2769 			pi = adap2pinfo(adapter, i);
2770 			if (pi->nqsets > 1) {
2771 				pi->nqsets--;
2772 				adapter->sge.ethqsets--;
2773 				if (adapter->sge.ethqsets <= n)
2774 					break;
2775 			}
2776 		}
2777 
2778 	/*
2779 	 * Reassign the starting Queue Sets for each of the "ports" ...
2780 	 */
2781 	n = 0;
2782 	for_each_port(adapter, i) {
2783 		pi = adap2pinfo(adapter, i);
2784 		pi->first_qset = n;
2785 		n += pi->nqsets;
2786 	}
2787 }
2788 
2789 /*
2790  * We need to grab enough MSI-X vectors to cover our interrupt needs.  Ideally
2791  * we get a separate MSI-X vector for every "Queue Set" plus any extras we
2792  * need.  Minimally we need one for every Virtual Interface plus those needed
2793  * for our "extras".  Note that this process may lower the maximum number of
2794  * allowed Queue Sets ...
2795  */
2796 static int enable_msix(struct adapter *adapter)
2797 {
2798 	int i, want, need, nqsets;
2799 	struct msix_entry entries[MSIX_ENTRIES];
2800 	struct sge *s = &adapter->sge;
2801 
2802 	for (i = 0; i < MSIX_ENTRIES; ++i)
2803 		entries[i].entry = i;
2804 
2805 	/*
2806 	 * We _want_ enough MSI-X interrupts to cover all of our "Queue Sets"
2807 	 * plus those needed for our "extras" (for example, the firmware
2808 	 * message queue).  We _need_ at least one "Queue Set" per Virtual
2809 	 * Interface plus those needed for our "extras".  So now we get to see
2810 	 * if the song is right ...
2811 	 */
2812 	want = s->max_ethqsets + MSIX_EXTRAS;
2813 	need = adapter->params.nports + MSIX_EXTRAS;
2814 
2815 	want = pci_enable_msix_range(adapter->pdev, entries, need, want);
2816 	if (want < 0)
2817 		return want;
2818 
2819 	nqsets = want - MSIX_EXTRAS;
2820 	if (nqsets < s->max_ethqsets) {
2821 		dev_warn(adapter->pdev_dev, "only enough MSI-X vectors"
2822 			 " for %d Queue Sets\n", nqsets);
2823 		s->max_ethqsets = nqsets;
2824 		if (nqsets < s->ethqsets)
2825 			reduce_ethqs(adapter, nqsets);
2826 	}
2827 	for (i = 0; i < want; ++i)
2828 		adapter->msix_info[i].vec = entries[i].vector;
2829 
2830 	return 0;
2831 }
2832 
2833 static const struct net_device_ops cxgb4vf_netdev_ops	= {
2834 	.ndo_open		= cxgb4vf_open,
2835 	.ndo_stop		= cxgb4vf_stop,
2836 	.ndo_start_xmit		= t4vf_eth_xmit,
2837 	.ndo_get_stats		= cxgb4vf_get_stats,
2838 	.ndo_set_rx_mode	= cxgb4vf_set_rxmode,
2839 	.ndo_set_mac_address	= cxgb4vf_set_mac_addr,
2840 	.ndo_validate_addr	= eth_validate_addr,
2841 	.ndo_do_ioctl		= cxgb4vf_do_ioctl,
2842 	.ndo_change_mtu		= cxgb4vf_change_mtu,
2843 	.ndo_fix_features	= cxgb4vf_fix_features,
2844 	.ndo_set_features	= cxgb4vf_set_features,
2845 #ifdef CONFIG_NET_POLL_CONTROLLER
2846 	.ndo_poll_controller	= cxgb4vf_poll_controller,
2847 #endif
2848 };
2849 
2850 /*
2851  * "Probe" a device: initialize a device and construct all kernel and driver
2852  * state needed to manage the device.  This routine is called "init_one" in
2853  * the PF Driver ...
2854  */
2855 static int cxgb4vf_pci_probe(struct pci_dev *pdev,
2856 			     const struct pci_device_id *ent)
2857 {
2858 	int pci_using_dac;
2859 	int err, pidx;
2860 	unsigned int pmask;
2861 	struct adapter *adapter;
2862 	struct port_info *pi;
2863 	struct net_device *netdev;
2864 	unsigned int pf;
2865 
2866 	/*
2867 	 * Print our driver banner the first time we're called to initialize a
2868 	 * device.
2869 	 */
2870 	pr_info_once("%s - version %s\n", DRV_DESC, DRV_VERSION);
2871 
2872 	/*
2873 	 * Initialize generic PCI device state.
2874 	 */
2875 	err = pci_enable_device(pdev);
2876 	if (err) {
2877 		dev_err(&pdev->dev, "cannot enable PCI device\n");
2878 		return err;
2879 	}
2880 
2881 	/*
2882 	 * Reserve PCI resources for the device.  If we can't get them some
2883 	 * other driver may have already claimed the device ...
2884 	 */
2885 	err = pci_request_regions(pdev, KBUILD_MODNAME);
2886 	if (err) {
2887 		dev_err(&pdev->dev, "cannot obtain PCI resources\n");
2888 		goto err_disable_device;
2889 	}
2890 
2891 	/*
2892 	 * Set up our DMA mask: try for 64-bit address masking first and
2893 	 * fall back to 32-bit if we can't get 64 bits ...
2894 	 */
2895 	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
2896 	if (err == 0) {
2897 		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
2898 		if (err) {
2899 			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for"
2900 				" coherent allocations\n");
2901 			goto err_release_regions;
2902 		}
2903 		pci_using_dac = 1;
2904 	} else {
2905 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2906 		if (err != 0) {
2907 			dev_err(&pdev->dev, "no usable DMA configuration\n");
2908 			goto err_release_regions;
2909 		}
2910 		pci_using_dac = 0;
2911 	}
2912 
2913 	/*
2914 	 * Enable bus mastering for the device ...
2915 	 */
2916 	pci_set_master(pdev);
2917 
2918 	/*
2919 	 * Allocate our adapter data structure and attach it to the device.
2920 	 */
2921 	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
2922 	if (!adapter) {
2923 		err = -ENOMEM;
2924 		goto err_release_regions;
2925 	}
2926 	pci_set_drvdata(pdev, adapter);
2927 	adapter->pdev = pdev;
2928 	adapter->pdev_dev = &pdev->dev;
2929 
2930 	adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
2931 				    (sizeof(struct mbox_cmd) *
2932 				     T4VF_OS_LOG_MBOX_CMDS),
2933 				    GFP_KERNEL);
2934 	if (!adapter->mbox_log) {
2935 		err = -ENOMEM;
2936 		goto err_free_adapter;
2937 	}
2938 	adapter->mbox_log->size = T4VF_OS_LOG_MBOX_CMDS;
2939 
2940 	/*
2941 	 * Initialize SMP data synchronization resources.
2942 	 */
2943 	spin_lock_init(&adapter->stats_lock);
2944 	spin_lock_init(&adapter->mbox_lock);
2945 	INIT_LIST_HEAD(&adapter->mlist.list);
2946 
2947 	/*
2948 	 * Map our I/O registers in BAR0.
2949 	 */
2950 	adapter->regs = pci_ioremap_bar(pdev, 0);
2951 	if (!adapter->regs) {
2952 		dev_err(&pdev->dev, "cannot map device registers\n");
2953 		err = -ENOMEM;
2954 		goto err_free_adapter;
2955 	}
2956 
2957 	/* Wait for the device to become ready before proceeding ...
2958 	 */
2959 	err = t4vf_prep_adapter(adapter);
2960 	if (err) {
2961 		dev_err(adapter->pdev_dev, "device didn't become ready:"
2962 			" err=%d\n", err);
2963 		goto err_unmap_bar0;
2964 	}
2965 
2966 	/* For T5 and later we want to use the new BAR-based User Doorbells,
2967 	 * so we need to map BAR2 here ...
2968 	 */
2969 	if (!is_t4(adapter->params.chip)) {
2970 		adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
2971 					   pci_resource_len(pdev, 2));
2972 		if (!adapter->bar2) {
2973 			dev_err(adapter->pdev_dev, "cannot map BAR2 doorbells\n");
2974 			err = -ENOMEM;
2975 			goto err_unmap_bar0;
2976 		}
2977 	}
2978 	/*
2979 	 * Initialize adapter level features.
2980 	 */
2981 	adapter->name = pci_name(pdev);
2982 	adapter->msg_enable = DFLT_MSG_ENABLE;
2983 
2984 	/* If possible, we use PCIe Relaxed Ordering Attribute to deliver
2985 	 * Ingress Packet Data to Free List Buffers in order to allow for
2986 	 * chipset performance optimizations between the Root Complex and
2987 	 * Memory Controllers.  (Messages to the associated Ingress Queue
2988 	 * notifying new Packet Placement in the Free Lists Buffers will be
2989 	 * send without the Relaxed Ordering Attribute thus guaranteeing that
2990 	 * all preceding PCIe Transaction Layer Packets will be processed
2991 	 * first.)  But some Root Complexes have various issues with Upstream
2992 	 * Transaction Layer Packets with the Relaxed Ordering Attribute set.
2993 	 * The PCIe devices which under the Root Complexes will be cleared the
2994 	 * Relaxed Ordering bit in the configuration space, So we check our
2995 	 * PCIe configuration space to see if it's flagged with advice against
2996 	 * using Relaxed Ordering.
2997 	 */
2998 	if (!pcie_relaxed_ordering_enabled(pdev))
2999 		adapter->flags |= ROOT_NO_RELAXED_ORDERING;
3000 
3001 	err = adap_init0(adapter);
3002 	if (err)
3003 		goto err_unmap_bar;
3004 
3005 	/*
3006 	 * Allocate our "adapter ports" and stitch everything together.
3007 	 */
3008 	pmask = adapter->params.vfres.pmask;
3009 	pf = t4vf_get_pf_from_vf(adapter);
3010 	for_each_port(adapter, pidx) {
3011 		int port_id, viid;
3012 		u8 mac[ETH_ALEN];
3013 		unsigned int naddr = 1;
3014 
3015 		/*
3016 		 * We simplistically allocate our virtual interfaces
3017 		 * sequentially across the port numbers to which we have
3018 		 * access rights.  This should be configurable in some manner
3019 		 * ...
3020 		 */
3021 		if (pmask == 0)
3022 			break;
3023 		port_id = ffs(pmask) - 1;
3024 		pmask &= ~(1 << port_id);
3025 		viid = t4vf_alloc_vi(adapter, port_id);
3026 		if (viid < 0) {
3027 			dev_err(&pdev->dev, "cannot allocate VI for port %d:"
3028 				" err=%d\n", port_id, viid);
3029 			err = viid;
3030 			goto err_free_dev;
3031 		}
3032 
3033 		/*
3034 		 * Allocate our network device and stitch things together.
3035 		 */
3036 		netdev = alloc_etherdev_mq(sizeof(struct port_info),
3037 					   MAX_PORT_QSETS);
3038 		if (netdev == NULL) {
3039 			t4vf_free_vi(adapter, viid);
3040 			err = -ENOMEM;
3041 			goto err_free_dev;
3042 		}
3043 		adapter->port[pidx] = netdev;
3044 		SET_NETDEV_DEV(netdev, &pdev->dev);
3045 		pi = netdev_priv(netdev);
3046 		pi->adapter = adapter;
3047 		pi->pidx = pidx;
3048 		pi->port_id = port_id;
3049 		pi->viid = viid;
3050 
3051 		/*
3052 		 * Initialize the starting state of our "port" and register
3053 		 * it.
3054 		 */
3055 		pi->xact_addr_filt = -1;
3056 		netif_carrier_off(netdev);
3057 		netdev->irq = pdev->irq;
3058 
3059 		netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
3060 			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3061 			NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_RXCSUM;
3062 		netdev->vlan_features = NETIF_F_SG | TSO_FLAGS |
3063 			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3064 			NETIF_F_HIGHDMA;
3065 		netdev->features = netdev->hw_features |
3066 				   NETIF_F_HW_VLAN_CTAG_TX;
3067 		if (pci_using_dac)
3068 			netdev->features |= NETIF_F_HIGHDMA;
3069 
3070 		netdev->priv_flags |= IFF_UNICAST_FLT;
3071 		netdev->min_mtu = 81;
3072 		netdev->max_mtu = ETH_MAX_MTU;
3073 
3074 		netdev->netdev_ops = &cxgb4vf_netdev_ops;
3075 		netdev->ethtool_ops = &cxgb4vf_ethtool_ops;
3076 		netdev->dev_port = pi->port_id;
3077 
3078 		/*
3079 		 * Initialize the hardware/software state for the port.
3080 		 */
3081 		err = t4vf_port_init(adapter, pidx);
3082 		if (err) {
3083 			dev_err(&pdev->dev, "cannot initialize port %d\n",
3084 				pidx);
3085 			goto err_free_dev;
3086 		}
3087 
3088 		err = t4vf_get_vf_mac_acl(adapter, pf, &naddr, mac);
3089 		if (err) {
3090 			dev_err(&pdev->dev,
3091 				"unable to determine MAC ACL address, "
3092 				"continuing anyway.. (status %d)\n", err);
3093 		} else if (naddr && adapter->params.vfres.nvi == 1) {
3094 			struct sockaddr addr;
3095 
3096 			ether_addr_copy(addr.sa_data, mac);
3097 			err = cxgb4vf_set_mac_addr(netdev, &addr);
3098 			if (err) {
3099 				dev_err(&pdev->dev,
3100 					"unable to set MAC address %pM\n",
3101 					mac);
3102 				goto err_free_dev;
3103 			}
3104 			dev_info(&pdev->dev,
3105 				 "Using assigned MAC ACL: %pM\n", mac);
3106 		}
3107 	}
3108 
3109 	/* See what interrupts we'll be using.  If we've been configured to
3110 	 * use MSI-X interrupts, try to enable them but fall back to using
3111 	 * MSI interrupts if we can't enable MSI-X interrupts.  If we can't
3112 	 * get MSI interrupts we bail with the error.
3113 	 */
3114 	if (msi == MSI_MSIX && enable_msix(adapter) == 0)
3115 		adapter->flags |= USING_MSIX;
3116 	else {
3117 		if (msi == MSI_MSIX) {
3118 			dev_info(adapter->pdev_dev,
3119 				 "Unable to use MSI-X Interrupts; falling "
3120 				 "back to MSI Interrupts\n");
3121 
3122 			/* We're going to need a Forwarded Interrupt Queue so
3123 			 * that may cut into how many Queue Sets we can
3124 			 * support.
3125 			 */
3126 			msi = MSI_MSI;
3127 			size_nports_qsets(adapter);
3128 		}
3129 		err = pci_enable_msi(pdev);
3130 		if (err) {
3131 			dev_err(&pdev->dev, "Unable to allocate MSI Interrupts;"
3132 				" err=%d\n", err);
3133 			goto err_free_dev;
3134 		}
3135 		adapter->flags |= USING_MSI;
3136 	}
3137 
3138 	/* Now that we know how many "ports" we have and what interrupt
3139 	 * mechanism we're going to use, we can configure our queue resources.
3140 	 */
3141 	cfg_queues(adapter);
3142 
3143 	/*
3144 	 * The "card" is now ready to go.  If any errors occur during device
3145 	 * registration we do not fail the whole "card" but rather proceed
3146 	 * only with the ports we manage to register successfully.  However we
3147 	 * must register at least one net device.
3148 	 */
3149 	for_each_port(adapter, pidx) {
3150 		struct port_info *pi = netdev_priv(adapter->port[pidx]);
3151 		netdev = adapter->port[pidx];
3152 		if (netdev == NULL)
3153 			continue;
3154 
3155 		netif_set_real_num_tx_queues(netdev, pi->nqsets);
3156 		netif_set_real_num_rx_queues(netdev, pi->nqsets);
3157 
3158 		err = register_netdev(netdev);
3159 		if (err) {
3160 			dev_warn(&pdev->dev, "cannot register net device %s,"
3161 				 " skipping\n", netdev->name);
3162 			continue;
3163 		}
3164 
3165 		set_bit(pidx, &adapter->registered_device_map);
3166 	}
3167 	if (adapter->registered_device_map == 0) {
3168 		dev_err(&pdev->dev, "could not register any net devices\n");
3169 		goto err_disable_interrupts;
3170 	}
3171 
3172 	/*
3173 	 * Set up our debugfs entries.
3174 	 */
3175 	if (!IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) {
3176 		adapter->debugfs_root =
3177 			debugfs_create_dir(pci_name(pdev),
3178 					   cxgb4vf_debugfs_root);
3179 		if (IS_ERR_OR_NULL(adapter->debugfs_root))
3180 			dev_warn(&pdev->dev, "could not create debugfs"
3181 				 " directory");
3182 		else
3183 			setup_debugfs(adapter);
3184 	}
3185 
3186 	/*
3187 	 * Print a short notice on the existence and configuration of the new
3188 	 * VF network device ...
3189 	 */
3190 	for_each_port(adapter, pidx) {
3191 		dev_info(adapter->pdev_dev, "%s: Chelsio VF NIC PCIe %s\n",
3192 			 adapter->port[pidx]->name,
3193 			 (adapter->flags & USING_MSIX) ? "MSI-X" :
3194 			 (adapter->flags & USING_MSI)  ? "MSI" : "");
3195 	}
3196 
3197 	/*
3198 	 * Return success!
3199 	 */
3200 	return 0;
3201 
3202 	/*
3203 	 * Error recovery and exit code.  Unwind state that's been created
3204 	 * so far and return the error.
3205 	 */
3206 err_disable_interrupts:
3207 	if (adapter->flags & USING_MSIX) {
3208 		pci_disable_msix(adapter->pdev);
3209 		adapter->flags &= ~USING_MSIX;
3210 	} else if (adapter->flags & USING_MSI) {
3211 		pci_disable_msi(adapter->pdev);
3212 		adapter->flags &= ~USING_MSI;
3213 	}
3214 
3215 err_free_dev:
3216 	for_each_port(adapter, pidx) {
3217 		netdev = adapter->port[pidx];
3218 		if (netdev == NULL)
3219 			continue;
3220 		pi = netdev_priv(netdev);
3221 		t4vf_free_vi(adapter, pi->viid);
3222 		if (test_bit(pidx, &adapter->registered_device_map))
3223 			unregister_netdev(netdev);
3224 		free_netdev(netdev);
3225 	}
3226 
3227 err_unmap_bar:
3228 	if (!is_t4(adapter->params.chip))
3229 		iounmap(adapter->bar2);
3230 
3231 err_unmap_bar0:
3232 	iounmap(adapter->regs);
3233 
3234 err_free_adapter:
3235 	kfree(adapter->mbox_log);
3236 	kfree(adapter);
3237 
3238 err_release_regions:
3239 	pci_release_regions(pdev);
3240 	pci_clear_master(pdev);
3241 
3242 err_disable_device:
3243 	pci_disable_device(pdev);
3244 
3245 	return err;
3246 }
3247 
3248 /*
3249  * "Remove" a device: tear down all kernel and driver state created in the
3250  * "probe" routine and quiesce the device (disable interrupts, etc.).  (Note
3251  * that this is called "remove_one" in the PF Driver.)
3252  */
3253 static void cxgb4vf_pci_remove(struct pci_dev *pdev)
3254 {
3255 	struct adapter *adapter = pci_get_drvdata(pdev);
3256 
3257 	/*
3258 	 * Tear down driver state associated with device.
3259 	 */
3260 	if (adapter) {
3261 		int pidx;
3262 
3263 		/*
3264 		 * Stop all of our activity.  Unregister network port,
3265 		 * disable interrupts, etc.
3266 		 */
3267 		for_each_port(adapter, pidx)
3268 			if (test_bit(pidx, &adapter->registered_device_map))
3269 				unregister_netdev(adapter->port[pidx]);
3270 		t4vf_sge_stop(adapter);
3271 		if (adapter->flags & USING_MSIX) {
3272 			pci_disable_msix(adapter->pdev);
3273 			adapter->flags &= ~USING_MSIX;
3274 		} else if (adapter->flags & USING_MSI) {
3275 			pci_disable_msi(adapter->pdev);
3276 			adapter->flags &= ~USING_MSI;
3277 		}
3278 
3279 		/*
3280 		 * Tear down our debugfs entries.
3281 		 */
3282 		if (!IS_ERR_OR_NULL(adapter->debugfs_root)) {
3283 			cleanup_debugfs(adapter);
3284 			debugfs_remove_recursive(adapter->debugfs_root);
3285 		}
3286 
3287 		/*
3288 		 * Free all of the various resources which we've acquired ...
3289 		 */
3290 		t4vf_free_sge_resources(adapter);
3291 		for_each_port(adapter, pidx) {
3292 			struct net_device *netdev = adapter->port[pidx];
3293 			struct port_info *pi;
3294 
3295 			if (netdev == NULL)
3296 				continue;
3297 
3298 			pi = netdev_priv(netdev);
3299 			t4vf_free_vi(adapter, pi->viid);
3300 			free_netdev(netdev);
3301 		}
3302 		iounmap(adapter->regs);
3303 		if (!is_t4(adapter->params.chip))
3304 			iounmap(adapter->bar2);
3305 		kfree(adapter->mbox_log);
3306 		kfree(adapter);
3307 	}
3308 
3309 	/*
3310 	 * Disable the device and release its PCI resources.
3311 	 */
3312 	pci_disable_device(pdev);
3313 	pci_clear_master(pdev);
3314 	pci_release_regions(pdev);
3315 }
3316 
3317 /*
3318  * "Shutdown" quiesce the device, stopping Ingress Packet and Interrupt
3319  * delivery.
3320  */
3321 static void cxgb4vf_pci_shutdown(struct pci_dev *pdev)
3322 {
3323 	struct adapter *adapter;
3324 	int pidx;
3325 
3326 	adapter = pci_get_drvdata(pdev);
3327 	if (!adapter)
3328 		return;
3329 
3330 	/* Disable all Virtual Interfaces.  This will shut down the
3331 	 * delivery of all ingress packets into the chip for these
3332 	 * Virtual Interfaces.
3333 	 */
3334 	for_each_port(adapter, pidx)
3335 		if (test_bit(pidx, &adapter->registered_device_map))
3336 			unregister_netdev(adapter->port[pidx]);
3337 
3338 	/* Free up all Queues which will prevent further DMA and
3339 	 * Interrupts allowing various internal pathways to drain.
3340 	 */
3341 	t4vf_sge_stop(adapter);
3342 	if (adapter->flags & USING_MSIX) {
3343 		pci_disable_msix(adapter->pdev);
3344 		adapter->flags &= ~USING_MSIX;
3345 	} else if (adapter->flags & USING_MSI) {
3346 		pci_disable_msi(adapter->pdev);
3347 		adapter->flags &= ~USING_MSI;
3348 	}
3349 
3350 	/*
3351 	 * Free up all Queues which will prevent further DMA and
3352 	 * Interrupts allowing various internal pathways to drain.
3353 	 */
3354 	t4vf_free_sge_resources(adapter);
3355 	pci_set_drvdata(pdev, NULL);
3356 }
3357 
3358 /* Macros needed to support the PCI Device ID Table ...
3359  */
3360 #define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
3361 	static const struct pci_device_id cxgb4vf_pci_tbl[] = {
3362 #define CH_PCI_DEVICE_ID_FUNCTION	0x8
3363 
3364 #define CH_PCI_ID_TABLE_ENTRY(devid) \
3365 		{ PCI_VDEVICE(CHELSIO, (devid)), 0 }
3366 
3367 #define CH_PCI_DEVICE_ID_TABLE_DEFINE_END { 0, } }
3368 
3369 #include "../cxgb4/t4_pci_id_tbl.h"
3370 
3371 MODULE_DESCRIPTION(DRV_DESC);
3372 MODULE_AUTHOR("Chelsio Communications");
3373 MODULE_LICENSE("Dual BSD/GPL");
3374 MODULE_VERSION(DRV_VERSION);
3375 MODULE_DEVICE_TABLE(pci, cxgb4vf_pci_tbl);
3376 
3377 static struct pci_driver cxgb4vf_driver = {
3378 	.name		= KBUILD_MODNAME,
3379 	.id_table	= cxgb4vf_pci_tbl,
3380 	.probe		= cxgb4vf_pci_probe,
3381 	.remove		= cxgb4vf_pci_remove,
3382 	.shutdown	= cxgb4vf_pci_shutdown,
3383 };
3384 
3385 /*
3386  * Initialize global driver state.
3387  */
3388 static int __init cxgb4vf_module_init(void)
3389 {
3390 	int ret;
3391 
3392 	/*
3393 	 * Vet our module parameters.
3394 	 */
3395 	if (msi != MSI_MSIX && msi != MSI_MSI) {
3396 		pr_warn("bad module parameter msi=%d; must be %d (MSI-X or MSI) or %d (MSI)\n",
3397 			msi, MSI_MSIX, MSI_MSI);
3398 		return -EINVAL;
3399 	}
3400 
3401 	/* Debugfs support is optional, just warn if this fails */
3402 	cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
3403 	if (IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
3404 		pr_warn("could not create debugfs entry, continuing\n");
3405 
3406 	ret = pci_register_driver(&cxgb4vf_driver);
3407 	if (ret < 0 && !IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
3408 		debugfs_remove(cxgb4vf_debugfs_root);
3409 	return ret;
3410 }
3411 
3412 /*
3413  * Tear down global driver state.
3414  */
3415 static void __exit cxgb4vf_module_exit(void)
3416 {
3417 	pci_unregister_driver(&cxgb4vf_driver);
3418 	debugfs_remove(cxgb4vf_debugfs_root);
3419 }
3420 
3421 module_init(cxgb4vf_module_init);
3422 module_exit(cxgb4vf_module_exit);
3423