/*
 * Copyright (C) 2015 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/log2.h>
#include <linux/prefetch.h>
#include <linux/irq.h>

#include "nic_reg.h"
#include "nic.h"
#include "nicvf_queues.h"
#include "thunder_bgx.h"

#define DRV_NAME	"thunder-nicvf"
#define DRV_VERSION	"1.0"

/* Supported devices */
static const struct pci_device_id nicvf_id_table[] = {
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
			 PCI_DEVICE_ID_THUNDER_NIC_VF,
			 PCI_VENDOR_ID_CAVIUM, 0xA134) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
			 PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF,
			 PCI_VENDOR_ID_CAVIUM, 0xA11E) },
	{ 0, }  /* end of table */
};

MODULE_AUTHOR("Sunil Goutham");
MODULE_DESCRIPTION("Cavium Thunder NIC Virtual Function Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, nicvf_id_table);

static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Debug message level bitmap");

static int cpi_alg = CPI_ALG_NONE;
module_param(cpi_alg, int, S_IRUGO);
MODULE_PARM_DESC(cpi_alg,
		 "CPI algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)");

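/* Translate a Qset-local queue index into a netdev queue index.
 * Queues of secondary Qsets are exposed through the primary VF's
 * netdev, offset past the queues of the Qsets preceding them.
 */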
static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx)
{
	if (nic->sqs_mode)
		return qidx + ((nic->sqs_id + 1) * MAX_CMP_QUEUES_PER_QS);
	else
		return qidx;
}

static inline void nicvf_set_rx_frame_cnt(struct nicvf *nic,
					  struct sk_buff *skb)
{
	if (skb->len <= 64)
		nic->drv_stats.rx_frames_64++;
	else if (skb->len <= 127)
		nic->drv_stats.rx_frames_127++;
	else if (skb->len <= 255)
		nic->drv_stats.rx_frames_255++;
	else if (skb->len <= 511)
		nic->drv_stats.rx_frames_511++;
	else if (skb->len <= 1023)
		nic->drv_stats.rx_frames_1023++;
	else if (skb->len <= 1518)
		nic->drv_stats.rx_frames_1518++;
	else
		nic->drv_stats.rx_frames_jumbo++;
}

/* The Cavium ThunderX network controller can *only* be found in SoCs
 * containing the ThunderX ARM64 CPU implementation.  All accesses to the device
 * registers on this platform are implicitly strongly ordered with respect
 * to memory accesses. So writeq_relaxed() and readq_relaxed() are safe to use
 * with no memory barriers in this driver.  The readq()/writeq() functions add
 * explicit ordering operations which in this case are redundant, and only
 * add overhead.
 */

/* Register read/write APIs */
void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val)
{
	writeq_relaxed(val, nic->reg_base + offset);
}

u64 nicvf_reg_read(struct nicvf *nic, u64 offset)
{
	return readq_relaxed(nic->reg_base + offset);
}

void nicvf_queue_reg_write(struct nicvf *nic, u64 offset,
			   u64 qidx, u64 val)
{
	void __iomem *addr = nic->reg_base + offset;

	writeq_relaxed(val, addr + (qidx << NIC_Q_NUM_SHIFT));
}

u64 nicvf_queue_reg_read(struct nicvf *nic, u64 offset, u64 qidx)
{
	void __iomem *addr = nic->reg_base + offset;

	return readq_relaxed(addr + (qidx << NIC_Q_NUM_SHIFT));
}

/* VF -> PF mailbox communication */
static void nicvf_write_to_mbx(struct nicvf *nic, union nic_mbx *mbx)
{
	u64 *msg = (u64 *)mbx;

	nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 0, msg[0]);
	nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 8, msg[1]);
}

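/* Send a mailbox message to the PF and poll (sleeping in msleep()
 * steps) until the PF ACKs or NACKs it.  Returns 0 on ACK, -EINVAL
 * on NACK and -EBUSY if the PF doesn't respond within
 * NIC_MBOX_MSG_TIMEOUT.
 */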
int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
{
	int timeout = NIC_MBOX_MSG_TIMEOUT;
	int sleep = 10;

	nic->pf_acked = false;
	nic->pf_nacked = false;

	nicvf_write_to_mbx(nic, mbx);

	/* Wait for the message to be acked, timeout 2 sec */
	while (!nic->pf_acked) {
		if (nic->pf_nacked)
			return -EINVAL;
		msleep(sleep);
		if (nic->pf_acked)
			break;
		timeout -= sleep;
		if (!timeout) {
			netdev_err(nic->netdev,
				   "PF didn't ack mbox msg %d from VF%d\n",
				   (mbx->msg.msg & 0xFF), nic->vf_id);
			return -EBUSY;
		}
	}
	return 0;
}

/* Checks if VF is able to communicate with PF
 * and also gets the VNIC number this VF is associated with.
 */
static int nicvf_check_pf_ready(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_READY;
	if (nicvf_send_msg_to_pf(nic, &mbx)) {
		netdev_err(nic->netdev,
			   "PF didn't respond to READY msg\n");
		return 0;
	}

	return 1;
}

static void nicvf_read_bgx_stats(struct nicvf *nic, struct bgx_stats_msg *bgx)
{
	if (bgx->rx)
		nic->bgx_stats.rx_stats[bgx->idx] = bgx->stats;
	else
		nic->bgx_stats.tx_stats[bgx->idx] = bgx->stats;
}

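/* Mailbox interrupt handler: read the message the PF wrote into the
 * mailbox registers and act on it (VF config, ACK/NACK, RSS size,
 * BGX stats, link state, Qset pointers etc.).
 */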
static void nicvf_handle_mbx_intr(struct nicvf *nic)
{
	union nic_mbx mbx = {};
	u64 *mbx_data;
	u64 mbx_addr;
	int i;

	mbx_addr = NIC_VF_PF_MAILBOX_0_1;
	mbx_data = (u64 *)&mbx;

	for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
		*mbx_data = nicvf_reg_read(nic, mbx_addr);
		mbx_data++;
		mbx_addr += sizeof(u64);
	}

	netdev_dbg(nic->netdev, "Mbox message: msg: 0x%x\n", mbx.msg.msg);
	switch (mbx.msg.msg) {
	case NIC_MBOX_MSG_READY:
		nic->pf_acked = true;
		nic->vf_id = mbx.nic_cfg.vf_id & 0x7F;
		nic->tns_mode = mbx.nic_cfg.tns_mode & 0x7F;
		nic->node = mbx.nic_cfg.node_id;
		if (!nic->set_mac_pending)
			ether_addr_copy(nic->netdev->dev_addr,
					mbx.nic_cfg.mac_addr);
		nic->sqs_mode = mbx.nic_cfg.sqs_mode;
		nic->loopback_supported = mbx.nic_cfg.loopback_supported;
		nic->link_up = false;
		nic->duplex = 0;
		nic->speed = 0;
		break;
	case NIC_MBOX_MSG_ACK:
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_NACK:
		nic->pf_nacked = true;
		break;
	case NIC_MBOX_MSG_RSS_SIZE:
		nic->rss_info.rss_size = mbx.rss_size.ind_tbl_size;
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_BGX_STATS:
		nicvf_read_bgx_stats(nic, &mbx.bgx_stats);
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_BGX_LINK_CHANGE:
		nic->pf_acked = true;
		nic->link_up = mbx.link_status.link_up;
		nic->duplex = mbx.link_status.duplex;
		nic->speed = mbx.link_status.speed;
		if (nic->link_up) {
			netdev_info(nic->netdev, "%s: Link is Up %d Mbps %s\n",
				    nic->netdev->name, nic->speed,
				    nic->duplex == DUPLEX_FULL ?
				    "Full duplex" : "Half duplex");
			netif_carrier_on(nic->netdev);
			netif_tx_start_all_queues(nic->netdev);
		} else {
			netdev_info(nic->netdev, "%s: Link is Down\n",
				    nic->netdev->name);
			netif_carrier_off(nic->netdev);
			netif_tx_stop_all_queues(nic->netdev);
		}
		break;
	case NIC_MBOX_MSG_ALLOC_SQS:
		nic->sqs_count = mbx.sqs_alloc.qs_count;
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_SNICVF_PTR:
		/* Primary VF: make note of secondary VF's pointer
		 * to be used during packet transmission.
		 */
		nic->snicvf[mbx.nicvf.sqs_id] =
			(struct nicvf *)mbx.nicvf.nicvf;
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_PNICVF_PTR:
		/* Secondary VF/Qset: make note of primary VF's pointer
		 * to be used during packet reception, to hand over packets
		 * to primary VF's netdev.
		 */
		nic->pnicvf = (struct nicvf *)mbx.nicvf.nicvf;
		nic->pf_acked = true;
		break;
	default:
		netdev_err(nic->netdev,
			   "Invalid message from PF, msg 0x%x\n", mbx.msg.msg);
		break;
	}
	nicvf_clear_intr(nic, NICVF_INTR_MBOX, 0);
}

static int nicvf_hw_set_mac_addr(struct nicvf *nic, struct net_device *netdev)
{
	union nic_mbx mbx = {};

	mbx.mac.msg = NIC_MBOX_MSG_SET_MAC;
	mbx.mac.vf_id = nic->vf_id;
	ether_addr_copy(mbx.mac.mac_addr, netdev->dev_addr);

	return nicvf_send_msg_to_pf(nic, &mbx);
}

static void nicvf_config_cpi(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.cpi_cfg.msg = NIC_MBOX_MSG_CPI_CFG;
	mbx.cpi_cfg.vf_id = nic->vf_id;
	mbx.cpi_cfg.cpi_alg = nic->cpi_alg;
	mbx.cpi_cfg.rq_cnt = nic->qs->rq_cnt;

	nicvf_send_msg_to_pf(nic, &mbx);
}

static void nicvf_get_rss_size(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.rss_size.msg = NIC_MBOX_MSG_RSS_SIZE;
	mbx.rss_size.vf_id = nic->vf_id;
	nicvf_send_msg_to_pf(nic, &mbx);
}

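/* Program the RSS indirection table via the PF.  The table may be
 * larger than what fits into one mailbox message, so it is sent in
 * chunks of RSS_IND_TBL_LEN_PER_MBX_MSG entries.
 */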
void nicvf_config_rss(struct nicvf *nic)
{
	union nic_mbx mbx = {};
	struct nicvf_rss_info *rss = &nic->rss_info;
	int ind_tbl_len = rss->rss_size;
	int i, nextq = 0;

	mbx.rss_cfg.vf_id = nic->vf_id;
	mbx.rss_cfg.hash_bits = rss->hash_bits;
	while (ind_tbl_len) {
		mbx.rss_cfg.tbl_offset = nextq;
		mbx.rss_cfg.tbl_len = min(ind_tbl_len,
					  RSS_IND_TBL_LEN_PER_MBX_MSG);
		mbx.rss_cfg.msg = mbx.rss_cfg.tbl_offset ?
			  NIC_MBOX_MSG_RSS_CFG_CONT : NIC_MBOX_MSG_RSS_CFG;

		for (i = 0; i < mbx.rss_cfg.tbl_len; i++)
			mbx.rss_cfg.ind_tbl[i] = rss->ind_tbl[nextq++];

		nicvf_send_msg_to_pf(nic, &mbx);

		ind_tbl_len -= mbx.rss_cfg.tbl_len;
	}
}

void nicvf_set_rss_key(struct nicvf *nic)
{
	struct nicvf_rss_info *rss = &nic->rss_info;
	u64 key_addr = NIC_VNIC_RSS_KEY_0_4;
	int idx;

	for (idx = 0; idx < RSS_HASH_KEY_SIZE; idx++) {
		nicvf_reg_write(nic, key_addr, rss->key[idx]);
		key_addr += sizeof(u64);
	}
}

static int nicvf_rss_init(struct nicvf *nic)
{
	struct nicvf_rss_info *rss = &nic->rss_info;
	int idx;

	nicvf_get_rss_size(nic);

	if (cpi_alg != CPI_ALG_NONE) {
		rss->enable = false;
		rss->hash_bits = 0;
		return 0;
	}

	rss->enable = true;

	/* Using the HW reset value for now */
	rss->key[0] = 0xFEED0BADFEED0BADULL;
	rss->key[1] = 0xFEED0BADFEED0BADULL;
	rss->key[2] = 0xFEED0BADFEED0BADULL;
	rss->key[3] = 0xFEED0BADFEED0BADULL;
	rss->key[4] = 0xFEED0BADFEED0BADULL;

	nicvf_set_rss_key(nic);

	rss->cfg = RSS_IP_HASH_ENA | RSS_TCP_HASH_ENA | RSS_UDP_HASH_ENA;
	nicvf_reg_write(nic, NIC_VNIC_RSS_CFG, rss->cfg);

	rss->hash_bits = ilog2(rounddown_pow_of_two(rss->rss_size));

	for (idx = 0; idx < rss->rss_size; idx++)
		rss->ind_tbl[idx] = ethtool_rxfh_indir_default(idx,
							       nic->rx_queues);
	nicvf_config_rss(nic);
	return 1;
}

/* Request PF to allocate additional Qsets */
static void nicvf_request_sqs(struct nicvf *nic)
{
	union nic_mbx mbx = {};
	int sqs;
	int sqs_count = nic->sqs_count;
	int rx_queues = 0, tx_queues = 0;

	/* Only primary VF should request */
	if (nic->sqs_mode || !nic->sqs_count)
		return;

	mbx.sqs_alloc.msg = NIC_MBOX_MSG_ALLOC_SQS;
	mbx.sqs_alloc.vf_id = nic->vf_id;
	mbx.sqs_alloc.qs_count = nic->sqs_count;
	if (nicvf_send_msg_to_pf(nic, &mbx)) {
		/* No response from PF */
		nic->sqs_count = 0;
		return;
	}

	/* Return if no secondary Qsets are available */
	if (!nic->sqs_count)
		return;

	if (nic->rx_queues > MAX_RCV_QUEUES_PER_QS)
		rx_queues = nic->rx_queues - MAX_RCV_QUEUES_PER_QS;
	if (nic->tx_queues > MAX_SND_QUEUES_PER_QS)
		tx_queues = nic->tx_queues - MAX_SND_QUEUES_PER_QS;

	/* Set number of Rx/Tx queues in each of the SQsets */
	for (sqs = 0; sqs < nic->sqs_count; sqs++) {
		mbx.nicvf.msg = NIC_MBOX_MSG_SNICVF_PTR;
		mbx.nicvf.vf_id = nic->vf_id;
		mbx.nicvf.sqs_id = sqs;
		nicvf_send_msg_to_pf(nic, &mbx);

		nic->snicvf[sqs]->sqs_id = sqs;
		if (rx_queues > MAX_RCV_QUEUES_PER_QS) {
			nic->snicvf[sqs]->qs->rq_cnt = MAX_RCV_QUEUES_PER_QS;
			rx_queues -= MAX_RCV_QUEUES_PER_QS;
		} else {
			nic->snicvf[sqs]->qs->rq_cnt = rx_queues;
			rx_queues = 0;
		}

		if (tx_queues > MAX_SND_QUEUES_PER_QS) {
			nic->snicvf[sqs]->qs->sq_cnt = MAX_SND_QUEUES_PER_QS;
			tx_queues -= MAX_SND_QUEUES_PER_QS;
		} else {
			nic->snicvf[sqs]->qs->sq_cnt = tx_queues;
			tx_queues = 0;
		}

		nic->snicvf[sqs]->qs->cq_cnt =
		max(nic->snicvf[sqs]->qs->rq_cnt, nic->snicvf[sqs]->qs->sq_cnt);

		/* Initialize secondary Qset's queues and its interrupts */
		nicvf_open(nic->snicvf[sqs]->netdev);
	}

	/* Update stack with actual Rx/Tx queue count allocated */
	if (sqs_count != nic->sqs_count)
		nicvf_set_real_num_queues(nic->netdev,
					  nic->tx_queues, nic->rx_queues);
}

/* Send this Qset's nicvf pointer to PF.
 * PF in turn sends primary VF's nicvf struct to secondary Qsets/VFs
 * so that packets received by these Qsets can use primary VF's netdev.
 */
static void nicvf_send_vf_struct(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.nicvf.msg = NIC_MBOX_MSG_NICVF_PTR;
	mbx.nicvf.sqs_mode = nic->sqs_mode;
	mbx.nicvf.nicvf = (u64)nic;
	nicvf_send_msg_to_pf(nic, &mbx);
}

static void nicvf_get_primary_vf_struct(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.nicvf.msg = NIC_MBOX_MSG_PNICVF_PTR;
	nicvf_send_msg_to_pf(nic, &mbx);
}

int nicvf_set_real_num_queues(struct net_device *netdev,
			      int tx_queues, int rx_queues)
{
	int err = 0;

	err = netif_set_real_num_tx_queues(netdev, tx_queues);
	if (err) {
		netdev_err(netdev,
			   "Failed to set number of Tx queues: %d\n",
			   tx_queues);
		return err;
	}

	err = netif_set_real_num_rx_queues(netdev, rx_queues);
	if (err)
		netdev_err(netdev,
			   "Failed to set number of Rx queues: %d\n",
			   rx_queues);
	return err;
}

static int nicvf_init_resources(struct nicvf *nic)
{
	int err;
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;

	/* Enable Qset */
	nicvf_qset_config(nic, true);

	/* Initialize queues and HW for data transfer */
	err = nicvf_config_data_transfer(nic, true);
	if (err) {
		netdev_err(nic->netdev,
			   "Failed to alloc/config VF's QSet resources\n");
		return err;
	}

	/* Send VF config done msg to PF */
	nicvf_write_to_mbx(nic, &mbx);

	return 0;
}

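/* Handle a transmit completion CQE: check for Tx errors, free the SQ
 * descriptors consumed by the packet and release its SKB.
 */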
static void nicvf_snd_pkt_handler(struct net_device *netdev,
				  struct cmp_queue *cq,
				  struct cqe_send_t *cqe_tx, int cqe_type)
{
	struct sk_buff *skb = NULL;
	struct nicvf *nic = netdev_priv(netdev);
	struct snd_queue *sq;
	struct sq_hdr_subdesc *hdr;

	sq = &nic->qs->sq[cqe_tx->sq_idx];

	hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, cqe_tx->sqe_ptr);
	if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER)
		return;

	netdev_dbg(nic->netdev,
		   "%s Qset #%d SQ #%d SQ ptr #%d subdesc count %d\n",
		   __func__, cqe_tx->sq_qs, cqe_tx->sq_idx,
		   cqe_tx->sqe_ptr, hdr->subdesc_cnt);

	nicvf_check_cqe_tx_errs(nic, cq, cqe_tx);
	skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr];
	/* For TSO offloaded packets only one SQE will have a valid SKB */
	if (skb) {
		nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
		prefetch(skb);
		dev_consume_skb_any(skb);
		sq->skbuff[cqe_tx->sqe_ptr] = (u64)NULL;
	} else {
		/* In case of HW TSO, HW sends a CQE for each segment of a TSO
		 * packet instead of a single CQE for the whole TSO packet
		 * transmitted. Each of these CQEs points to the same SQE, so
		 * avoid freeing the same SQE multiple times.
		 */
		if (!nic->hw_tso)
			nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
	}
}

static inline void nicvf_set_rxhash(struct net_device *netdev,
				    struct cqe_rx_t *cqe_rx,
				    struct sk_buff *skb)
{
	u8 hash_type;
	u32 hash;

	if (!(netdev->features & NETIF_F_RXHASH))
		return;

	switch (cqe_rx->rss_alg) {
	case RSS_ALG_TCP_IP:
	case RSS_ALG_UDP_IP:
		hash_type = PKT_HASH_TYPE_L4;
		hash = cqe_rx->rss_tag;
		break;
	case RSS_ALG_IP:
		hash_type = PKT_HASH_TYPE_L3;
		hash = cqe_rx->rss_tag;
		break;
	default:
		hash_type = PKT_HASH_TYPE_NONE;
		hash = 0;
	}

	skb_set_hash(skb, hash, hash_type);
}

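/* Handle a received packet: build the SKB, record RSS hash, checksum
 * and VLAN info from the CQE, then pass it up via GRO or
 * netif_receive_skb().
 */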
static void nicvf_rcv_pkt_handler(struct net_device *netdev,
				  struct napi_struct *napi,
				  struct cmp_queue *cq,
				  struct cqe_rx_t *cqe_rx, int cqe_type)
{
	struct sk_buff *skb;
	struct nicvf *nic = netdev_priv(netdev);
	int err = 0;
	int rq_idx;

	rq_idx = nicvf_netdev_qidx(nic, cqe_rx->rq_idx);

	if (nic->sqs_mode) {
		/* Use primary VF's 'nicvf' struct */
		nic = nic->pnicvf;
		netdev = nic->netdev;
	}

	/* Check for errors */
	err = nicvf_check_cqe_rx_errs(nic, cq, cqe_rx);
	if (err && !cqe_rx->rb_cnt)
		return;

	skb = nicvf_get_rcv_skb(nic, cqe_rx);
	if (!skb) {
		netdev_dbg(nic->netdev, "Packet not received\n");
		return;
	}

	if (netif_msg_pktdata(nic)) {
		netdev_info(nic->netdev, "%s: skb 0x%p, len=%d\n", netdev->name,
			    skb, skb->len);
		print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1,
			       skb->data, skb->len, true);
	}

	/* If error packet, drop it here */
	if (err) {
		dev_kfree_skb_any(skb);
		return;
	}

	nicvf_set_rx_frame_cnt(nic, skb);

	nicvf_set_rxhash(netdev, cqe_rx, skb);

	skb_record_rx_queue(skb, rq_idx);
	if (netdev->hw_features & NETIF_F_RXCSUM) {
		/* HW by default verifies TCP/UDP/SCTP checksums */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else {
		skb_checksum_none_assert(skb);
	}

	skb->protocol = eth_type_trans(skb, netdev);

	/* Check for stripped VLAN */
	if (cqe_rx->vlan_found && cqe_rx->vlan_stripped)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       ntohs((__force __be16)cqe_rx->vlan_tci));

	if (napi && (netdev->features & NETIF_F_GRO))
		napi_gro_receive(napi, skb);
	else
		netif_receive_skb(skb);
}

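/* Process up to 'budget' CQEs from a completion queue, dispatching Rx
 * packets and Tx completions, then ring the doorbell so HW can reuse
 * the processed CQEs.
 */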
static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx,
				 struct napi_struct *napi, int budget)
{
	int processed_cqe, work_done = 0, tx_done = 0;
	int cqe_count, cqe_head;
	struct nicvf *nic = netdev_priv(netdev);
	struct queue_set *qs = nic->qs;
	struct cmp_queue *cq = &qs->cq[cq_idx];
	struct cqe_rx_t *cq_desc;
	struct netdev_queue *txq;

	spin_lock_bh(&cq->lock);
loop:
	processed_cqe = 0;
	/* Get number of valid CQ entries to process */
	cqe_count = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, cq_idx);
	cqe_count &= CQ_CQE_COUNT;
	if (!cqe_count)
		goto done;

	/* Get head of the valid CQ entries */
	cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9;
	cqe_head &= 0xFFFF;

	netdev_dbg(nic->netdev, "%s CQ%d cqe_count %d cqe_head %d\n",
		   __func__, cq_idx, cqe_count, cqe_head);
	while (processed_cqe < cqe_count) {
		/* Get the CQ descriptor */
		cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head);
		cqe_head++;
		cqe_head &= (cq->dmem.q_len - 1);
		/* Initiate prefetch for next descriptor */
		prefetch((struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head));

		if ((work_done >= budget) && napi &&
		    (cq_desc->cqe_type != CQE_TYPE_SEND)) {
			break;
		}

		netdev_dbg(nic->netdev, "CQ%d cq_desc->cqe_type %d\n",
			   cq_idx, cq_desc->cqe_type);
		switch (cq_desc->cqe_type) {
		case CQE_TYPE_RX:
			nicvf_rcv_pkt_handler(netdev, napi, cq,
					      cq_desc, CQE_TYPE_RX);
			work_done++;
			break;
		case CQE_TYPE_SEND:
			nicvf_snd_pkt_handler(netdev, cq,
					      (void *)cq_desc, CQE_TYPE_SEND);
			tx_done++;
			break;
		case CQE_TYPE_INVALID:
		case CQE_TYPE_RX_SPLIT:
		case CQE_TYPE_RX_TCP:
		case CQE_TYPE_SEND_PTP:
			/* Ignore for now */
			break;
		}
		processed_cqe++;
	}
	netdev_dbg(nic->netdev,
		   "%s CQ%d processed_cqe %d work_done %d budget %d\n",
		   __func__, cq_idx, processed_cqe, work_done, budget);

	/* Ring doorbell to inform H/W to reuse processed CQEs */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR,
			      cq_idx, processed_cqe);

	if ((work_done < budget) && napi)
		goto loop;

done:
	/* Wake up TXQ if it was stopped earlier because the SQ was full */
	if (tx_done) {
		netdev = nic->pnicvf->netdev;
		txq = netdev_get_tx_queue(netdev,
					  nicvf_netdev_qidx(nic, cq_idx));
		nic = nic->pnicvf;
		if (netif_tx_queue_stopped(txq) && netif_carrier_ok(netdev)) {
			netif_tx_start_queue(txq);
			nic->drv_stats.txq_wake++;
			if (netif_msg_tx_err(nic))
				netdev_warn(netdev,
					    "%s: Transmit queue wakeup SQ%d\n",
					    netdev->name, cq_idx);
		}
	}

	spin_unlock_bh(&cq->lock);
	return work_done;
}

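/* NAPI poll handler: process CQEs and re-enable the CQ interrupt once
 * the queue is drained below budget.
 */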
static int nicvf_poll(struct napi_struct *napi, int budget)
{
	u64 cq_head;
	int work_done = 0;
	struct net_device *netdev = napi->dev;
	struct nicvf *nic = netdev_priv(netdev);
	struct nicvf_cq_poll *cq;

	cq = container_of(napi, struct nicvf_cq_poll, napi);
	work_done = nicvf_cq_intr_handler(netdev, cq->cq_idx, napi, budget);

	if (work_done < budget) {
		/* Slow packet rate, exit polling */
		napi_complete(napi);
		/* Re-enable interrupts */
		cq_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD,
					       cq->cq_idx);
		nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->cq_idx);
		nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_HEAD,
				      cq->cq_idx, cq_head);
		nicvf_enable_intr(nic, NICVF_INTR_CQ, cq->cq_idx);
	}
	return work_done;
}

/* Qset error interrupt handler
 *
 * As of now only CQ errors are handled
 */
static void nicvf_handle_qs_err(unsigned long data)
{
	struct nicvf *nic = (struct nicvf *)data;
	struct queue_set *qs = nic->qs;
	int qidx;
	u64 status;

	netif_tx_disable(nic->netdev);

	/* Check if it is CQ err */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		status = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS,
					      qidx);
		if (!(status & CQ_ERR_MASK))
			continue;
		/* Process already queued CQEs and reconfig CQ */
		nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
		nicvf_sq_disable(nic, qidx);
		nicvf_cq_intr_handler(nic->netdev, qidx, NULL, 0);
		nicvf_cmp_queue_config(nic, qs, qidx, true);
		nicvf_sq_free_used_descs(nic->netdev, &qs->sq[qidx], qidx);
		nicvf_sq_enable(nic, &qs->sq[qidx], qidx);

		nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);
	}

	netif_tx_start_all_queues(nic->netdev);
	/* Re-enable Qset error interrupt */
	nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);
}

static void nicvf_dump_intr_status(struct nicvf *nic)
{
	if (netif_msg_intr(nic))
		netdev_info(nic->netdev, "%s: interrupt status 0x%llx\n",
			    nic->netdev->name, nicvf_reg_read(nic, NIC_VF_INT));
}

static irqreturn_t nicvf_misc_intr_handler(int irq, void *nicvf_irq)
{
	struct nicvf *nic = (struct nicvf *)nicvf_irq;
	u64 intr;

	nicvf_dump_intr_status(nic);

	intr = nicvf_reg_read(nic, NIC_VF_INT);
	/* Check for spurious interrupt */
	if (!(intr & NICVF_INTR_MBOX_MASK))
		return IRQ_HANDLED;

	nicvf_handle_mbx_intr(nic);

	return IRQ_HANDLED;
}

static irqreturn_t nicvf_intr_handler(int irq, void *cq_irq)
{
	struct nicvf_cq_poll *cq_poll = (struct nicvf_cq_poll *)cq_irq;
	struct nicvf *nic = cq_poll->nicvf;
	int qidx = cq_poll->cq_idx;

	nicvf_dump_intr_status(nic);

	/* Disable interrupts */
	nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);

	/* Schedule NAPI */
	napi_schedule(&cq_poll->napi);

	/* Clear interrupt */
	nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx);

	return IRQ_HANDLED;
}

static irqreturn_t nicvf_rbdr_intr_handler(int irq, void *nicvf_irq)
{
	struct nicvf *nic = (struct nicvf *)nicvf_irq;
	u8 qidx;

	nicvf_dump_intr_status(nic);

	/* Disable RBDR interrupt and schedule softirq */
	for (qidx = 0; qidx < nic->qs->rbdr_cnt; qidx++) {
		if (!nicvf_is_intr_enabled(nic, NICVF_INTR_RBDR, qidx))
			continue;
		nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
		tasklet_hi_schedule(&nic->rbdr_task);
		/* Clear interrupt */
		nicvf_clear_intr(nic, NICVF_INTR_RBDR, qidx);
	}

	return IRQ_HANDLED;
}

static irqreturn_t nicvf_qs_err_intr_handler(int irq, void *nicvf_irq)
{
	struct nicvf *nic = (struct nicvf *)nicvf_irq;

	nicvf_dump_intr_status(nic);

	/* Disable Qset err interrupt and schedule softirq */
	nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
	tasklet_hi_schedule(&nic->qs_err_task);
	nicvf_clear_intr(nic, NICVF_INTR_QS_ERR, 0);

	return IRQ_HANDLED;
}

static int nicvf_enable_msix(struct nicvf *nic)
{
	int ret, vec;

	nic->num_vec = NIC_VF_MSIX_VECTORS;

	for (vec = 0; vec < nic->num_vec; vec++)
		nic->msix_entries[vec].entry = vec;

	ret = pci_enable_msix(nic->pdev, nic->msix_entries, nic->num_vec);
	if (ret) {
		netdev_err(nic->netdev,
			   "Req for #%d msix vectors failed\n", nic->num_vec);
		return 0;
	}
	nic->msix_enabled = 1;
	return 1;
}

static void nicvf_disable_msix(struct nicvf *nic)
{
	if (nic->msix_enabled) {
		pci_disable_msix(nic->pdev);
		nic->msix_enabled = 0;
		nic->num_vec = 0;
	}
}

static int nicvf_register_interrupts(struct nicvf *nic)
{
	int irq, ret = 0;
	int vector;

	for_each_cq_irq(irq)
		sprintf(nic->irq_name[irq], "NICVF%d CQ%d",
			nic->vf_id, irq);

	for_each_sq_irq(irq)
		sprintf(nic->irq_name[irq], "NICVF%d SQ%d",
			nic->vf_id, irq - NICVF_INTR_ID_SQ);

	for_each_rbdr_irq(irq)
		sprintf(nic->irq_name[irq], "NICVF%d RBDR%d",
			nic->vf_id, irq - NICVF_INTR_ID_RBDR);

	/* Register CQ interrupts */
	for (irq = 0; irq < nic->qs->cq_cnt; irq++) {
		vector = nic->msix_entries[irq].vector;
		ret = request_irq(vector, nicvf_intr_handler,
				  0, nic->irq_name[irq], nic->napi[irq]);
		if (ret)
			goto err;
		nic->irq_allocated[irq] = true;
	}

	/* Register RBDR interrupt */
	for (irq = NICVF_INTR_ID_RBDR;
	     irq < (NICVF_INTR_ID_RBDR + nic->qs->rbdr_cnt); irq++) {
		vector = nic->msix_entries[irq].vector;
		ret = request_irq(vector, nicvf_rbdr_intr_handler,
				  0, nic->irq_name[irq], nic);
		if (ret)
			goto err;
		nic->irq_allocated[irq] = true;
	}

	/* Register QS error interrupt */
	sprintf(nic->irq_name[NICVF_INTR_ID_QS_ERR],
		"NICVF%d Qset error", nic->vf_id);
	irq = NICVF_INTR_ID_QS_ERR;
	ret = request_irq(nic->msix_entries[irq].vector,
			  nicvf_qs_err_intr_handler,
			  0, nic->irq_name[irq], nic);
	if (!ret)
		nic->irq_allocated[irq] = true;

err:
	if (ret)
		netdev_err(nic->netdev, "request_irq failed, vector %d\n", irq);

	return ret;
}

static void nicvf_unregister_interrupts(struct nicvf *nic)
{
	int irq;

	/* Free registered interrupts */
	for (irq = 0; irq < nic->num_vec; irq++) {
		if (!nic->irq_allocated[irq])
			continue;

		if (irq < NICVF_INTR_ID_SQ)
			free_irq(nic->msix_entries[irq].vector, nic->napi[irq]);
		else
			free_irq(nic->msix_entries[irq].vector, nic);

		nic->irq_allocated[irq] = false;
	}

	/* Disable MSI-X */
	nicvf_disable_msix(nic);
}

/* Initialize MSI-X vectors and register MISC interrupt.
 * Send READY message to PF to check if it is alive.
 */
static int nicvf_register_misc_interrupt(struct nicvf *nic)
{
	int ret = 0;
	int irq = NICVF_INTR_ID_MISC;

	/* Return if mailbox interrupt is already registered */
	if (nic->msix_enabled)
		return 0;

	/* Enable MSI-X */
	if (!nicvf_enable_msix(nic))
		return 1;

	sprintf(nic->irq_name[irq], "%s Mbox", "NICVF");
	/* Register Misc interrupt */
	ret = request_irq(nic->msix_entries[irq].vector,
			  nicvf_misc_intr_handler, 0, nic->irq_name[irq], nic);

	if (ret)
		return ret;
	nic->irq_allocated[irq] = true;

	/* Enable mailbox interrupt */
	nicvf_enable_intr(nic, NICVF_INTR_MBOX, 0);

	/* Check if VF is able to communicate with PF */
	if (!nicvf_check_pf_ready(nic)) {
		nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
		nicvf_unregister_interrupts(nic);
		return 1;
	}

	return 0;
}

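/* Transmit entry point: drop runt frames, append the SKB to the send
 * queue, and stop the queue when the SQ has no room left for it.
 */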
static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct nicvf *nic = netdev_priv(netdev);
	int qid = skb_get_queue_mapping(skb);
	struct netdev_queue *txq = netdev_get_tx_queue(netdev, qid);

	/* Check for minimum packet length */
	if (skb->len <= ETH_HLEN) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (!netif_tx_queue_stopped(txq) && !nicvf_sq_append_skb(nic, skb)) {
		netif_tx_stop_queue(txq);
		nic->drv_stats.txq_stop++;
		if (netif_msg_tx_err(nic))
			netdev_warn(netdev,
				    "%s: Transmit ring full, stopping SQ%d\n",
				    netdev->name, qid);
		return NETDEV_TX_BUSY;
	}

	return NETDEV_TX_OK;
}

static inline void nicvf_free_cq_poll(struct nicvf *nic)
{
	struct nicvf_cq_poll *cq_poll;
	int qidx;

	for (qidx = 0; qidx < nic->qs->cq_cnt; qidx++) {
		cq_poll = nic->napi[qidx];
		if (!cq_poll)
			continue;
		nic->napi[qidx] = NULL;
		kfree(cq_poll);
	}
}

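/* Bring the interface down: notify the PF, tear down secondary Qsets,
 * quiesce interrupts, NAPI and tasklets, then free queue resources.
 */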
int nicvf_stop(struct net_device *netdev)
{
	int irq, qidx;
	struct nicvf *nic = netdev_priv(netdev);
	struct queue_set *qs = nic->qs;
	struct nicvf_cq_poll *cq_poll = NULL;
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN;
	nicvf_send_msg_to_pf(nic, &mbx);

	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(nic->netdev);
	nic->link_up = false;

	/* Tear down secondary qsets first */
	if (!nic->sqs_mode) {
		for (qidx = 0; qidx < nic->sqs_count; qidx++) {
			if (!nic->snicvf[qidx])
				continue;
			nicvf_stop(nic->snicvf[qidx]->netdev);
			nic->snicvf[qidx] = NULL;
		}
	}

	/* Disable RBDR & QS error interrupts */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
		nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
		nicvf_clear_intr(nic, NICVF_INTR_RBDR, qidx);
	}
	nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
	nicvf_clear_intr(nic, NICVF_INTR_QS_ERR, 0);

	/* Wait for pending IRQ handlers to finish */
	for (irq = 0; irq < nic->num_vec; irq++)
		synchronize_irq(nic->msix_entries[irq].vector);

	tasklet_kill(&nic->rbdr_task);
	tasklet_kill(&nic->qs_err_task);
	if (nic->rb_work_scheduled)
		cancel_delayed_work_sync(&nic->rbdr_work);

	for (qidx = 0; qidx < nic->qs->cq_cnt; qidx++) {
		cq_poll = nic->napi[qidx];
		if (!cq_poll)
			continue;
		napi_synchronize(&cq_poll->napi);
		/* CQ interrupt is re-enabled on napi_complete(),
		 * so disable it now
		 */
		nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
		nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx);
		napi_disable(&cq_poll->napi);
		netif_napi_del(&cq_poll->napi);
	}

	netif_tx_disable(netdev);

	/* Free resources */
	nicvf_config_data_transfer(nic, false);

	/* Disable HW Qset */
	nicvf_qset_config(nic, false);

	/* Disable mailbox interrupt */
	nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);

	nicvf_unregister_interrupts(nic);

	nicvf_free_cq_poll(nic);

	/* Clear multiqset info */
	nic->pnicvf = nic;
	nic->sqs_count = 0;

	return 0;
}

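/* Bring the interface up: register interrupts and NAPI, configure
 * CPI/RSS and secondary Qsets via the PF, then initialize and enable
 * the hardware queues and their interrupts.
 */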
int nicvf_open(struct net_device *netdev)
{
	int err, qidx;
	struct nicvf *nic = netdev_priv(netdev);
	struct queue_set *qs = nic->qs;
	struct nicvf_cq_poll *cq_poll = NULL;

	nic->mtu = netdev->mtu;

	netif_carrier_off(netdev);

	err = nicvf_register_misc_interrupt(nic);
	if (err)
		return err;

	/* Register NAPI handler for processing CQEs */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		cq_poll = kzalloc(sizeof(*cq_poll), GFP_KERNEL);
		if (!cq_poll) {
			err = -ENOMEM;
			goto napi_del;
		}
		cq_poll->cq_idx = qidx;
		cq_poll->nicvf = nic;
		netif_napi_add(netdev, &cq_poll->napi, nicvf_poll,
			       NAPI_POLL_WEIGHT);
		napi_enable(&cq_poll->napi);
		nic->napi[qidx] = cq_poll;
	}

	/* Check if we got MAC address from PF, else generate a random MAC */
	if (is_zero_ether_addr(netdev->dev_addr)) {
		eth_hw_addr_random(netdev);
		nicvf_hw_set_mac_addr(nic, netdev);
	}

	if (nic->set_mac_pending) {
		nic->set_mac_pending = false;
		nicvf_hw_set_mac_addr(nic, netdev);
	}

	/* Init tasklet for handling Qset err interrupt */
	tasklet_init(&nic->qs_err_task, nicvf_handle_qs_err,
		     (unsigned long)nic);

	/* Init RBDR tasklet which will refill RBDR */
	tasklet_init(&nic->rbdr_task, nicvf_rbdr_task,
		     (unsigned long)nic);
	INIT_DELAYED_WORK(&nic->rbdr_work, nicvf_rbdr_work);

	/* Configure CPI algorithm */
	nic->cpi_alg = cpi_alg;
	if (!nic->sqs_mode)
		nicvf_config_cpi(nic);

	nicvf_request_sqs(nic);
	if (nic->sqs_mode)
		nicvf_get_primary_vf_struct(nic);

	/* Configure receive side scaling */
	if (!nic->sqs_mode)
		nicvf_rss_init(nic);

	err = nicvf_register_interrupts(nic);
	if (err)
		goto cleanup;

	/* Initialize the queues */
	err = nicvf_init_resources(nic);
	if (err)
		goto cleanup;

	/* Make sure queue initialization is written */
	wmb();

	nicvf_reg_write(nic, NIC_VF_INT, -1);
	/* Enable Qset err interrupt */
	nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);

	/* Enable completion queue interrupt */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++)
		nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);

	/* Enable RBDR threshold interrupt */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
		nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx);

	nic->drv_stats.txq_stop = 0;
	nic->drv_stats.txq_wake = 0;

	return 0;
cleanup:
	nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
	nicvf_unregister_interrupts(nic);
	tasklet_kill(&nic->qs_err_task);
	tasklet_kill(&nic->rbdr_task);
napi_del:
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		cq_poll = nic->napi[qidx];
		if (!cq_poll)
			continue;
		napi_disable(&cq_poll->napi);
		netif_napi_del(&cq_poll->napi);
	}
	nicvf_free_cq_poll(nic);
	return err;
}

static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu)
{
	union nic_mbx mbx = {};

	mbx.frs.msg = NIC_MBOX_MSG_SET_MAX_FRS;
	mbx.frs.max_frs = mtu;
	mbx.frs.vf_id = nic->vf_id;

	return nicvf_send_msg_to_pf(nic, &mbx);
}

static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct nicvf *nic = netdev_priv(netdev);

	if (new_mtu > NIC_HW_MAX_FRS)
		return -EINVAL;

	if (new_mtu < NIC_HW_MIN_FRS)
		return -EINVAL;

	if (nicvf_update_hw_max_frs(nic, new_mtu))
		return -EINVAL;
	netdev->mtu = new_mtu;
	nic->mtu = new_mtu;

	return 0;
}

static int nicvf_set_mac_address(struct net_device *netdev, void *p)
{
	struct sockaddr *addr = p;
	struct nicvf *nic = netdev_priv(netdev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	if (nic->msix_enabled) {
		if (nicvf_hw_set_mac_addr(nic, netdev))
			return -EBUSY;
	} else {
		nic->set_mac_pending = true;
	}

	return 0;
}

void nicvf_update_lmac_stats(struct nicvf *nic)
{
	int stat = 0;
	union nic_mbx mbx = {};

	if (!netif_running(nic->netdev))
		return;

	mbx.bgx_stats.msg = NIC_MBOX_MSG_BGX_STATS;
	mbx.bgx_stats.vf_id = nic->vf_id;
	/* Rx stats */
	mbx.bgx_stats.rx = 1;
	while (stat < BGX_RX_STATS_COUNT) {
		mbx.bgx_stats.idx = stat;
		if (nicvf_send_msg_to_pf(nic, &mbx))
			return;
		stat++;
	}

	stat = 0;

	/* Tx stats */
	mbx.bgx_stats.rx = 0;
	while (stat < BGX_TX_STATS_COUNT) {
		mbx.bgx_stats.idx = stat;
		if (nicvf_send_msg_to_pf(nic, &mbx))
			return;
		stat++;
	}
}

void nicvf_update_stats(struct nicvf *nic)
{
	int qidx;
	struct nicvf_hw_stats *stats = &nic->hw_stats;
	struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
	struct queue_set *qs = nic->qs;

#define GET_RX_STATS(reg) \
	nicvf_reg_read(nic, NIC_VNIC_RX_STAT_0_13 | (reg << 3))
#define GET_TX_STATS(reg) \
	nicvf_reg_read(nic, NIC_VNIC_TX_STAT_0_4 | (reg << 3))

	stats->rx_bytes = GET_RX_STATS(RX_OCTS);
	stats->rx_ucast_frames = GET_RX_STATS(RX_UCAST);
	stats->rx_bcast_frames = GET_RX_STATS(RX_BCAST);
	stats->rx_mcast_frames = GET_RX_STATS(RX_MCAST);
	stats->rx_fcs_errors = GET_RX_STATS(RX_FCS);
	stats->rx_l2_errors = GET_RX_STATS(RX_L2ERR);
	stats->rx_drop_red = GET_RX_STATS(RX_RED);
	stats->rx_drop_red_bytes = GET_RX_STATS(RX_RED_OCTS);
	stats->rx_drop_overrun = GET_RX_STATS(RX_ORUN);
	stats->rx_drop_overrun_bytes = GET_RX_STATS(RX_ORUN_OCTS);
	stats->rx_drop_bcast = GET_RX_STATS(RX_DRP_BCAST);
	stats->rx_drop_mcast = GET_RX_STATS(RX_DRP_MCAST);
	stats->rx_drop_l3_bcast = GET_RX_STATS(RX_DRP_L3BCAST);
	stats->rx_drop_l3_mcast = GET_RX_STATS(RX_DRP_L3MCAST);

	stats->tx_bytes_ok = GET_TX_STATS(TX_OCTS);
	stats->tx_ucast_frames_ok = GET_TX_STATS(TX_UCAST);
	stats->tx_bcast_frames_ok = GET_TX_STATS(TX_BCAST);
	stats->tx_mcast_frames_ok = GET_TX_STATS(TX_MCAST);
	stats->tx_drops = GET_TX_STATS(TX_DROP);

	drv_stats->tx_frames_ok = stats->tx_ucast_frames_ok +
				  stats->tx_bcast_frames_ok +
				  stats->tx_mcast_frames_ok;
	drv_stats->rx_drops = stats->rx_drop_red +
			      stats->rx_drop_overrun;
	drv_stats->tx_drops = stats->tx_drops;

	/* Update RQ and SQ stats */
	for (qidx = 0; qidx < qs->rq_cnt; qidx++)
		nicvf_update_rq_stats(nic, qidx);
	for (qidx = 0; qidx < qs->sq_cnt; qidx++)
		nicvf_update_sq_stats(nic, qidx);
}

static struct rtnl_link_stats64 *nicvf_get_stats64(struct net_device *netdev,
						   struct rtnl_link_stats64 *stats)
{
	struct nicvf *nic = netdev_priv(netdev);
	struct nicvf_hw_stats *hw_stats = &nic->hw_stats;
	struct nicvf_drv_stats *drv_stats = &nic->drv_stats;

	nicvf_update_stats(nic);

	stats->rx_bytes = hw_stats->rx_bytes;
	stats->rx_packets = drv_stats->rx_frames_ok;
	stats->rx_dropped = drv_stats->rx_drops;
	stats->multicast = hw_stats->rx_mcast_frames;

	stats->tx_bytes = hw_stats->tx_bytes_ok;
	stats->tx_packets = drv_stats->tx_frames_ok;
	stats->tx_dropped = drv_stats->tx_drops;

	return stats;
}

static void nicvf_tx_timeout(struct net_device *dev)
{
	struct nicvf *nic = netdev_priv(dev);

	if (netif_msg_tx_err(nic))
		netdev_warn(dev, "%s: Transmit timed out, resetting\n",
			    dev->name);

	schedule_work(&nic->reset_task);
}

static void nicvf_reset_task(struct work_struct *work)
{
	struct nicvf *nic;

	nic = container_of(work, struct nicvf, reset_task);

	if (!netif_running(nic->netdev))
		return;

	nicvf_stop(nic->netdev);
	nicvf_open(nic->netdev);
	nic->netdev->trans_start = jiffies;
}

static int nicvf_config_loopback(struct nicvf *nic,
				 netdev_features_t features)
{
	union nic_mbx mbx = {};

	mbx.lbk.msg = NIC_MBOX_MSG_LOOPBACK;
	mbx.lbk.vf_id = nic->vf_id;
	mbx.lbk.enable = (features & NETIF_F_LOOPBACK) != 0;

	return nicvf_send_msg_to_pf(nic, &mbx);
}

static netdev_features_t nicvf_fix_features(struct net_device *netdev,
					    netdev_features_t features)
{
	struct nicvf *nic = netdev_priv(netdev);

	if ((features & NETIF_F_LOOPBACK) &&
	    netif_running(netdev) && !nic->loopback_supported)
		features &= ~NETIF_F_LOOPBACK;

	return features;
}

static int nicvf_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	struct nicvf *nic = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
		nicvf_config_vlan_stripping(nic, features);

	if ((changed & NETIF_F_LOOPBACK) && netif_running(netdev))
		return nicvf_config_loopback(nic, features);

	return 0;
}

static const struct net_device_ops nicvf_netdev_ops = {
	.ndo_open		= nicvf_open,
	.ndo_stop		= nicvf_stop,
	.ndo_start_xmit		= nicvf_xmit,
	.ndo_change_mtu		= nicvf_change_mtu,
	.ndo_set_mac_address	= nicvf_set_mac_address,
	.ndo_get_stats64	= nicvf_get_stats64,
	.ndo_tx_timeout		= nicvf_tx_timeout,
	.ndo_fix_features	= nicvf_fix_features,
	.ndo_set_features	= nicvf_set_features,
};

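/* PCI probe: map the VF's register space, check that the PF is alive,
 * then set up the netdev (unless this VF is a secondary Qset) and
 * register it with the stack.
 */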
static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct net_device *netdev;
	struct nicvf *nic;
	int    err, qcount;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto err_disable_device;
	}

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to get usable DMA configuration\n");
		goto err_release_regions;
	}

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to get 48-bit DMA for consistent allocations\n");
		goto err_release_regions;
	}

	qcount = MAX_CMP_QUEUES_PER_QS;

	/* Restrict multiqset support to host-bound VFs only */
	if (pdev->is_virtfn) {
		/* Set max number of queues per VF */
		qcount = roundup(num_online_cpus(), MAX_CMP_QUEUES_PER_QS);
		qcount = min(qcount,
			     (MAX_SQS_PER_VF + 1) * MAX_CMP_QUEUES_PER_QS);
	}

	netdev = alloc_etherdev_mqs(sizeof(struct nicvf), qcount, qcount);
	if (!netdev) {
		err = -ENOMEM;
		goto err_release_regions;
	}

	pci_set_drvdata(pdev, netdev);

	SET_NETDEV_DEV(netdev, &pdev->dev);

	nic = netdev_priv(netdev);
	nic->netdev = netdev;
	nic->pdev = pdev;
	nic->pnicvf = nic;
	nic->max_queues = qcount;

	/* Map VF's configuration registers */
	nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
	if (!nic->reg_base) {
		dev_err(dev, "Cannot map config register space, aborting\n");
		err = -ENOMEM;
		goto err_free_netdev;
	}

	err = nicvf_set_qset_resources(nic);
	if (err)
		goto err_free_netdev;

	/* Check if PF is alive and get MAC address for this VF */
	err = nicvf_register_misc_interrupt(nic);
	if (err)
		goto err_free_netdev;

	nicvf_send_vf_struct(nic);

	/* Check if this VF is in QS only mode */
	if (nic->sqs_mode)
		return 0;

	err = nicvf_set_real_num_queues(netdev, nic->tx_queues, nic->rx_queues);
	if (err)
		goto err_unregister_interrupts;

	netdev->hw_features = (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_SG |
			       NETIF_F_TSO | NETIF_F_GRO |
			       NETIF_F_HW_VLAN_CTAG_RX);

	netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features;
	netdev->hw_features |= NETIF_F_LOOPBACK;

	netdev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;

	if (!pass1_silicon(nic->pdev))
		nic->hw_tso = true;

	netdev->netdev_ops = &nicvf_netdev_ops;
	netdev->watchdog_timeo = NICVF_TX_TIMEOUT;

	INIT_WORK(&nic->reset_task, nicvf_reset_task);

	err = register_netdev(netdev);
	if (err) {
		dev_err(dev, "Failed to register netdevice\n");
		goto err_unregister_interrupts;
	}

	nic->msg_enable = debug;

	nicvf_set_ethtool_ops(netdev);

	return 0;

err_unregister_interrupts:
	nicvf_unregister_interrupts(nic);
err_free_netdev:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
	return err;
}

static void nicvf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nicvf *nic;
	struct net_device *pnetdev;

	if (!netdev)
		return;

	nic = netdev_priv(netdev);
	pnetdev = nic->pnicvf->netdev;

	/* Check if this Qset is assigned to a different VF.
	 * If yes, clean primary and all secondary Qsets.
	 */
	if (pnetdev && (pnetdev->reg_state == NETREG_REGISTERED))
		unregister_netdev(pnetdev);
	nicvf_unregister_interrupts(nic);
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static void nicvf_shutdown(struct pci_dev *pdev)
{
	nicvf_remove(pdev);
}

static struct pci_driver nicvf_driver = {
	.name = DRV_NAME,
	.id_table = nicvf_id_table,
	.probe = nicvf_probe,
	.remove = nicvf_remove,
	.shutdown = nicvf_shutdown,
};

static int __init nicvf_init_module(void)
{
	pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);

	return pci_register_driver(&nicvf_driver);
}

static void __exit nicvf_cleanup_module(void)
{
	pci_unregister_driver(&nicvf_driver);
}

module_init(nicvf_init_module);
module_exit(nicvf_cleanup_module);