/*
 * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/rtnetlink.h>
#include <linux/prefetch.h>
#include <net/ip6_checksum.h>
#include <linux/ktime.h>
#include <linux/numa.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
#include <linux/crash_dump.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>

#include "cq_enet_desc.h"
#include "vnic_dev.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "vnic_vic.h"
#include "enic_res.h"
#include "enic.h"
#include "enic_dev.h"
#include "enic_pp.h"
#include "enic_clsf.h"

#define ENIC_NOTIFY_TIMER_PERIOD	(2 * HZ)
#define WQ_ENET_MAX_DESC_LEN		(1 << WQ_ENET_LEN_BITS)
#define MAX_TSO				(1 << 16)
#define ENIC_DESC_MAX_SPLITS		(MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1)

#define PCI_DEVICE_ID_CISCO_VIC_ENET         0x0043  /* ethernet vnic */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_DYN     0x0044  /* enet dynamic vnic */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_VF      0x0071  /* enet SRIOV VF */

#define RX_COPYBREAK_DEFAULT		256

/* Supported devices */
static const struct pci_device_id enic_id_table[] = {
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) },
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_DYN) },
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_VF) },
	{ 0, }	/* end of table */
};

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Scott Feldman <scofeldm@cisco.com>");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, enic_id_table);

#define ENIC_LARGE_PKT_THRESHOLD		1000
#define ENIC_MAX_COALESCE_TIMERS		10
/* Interrupt moderation table, used to decide the coalescing timer
 * values: {rx_rate in Mbps, mapping percentage of the range}
 */
static struct enic_intr_mod_table mod_table[ENIC_MAX_COALESCE_TIMERS + 1] = {
	{4000,  0},
	{4400, 10},
	{5060, 20},
	{5230, 30},
	{5540, 40},
	{5820, 50},
	{6120, 60},
	{6435, 70},
	{6745, 80},
	{7000, 90},
	{0xFFFFFFFF, 100}
};

/* This table helps the driver pick a different range for the rx
 * coalescing timer depending on the link speed.
 */
static struct enic_intr_mod_range mod_range[ENIC_MAX_LINK_SPEEDS] = {
	{0,  0}, /* 0  - 4  Gbps */
	{0,  3}, /* 4  - 10 Gbps */
	{3,  6}, /* 10 - 40 Gbps */
};

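/* Give the I/O (RQ/WQ) MSI-X vectors a default affinity hint, spreading
 * them across CPUs local to the adapter's NUMA node.  The err and notify
 * vectors, and any affinity mask that is already populated, are left
 * untouched.
 */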
static void enic_init_affinity_hint(struct enic *enic)
{
	int numa_node = dev_to_node(&enic->pdev->dev);
	int i;

	for (i = 0; i < enic->intr_count; i++) {
		if (enic_is_err_intr(enic, i) || enic_is_notify_intr(enic, i) ||
		    (cpumask_available(enic->msix[i].affinity_mask) &&
		     !cpumask_empty(enic->msix[i].affinity_mask)))
			continue;
		if (zalloc_cpumask_var(&enic->msix[i].affinity_mask,
				       GFP_KERNEL))
			cpumask_set_cpu(cpumask_local_spread(i, numa_node),
					enic->msix[i].affinity_mask);
	}
}

static void enic_free_affinity_hint(struct enic *enic)
{
	int i;

	for (i = 0; i < enic->intr_count; i++) {
		if (enic_is_err_intr(enic, i) || enic_is_notify_intr(enic, i))
			continue;
		free_cpumask_var(enic->msix[i].affinity_mask);
	}
}

static void enic_set_affinity_hint(struct enic *enic)
{
	int i;
	int err;

	for (i = 0; i < enic->intr_count; i++) {
		if (enic_is_err_intr(enic, i)		||
		    enic_is_notify_intr(enic, i)	||
		    !cpumask_available(enic->msix[i].affinity_mask) ||
		    cpumask_empty(enic->msix[i].affinity_mask))
			continue;
		err = irq_update_affinity_hint(enic->msix_entry[i].vector,
					       enic->msix[i].affinity_mask);
		if (err)
			netdev_warn(enic->netdev, "irq_update_affinity_hint failed, err %d\n",
				    err);
	}

	for (i = 0; i < enic->wq_count; i++) {
		int wq_intr = enic_msix_wq_intr(enic, i);

		if (cpumask_available(enic->msix[wq_intr].affinity_mask) &&
		    !cpumask_empty(enic->msix[wq_intr].affinity_mask))
			netif_set_xps_queue(enic->netdev,
					    enic->msix[wq_intr].affinity_mask,
					    i);
	}
}

static void enic_unset_affinity_hint(struct enic *enic)
{
	int i;

	for (i = 0; i < enic->intr_count; i++)
		irq_update_affinity_hint(enic->msix_entry[i].vector, NULL);
}

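/* udp_tunnel_nic callbacks.  The hardware offloads a single VXLAN UDP
 * port at a time (each table below has exactly one entry): set_port
 * pushes the new port number to the firmware, unset_port disables the
 * overlay offload entirely.
 */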
static int enic_udp_tunnel_set_port(struct net_device *netdev,
				    unsigned int table, unsigned int entry,
				    struct udp_tunnel_info *ti)
{
	struct enic *enic = netdev_priv(netdev);
	int err;

	spin_lock_bh(&enic->devcmd_lock);

	err = vnic_dev_overlay_offload_cfg(enic->vdev,
					   OVERLAY_CFG_VXLAN_PORT_UPDATE,
					   ntohs(ti->port));
	if (err)
		goto error;

	err = vnic_dev_overlay_offload_ctrl(enic->vdev, OVERLAY_FEATURE_VXLAN,
					    enic->vxlan.patch_level);
	if (err)
		goto error;

	enic->vxlan.vxlan_udp_port_number = ntohs(ti->port);
error:
	spin_unlock_bh(&enic->devcmd_lock);

	return err;
}

static int enic_udp_tunnel_unset_port(struct net_device *netdev,
				      unsigned int table, unsigned int entry,
				      struct udp_tunnel_info *ti)
{
	struct enic *enic = netdev_priv(netdev);
	int err;

	spin_lock_bh(&enic->devcmd_lock);

	err = vnic_dev_overlay_offload_ctrl(enic->vdev, OVERLAY_FEATURE_VXLAN,
					    OVERLAY_OFFLOAD_DISABLE);
	if (err)
		goto unlock;

	enic->vxlan.vxlan_udp_port_number = 0;

unlock:
	spin_unlock_bh(&enic->devcmd_lock);

	return err;
}

static const struct udp_tunnel_nic_info enic_udp_tunnels = {
	.set_port	= enic_udp_tunnel_set_port,
	.unset_port	= enic_udp_tunnel_unset_port,
	.tables		= {
		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
	},
}, enic_udp_tunnels_v4 = {
	.set_port	= enic_udp_tunnel_set_port,
	.unset_port	= enic_udp_tunnel_unset_port,
	.flags		= UDP_TUNNEL_NIC_INFO_IPV4_ONLY,
	.tables		= {
		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
	},
};

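/* Per-skb offload check for encapsulated packets: the outer and inner
 * headers must be IPv4 (or IPv6 where the adapter advertises support),
 * and the outer UDP destination port must be the one VXLAN port
 * programmed into the device; otherwise checksum/TSO offload is
 * disabled for this skb and the stack falls back to software.
 */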
static netdev_features_t enic_features_check(struct sk_buff *skb,
					     struct net_device *dev,
					     netdev_features_t features)
{
	const struct ethhdr *eth = (struct ethhdr *)skb_inner_mac_header(skb);
	struct enic *enic = netdev_priv(dev);
	struct udphdr *udph;
	u16 port = 0;
	u8 proto;

	if (!skb->encapsulation)
		return features;

	features = vxlan_features_check(skb, features);

	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IPV6):
		if (!(enic->vxlan.flags & ENIC_VXLAN_OUTER_IPV6))
			goto out;
		proto = ipv6_hdr(skb)->nexthdr;
		break;
	case htons(ETH_P_IP):
		proto = ip_hdr(skb)->protocol;
		break;
	default:
		goto out;
	}

	switch (eth->h_proto) {
	case htons(ETH_P_IPV6):
		if (!(enic->vxlan.flags & ENIC_VXLAN_INNER_IPV6))
			goto out;
		fallthrough;
	case htons(ETH_P_IP):
		break;
	default:
		goto out;
	}

	if (proto == IPPROTO_UDP) {
		udph = udp_hdr(skb);
		port = be16_to_cpu(udph->dest);
	}

	/* HW supports offload of only one UDP port. Remove CSUM and GSO MASK
	 * for tunnels on any other UDP port.
	 */
	if (port != enic->vxlan.vxlan_udp_port_number)
		goto out;

	return features;

out:
	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}

int enic_is_dynamic(struct enic *enic)
{
	return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN;
}

int enic_sriov_enabled(struct enic *enic)
{
	return (enic->priv_flags & ENIC_SRIOV_ENABLED) ? 1 : 0;
}

static int enic_is_sriov_vf(struct enic *enic)
{
	return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_VF;
}

int enic_is_valid_vf(struct enic *enic, int vf)
{
#ifdef CONFIG_PCI_IOV
	return vf >= 0 && vf < enic->num_vfs;
#else
	return 0;
#endif
}

static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
	struct enic *enic = vnic_dev_priv(wq->vdev);

	if (buf->sop)
		dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
				 DMA_TO_DEVICE);
	else
		dma_unmap_page(&enic->pdev->dev, buf->dma_addr, buf->len,
			       DMA_TO_DEVICE);

	if (buf->os_buf)
		dev_kfree_skb_any(buf->os_buf);
}

static void enic_wq_free_buf(struct vnic_wq *wq,
	struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque)
{
	enic_free_wq_buf(wq, buf);
}

static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	spin_lock(&enic->wq_lock[q_number]);

	vnic_wq_service(&enic->wq[q_number], cq_desc,
		completed_index, enic_wq_free_buf,
		opaque);

	if (netif_tx_queue_stopped(netdev_get_tx_queue(enic->netdev, q_number)) &&
	    vnic_wq_desc_avail(&enic->wq[q_number]) >=
	    (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS))
		netif_wake_subqueue(enic->netdev, q_number);

	spin_unlock(&enic->wq_lock[q_number]);

	return 0;
}

static bool enic_log_q_error(struct enic *enic)
{
	unsigned int i;
	u32 error_status;
	bool err = false;

	for (i = 0; i < enic->wq_count; i++) {
		error_status = vnic_wq_error_status(&enic->wq[i]);
		err |= error_status;
		if (error_status)
			netdev_err(enic->netdev, "WQ[%d] error_status %d\n",
				i, error_status);
	}

	for (i = 0; i < enic->rq_count; i++) {
		error_status = vnic_rq_error_status(&enic->rq[i]);
		err |= error_status;
		if (error_status)
			netdev_err(enic->netdev, "RQ[%d] error_status %d\n",
				i, error_status);
	}

	return err;
}

static void enic_msglvl_check(struct enic *enic)
{
	u32 msg_enable = vnic_dev_msg_lvl(enic->vdev);

	if (msg_enable != enic->msg_enable) {
		netdev_info(enic->netdev, "msg lvl changed from 0x%x to 0x%x\n",
			enic->msg_enable, msg_enable);
		enic->msg_enable = msg_enable;
	}
}

static void enic_mtu_check(struct enic *enic)
{
	u32 mtu = vnic_dev_mtu(enic->vdev);
	struct net_device *netdev = enic->netdev;

	if (mtu && mtu != enic->port_mtu) {
		enic->port_mtu = mtu;
		if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) {
			mtu = max_t(int, ENIC_MIN_MTU,
				min_t(int, ENIC_MAX_MTU, mtu));
			if (mtu != netdev->mtu)
				schedule_work(&enic->change_mtu_work);
		} else {
			if (mtu < netdev->mtu)
				netdev_warn(netdev,
					"interface MTU (%d) set higher than switch port MTU (%d)\n",
					netdev->mtu, mtu);
		}
	}
}

static void enic_link_check(struct enic *enic)
{
	int link_status = vnic_dev_link_status(enic->vdev);
	int carrier_ok = netif_carrier_ok(enic->netdev);

	if (link_status && !carrier_ok) {
		netdev_info(enic->netdev, "Link UP\n");
		netif_carrier_on(enic->netdev);
	} else if (!link_status && carrier_ok) {
		netdev_info(enic->netdev, "Link DOWN\n");
		netif_carrier_off(enic->netdev);
	}
}

static void enic_notify_check(struct enic *enic)
{
	enic_msglvl_check(enic);
	enic_mtu_check(enic);
	enic_link_check(enic);
}

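/* Legacy INTx: the pba read from the device is a bitmap with one bit
 * per interrupt source (I/O, error, notify) behind the single shared
 * IRQ line.  ("pba" presumably follows the MSI-X Pending Bit Array
 * naming.)
 */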
#define ENIC_TEST_INTR(pba, i) ((pba) & (1 << (i)))

static irqreturn_t enic_isr_legacy(int irq, void *data)
{
	struct net_device *netdev = data;
	struct enic *enic = netdev_priv(netdev);
	unsigned int io_intr = ENIC_LEGACY_IO_INTR;
	unsigned int err_intr = ENIC_LEGACY_ERR_INTR;
	unsigned int notify_intr = ENIC_LEGACY_NOTIFY_INTR;
	u32 pba;

	vnic_intr_mask(&enic->intr[io_intr]);

	pba = vnic_intr_legacy_pba(enic->legacy_pba);
	if (!pba) {
		vnic_intr_unmask(&enic->intr[io_intr]);
		return IRQ_NONE;	/* not our interrupt */
	}

	if (ENIC_TEST_INTR(pba, notify_intr)) {
		enic_notify_check(enic);
		vnic_intr_return_all_credits(&enic->intr[notify_intr]);
	}

	if (ENIC_TEST_INTR(pba, err_intr)) {
		vnic_intr_return_all_credits(&enic->intr[err_intr]);
		enic_log_q_error(enic);
		/* schedule recovery from WQ/RQ error */
		schedule_work(&enic->reset);
		return IRQ_HANDLED;
	}

	if (ENIC_TEST_INTR(pba, io_intr))
		napi_schedule_irqoff(&enic->napi[0]);
	else
		vnic_intr_unmask(&enic->intr[io_intr]);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msi(int irq, void *data)
{
	struct enic *enic = data;

	/* With MSI, there is no sharing of interrupts, so this is
	 * our interrupt and there is no need to ack it.  The device
	 * is not providing per-vector masking, so the OS will not
	 * write to PCI config space to mask/unmask the interrupt.
	 * We're using mask_on_assertion for MSI, so the device
	 * automatically masks the interrupt when the interrupt is
	 * generated.  Later, when exiting polling, the interrupt
	 * will be unmasked (see enic_poll).
	 *
	 * Also, the device uses the same PCIe Traffic Class (TC)
	 * for Memory Write data and MSI, so there are no ordering
	 * issues; the MSI will always arrive at the Root Complex
	 * _after_ corresponding Memory Writes (i.e. descriptor
	 * writes).
	 */

	napi_schedule_irqoff(&enic->napi[0]);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix(int irq, void *data)
{
	struct napi_struct *napi = data;

	napi_schedule_irqoff(napi);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_err(int irq, void *data)
{
	struct enic *enic = data;
	unsigned int intr = enic_msix_err_intr(enic);

	vnic_intr_return_all_credits(&enic->intr[intr]);

	if (enic_log_q_error(enic))
		/* schedule recovery from WQ/RQ error */
		schedule_work(&enic->reset);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_notify(int irq, void *data)
{
	struct enic *enic = data;
	unsigned int intr = enic_msix_notify_intr(enic);

	enic_notify_check(enic);
	vnic_intr_return_all_credits(&enic->intr[intr]);

	return IRQ_HANDLED;
}

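/* TX descriptor queueing helpers.  Each enic_queue_wq_skb_* variant
 * below posts the skb's linear head as the first descriptor, with the
 * offload flags for its mode, and chains any page fragments through
 * enic_queue_wq_skb_cont(), which marks EOP on the last descriptor.
 */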
static int enic_queue_wq_skb_cont(struct enic *enic, struct vnic_wq *wq,
				  struct sk_buff *skb, unsigned int len_left,
				  int loopback)
{
	const skb_frag_t *frag;
	dma_addr_t dma_addr;

	/* Queue additional data fragments */
	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
		len_left -= skb_frag_size(frag);
		dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag, 0,
					    skb_frag_size(frag),
					    DMA_TO_DEVICE);
		if (unlikely(enic_dma_map_check(enic, dma_addr)))
			return -ENOMEM;
		enic_queue_wq_desc_cont(wq, skb, dma_addr, skb_frag_size(frag),
					(len_left == 0),	/* EOP? */
					loopback);
	}

	return 0;
}

static int enic_queue_wq_skb_vlan(struct enic *enic, struct vnic_wq *wq,
				  struct sk_buff *skb, int vlan_tag_insert,
				  unsigned int vlan_tag, int loopback)
{
	unsigned int head_len = skb_headlen(skb);
	unsigned int len_left = skb->len - head_len;
	int eop = (len_left == 0);
	dma_addr_t dma_addr;
	int err = 0;

	dma_addr = dma_map_single(&enic->pdev->dev, skb->data, head_len,
				  DMA_TO_DEVICE);
	if (unlikely(enic_dma_map_check(enic, dma_addr)))
		return -ENOMEM;

	/* Queue the main skb fragment. The fragments are no larger
	 * than max MTU (9000) + ETH_HDR_LEN (14) bytes, which is less
	 * than WQ_ENET_MAX_DESC_LEN, so only one descriptor per
	 * fragment is queued.
	 */
	enic_queue_wq_desc(wq, skb, dma_addr, head_len, vlan_tag_insert,
			   vlan_tag, eop, loopback);

	if (!eop)
		err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);

	return err;
}

static int enic_queue_wq_skb_csum_l4(struct enic *enic, struct vnic_wq *wq,
				     struct sk_buff *skb, int vlan_tag_insert,
				     unsigned int vlan_tag, int loopback)
{
	unsigned int head_len = skb_headlen(skb);
	unsigned int len_left = skb->len - head_len;
	unsigned int hdr_len = skb_checksum_start_offset(skb);
	unsigned int csum_offset = hdr_len + skb->csum_offset;
	int eop = (len_left == 0);
	dma_addr_t dma_addr;
	int err = 0;

	dma_addr = dma_map_single(&enic->pdev->dev, skb->data, head_len,
				  DMA_TO_DEVICE);
	if (unlikely(enic_dma_map_check(enic, dma_addr)))
		return -ENOMEM;

	/* Queue the main skb fragment. The fragments are no larger
	 * than max MTU (9000) + ETH_HDR_LEN (14) bytes, which is less
	 * than WQ_ENET_MAX_DESC_LEN, so only one descriptor per
	 * fragment is queued.
	 */
	enic_queue_wq_desc_csum_l4(wq, skb, dma_addr, head_len, csum_offset,
				   hdr_len, vlan_tag_insert, vlan_tag, eop,
				   loopback);

	if (!eop)
		err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);

	return err;
}

static void enic_preload_tcp_csum_encap(struct sk_buff *skb)
{
	const struct ethhdr *eth = (struct ethhdr *)skb_inner_mac_header(skb);

	switch (eth->h_proto) {
	case htons(ETH_P_IP):
		inner_ip_hdr(skb)->check = 0;
		inner_tcp_hdr(skb)->check =
			~csum_tcpudp_magic(inner_ip_hdr(skb)->saddr,
					   inner_ip_hdr(skb)->daddr, 0,
					   IPPROTO_TCP, 0);
		break;
	case htons(ETH_P_IPV6):
		inner_tcp_hdr(skb)->check =
			~csum_ipv6_magic(&inner_ipv6_hdr(skb)->saddr,
					 &inner_ipv6_hdr(skb)->daddr, 0,
					 IPPROTO_TCP, 0);
		break;
	default:
		WARN_ONCE(1, "Non ipv4/ipv6 inner pkt for encap offload");
		break;
	}
}

static void enic_preload_tcp_csum(struct sk_buff *skb)
{
	/* Preload TCP csum field with IP pseudo hdr calculated
	 * with IP length set to zero.  HW will later add in length
	 * to each TCP segment resulting from the TSO.
	 */

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		ip_hdr(skb)->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
			ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		tcp_v6_gso_csum_prep(skb);
	}
}

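/* TSO: preload the TCP pseudo-header checksum (the device fills in the
 * per-segment lengths later) and split both the linear head and each
 * fragment into descriptors of at most WQ_ENET_MAX_DESC_LEN bytes,
 * since a TSO send may exceed the per-descriptor length limit.
 */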
static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq,
				 struct sk_buff *skb, unsigned int mss,
				 int vlan_tag_insert, unsigned int vlan_tag,
				 int loopback)
{
	unsigned int frag_len_left = skb_headlen(skb);
	unsigned int len_left = skb->len - frag_len_left;
	int eop = (len_left == 0);
	unsigned int offset = 0;
	unsigned int hdr_len;
	dma_addr_t dma_addr;
	unsigned int len;
	skb_frag_t *frag;

	if (skb->encapsulation) {
		hdr_len = skb_inner_tcp_all_headers(skb);
		enic_preload_tcp_csum_encap(skb);
	} else {
		hdr_len = skb_tcp_all_headers(skb);
		enic_preload_tcp_csum(skb);
	}

	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
	 * for the main skb fragment
	 */
	while (frag_len_left) {
		len = min(frag_len_left, (unsigned int)WQ_ENET_MAX_DESC_LEN);
		dma_addr = dma_map_single(&enic->pdev->dev,
					  skb->data + offset, len,
					  DMA_TO_DEVICE);
		if (unlikely(enic_dma_map_check(enic, dma_addr)))
			return -ENOMEM;
		enic_queue_wq_desc_tso(wq, skb, dma_addr, len, mss, hdr_len,
				       vlan_tag_insert, vlan_tag,
				       eop && (len == frag_len_left), loopback);
		frag_len_left -= len;
		offset += len;
	}

	if (eop)
		return 0;

	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
	 * for additional data fragments
	 */
	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
		len_left -= skb_frag_size(frag);
		frag_len_left = skb_frag_size(frag);
		offset = 0;

		while (frag_len_left) {
			len = min(frag_len_left,
				(unsigned int)WQ_ENET_MAX_DESC_LEN);
			dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag,
						    offset, len,
						    DMA_TO_DEVICE);
			if (unlikely(enic_dma_map_check(enic, dma_addr)))
				return -ENOMEM;
			enic_queue_wq_desc_cont(wq, skb, dma_addr, len,
						(len_left == 0) &&
						 (len == frag_len_left),/*EOP*/
						loopback);
			frag_len_left -= len;
			offset += len;
		}
	}

	return 0;
}

static inline int enic_queue_wq_skb_encap(struct enic *enic, struct vnic_wq *wq,
					  struct sk_buff *skb,
					  int vlan_tag_insert,
					  unsigned int vlan_tag, int loopback)
{
	unsigned int head_len = skb_headlen(skb);
	unsigned int len_left = skb->len - head_len;
	/* Hardware will overwrite the checksum fields, calculating from
	 * scratch and ignoring the value placed by software.
	 * Offload mode = 00
	 * mss[2], mss[1], mss[0] bits are set
	 */
	unsigned int mss_or_csum = 7;
	int eop = (len_left == 0);
	dma_addr_t dma_addr;
	int err = 0;

	dma_addr = dma_map_single(&enic->pdev->dev, skb->data, head_len,
				  DMA_TO_DEVICE);
	if (unlikely(enic_dma_map_check(enic, dma_addr)))
		return -ENOMEM;

	enic_queue_wq_desc_ex(wq, skb, dma_addr, head_len, mss_or_csum, 0,
			      vlan_tag_insert, vlan_tag,
			      WQ_ENET_OFFLOAD_MODE_CSUM, eop, 1 /* SOP */, eop,
			      loopback);
	if (!eop)
		err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);

	return err;
}

static inline int enic_queue_wq_skb(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb)
{
	unsigned int mss = skb_shinfo(skb)->gso_size;
	unsigned int vlan_tag = 0;
	int vlan_tag_insert = 0;
	int loopback = 0;
	int err;

	if (skb_vlan_tag_present(skb)) {
		/* VLAN tag from trunking driver */
		vlan_tag_insert = 1;
		vlan_tag = skb_vlan_tag_get(skb);
	} else if (enic->loop_enable) {
		vlan_tag = enic->loop_tag;
		loopback = 1;
	}

	if (mss)
		err = enic_queue_wq_skb_tso(enic, wq, skb, mss,
					    vlan_tag_insert, vlan_tag,
					    loopback);
	else if (skb->encapsulation)
		err = enic_queue_wq_skb_encap(enic, wq, skb, vlan_tag_insert,
					      vlan_tag, loopback);
	else if (skb->ip_summed == CHECKSUM_PARTIAL)
		err = enic_queue_wq_skb_csum_l4(enic, wq, skb, vlan_tag_insert,
						vlan_tag, loopback);
	else
		err = enic_queue_wq_skb_vlan(enic, wq, skb, vlan_tag_insert,
					     vlan_tag, loopback);
	if (unlikely(err)) {
		struct vnic_wq_buf *buf;

		buf = wq->to_use->prev;
		/* Unwind while not at EOP of the previous pkt && the queue is
		 * not empty.  For all non-EOP bufs, os_buf is NULL.
		 */
		while (!buf->os_buf && (buf->next != wq->to_clean)) {
			enic_free_wq_buf(wq, buf);
			wq->ring.desc_avail++;
			buf = buf->prev;
		}
		wq->to_use = buf->next;
		dev_kfree_skb(skb);
	}
	return err;
}

/* netif_tx_lock held, process context with BHs disabled, or BH */
static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
	struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_wq *wq;
	unsigned int txq_map;
	struct netdev_queue *txq;

	if (skb->len <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	txq_map = skb_get_queue_mapping(skb) % enic->wq_count;
	wq = &enic->wq[txq_map];
	txq = netdev_get_tx_queue(netdev, txq_map);

	/* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
	 * which is very likely.  On the off chance it's going to take
	 * more than ENIC_NON_TSO_MAX_DESC, linearize the skb.
	 */

	if (skb_shinfo(skb)->gso_size == 0 &&
	    skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC &&
	    skb_linearize(skb)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	spin_lock(&enic->wq_lock[txq_map]);

	if (vnic_wq_desc_avail(wq) <
	    skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) {
		netif_tx_stop_queue(txq);
		/* This is a hard error, log it */
		netdev_err(netdev, "BUG! Tx ring full when queue awake!\n");
		spin_unlock(&enic->wq_lock[txq_map]);
		return NETDEV_TX_BUSY;
	}

	if (enic_queue_wq_skb(enic, wq, skb))
		goto error;

	if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
		netif_tx_stop_queue(txq);
	skb_tx_timestamp(skb);
	if (!netdev_xmit_more() || netif_xmit_stopped(txq))
		vnic_wq_doorbell(wq);

error:
	spin_unlock(&enic->wq_lock[txq_map]);

	return NETDEV_TX_OK;
}

/* dev_base_lock rwlock held, nominally process context */
static void enic_get_stats(struct net_device *netdev,
			   struct rtnl_link_stats64 *net_stats)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_stats *stats;
	int err;

	err = enic_dev_stats_dump(enic, &stats);
	/* return only when dma_alloc_coherent fails in vnic_dev_stats_dump.
	 * For other failures, like devcmd failure, we return previously
	 * recorded stats.
	 */
	if (err == -ENOMEM)
		return;

	net_stats->tx_packets = stats->tx.tx_frames_ok;
	net_stats->tx_bytes = stats->tx.tx_bytes_ok;
	net_stats->tx_errors = stats->tx.tx_errors;
	net_stats->tx_dropped = stats->tx.tx_drops;

	net_stats->rx_packets = stats->rx.rx_frames_ok;
	net_stats->rx_bytes = stats->rx.rx_bytes_ok;
	net_stats->rx_errors = stats->rx.rx_errors;
	net_stats->multicast = stats->rx.rx_multicast_frames_ok;
	net_stats->rx_over_errors = enic->rq_truncated_pkts;
	net_stats->rx_crc_errors = enic->rq_bad_fcs;
	net_stats->rx_dropped = stats->rx.rx_no_bufs + stats->rx.rx_drop;
}

static int enic_mc_sync(struct net_device *netdev, const u8 *mc_addr)
{
	struct enic *enic = netdev_priv(netdev);

	if (enic->mc_count == ENIC_MULTICAST_PERFECT_FILTERS) {
		unsigned int mc_count = netdev_mc_count(netdev);

		netdev_warn(netdev, "Registering only %d out of %d multicast addresses\n",
			    ENIC_MULTICAST_PERFECT_FILTERS, mc_count);

		return -ENOSPC;
	}

	enic_dev_add_addr(enic, mc_addr);
	enic->mc_count++;

	return 0;
}

static int enic_mc_unsync(struct net_device *netdev, const u8 *mc_addr)
{
	struct enic *enic = netdev_priv(netdev);

	enic_dev_del_addr(enic, mc_addr);
	enic->mc_count--;

	return 0;
}

static int enic_uc_sync(struct net_device *netdev, const u8 *uc_addr)
{
	struct enic *enic = netdev_priv(netdev);

	if (enic->uc_count == ENIC_UNICAST_PERFECT_FILTERS) {
		unsigned int uc_count = netdev_uc_count(netdev);

		netdev_warn(netdev, "Registering only %d out of %d unicast addresses\n",
			    ENIC_UNICAST_PERFECT_FILTERS, uc_count);

		return -ENOSPC;
	}

	enic_dev_add_addr(enic, uc_addr);
	enic->uc_count++;

	return 0;
}

static int enic_uc_unsync(struct net_device *netdev, const u8 *uc_addr)
{
	struct enic *enic = netdev_priv(netdev);

	enic_dev_del_addr(enic, uc_addr);
	enic->uc_count--;

	return 0;
}

void enic_reset_addr_lists(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;

	__dev_uc_unsync(netdev, NULL);
	__dev_mc_unsync(netdev, NULL);

	enic->mc_count = 0;
	enic->uc_count = 0;
	enic->flags = 0;
}

static int enic_set_mac_addr(struct net_device *netdev, char *addr)
{
	struct enic *enic = netdev_priv(netdev);

	if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) {
		if (!is_valid_ether_addr(addr) && !is_zero_ether_addr(addr))
			return -EADDRNOTAVAIL;
	} else {
		if (!is_valid_ether_addr(addr))
			return -EADDRNOTAVAIL;
	}

	eth_hw_addr_set(netdev, addr);

	return 0;
}

static int enic_set_mac_address_dynamic(struct net_device *netdev, void *p)
{
	struct enic *enic = netdev_priv(netdev);
	struct sockaddr *saddr = p;
	char *addr = saddr->sa_data;
	int err;

	if (netif_running(enic->netdev)) {
		err = enic_dev_del_station_addr(enic);
		if (err)
			return err;
	}

	err = enic_set_mac_addr(netdev, addr);
	if (err)
		return err;

	if (netif_running(enic->netdev)) {
		err = enic_dev_add_station_addr(enic);
		if (err)
			return err;
	}

	return err;
}

static int enic_set_mac_address(struct net_device *netdev, void *p)
{
	struct sockaddr *saddr = p;
	char *addr = saddr->sa_data;
	struct enic *enic = netdev_priv(netdev);
	int err;

	err = enic_dev_del_station_addr(enic);
	if (err)
		return err;

	err = enic_set_mac_addr(netdev, addr);
	if (err)
		return err;

	return enic_dev_add_station_addr(enic);
}

/* netif_tx_lock held, BHs disabled */
static void enic_set_rx_mode(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	int directed = 1;
	int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0;
	int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0;
	int promisc = (netdev->flags & IFF_PROMISC) ||
		netdev_uc_count(netdev) > ENIC_UNICAST_PERFECT_FILTERS;
	int allmulti = (netdev->flags & IFF_ALLMULTI) ||
		netdev_mc_count(netdev) > ENIC_MULTICAST_PERFECT_FILTERS;
	unsigned int flags = netdev->flags |
		(allmulti ? IFF_ALLMULTI : 0) |
		(promisc ? IFF_PROMISC : 0);

	if (enic->flags != flags) {
		enic->flags = flags;
		enic_dev_packet_filter(enic, directed,
			multicast, broadcast, promisc, allmulti);
	}

	if (!promisc) {
		__dev_uc_sync(netdev, enic_uc_sync, enic_uc_unsync);
		if (!allmulti)
			__dev_mc_sync(netdev, enic_mc_sync, enic_mc_unsync);
	}
}

/* netif_tx_lock held, BHs disabled */
static void enic_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct enic *enic = netdev_priv(netdev);

	schedule_work(&enic->tx_hang_reset);
}

static int enic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct enic *enic = netdev_priv(netdev);
	struct enic_port_profile *pp;
	int err;

	ENIC_PP_BY_INDEX(enic, vf, pp, &err);
	if (err)
		return err;

	if (is_valid_ether_addr(mac) || is_zero_ether_addr(mac)) {
		if (vf == PORT_SELF_VF) {
			memcpy(pp->vf_mac, mac, ETH_ALEN);
			return 0;
		} else {
			/* For SR-IOV VFs, set the MAC in hw */
			ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic,
				vnic_dev_set_mac_addr, mac);
			return enic_dev_status_to_errno(err);
		}
	} else
		return -EINVAL;
}

static int enic_set_vf_port(struct net_device *netdev, int vf,
	struct nlattr *port[])
{
	static const u8 zero_addr[ETH_ALEN] = {};
	struct enic *enic = netdev_priv(netdev);
	struct enic_port_profile prev_pp;
	struct enic_port_profile *pp;
	int err = 0, restore_pp = 1;

	ENIC_PP_BY_INDEX(enic, vf, pp, &err);
	if (err)
		return err;

	if (!port[IFLA_PORT_REQUEST])
		return -EOPNOTSUPP;

	memcpy(&prev_pp, pp, sizeof(*enic->pp));
	memset(pp, 0, sizeof(*enic->pp));

	pp->set |= ENIC_SET_REQUEST;
	pp->request = nla_get_u8(port[IFLA_PORT_REQUEST]);

	if (port[IFLA_PORT_PROFILE]) {
		if (nla_len(port[IFLA_PORT_PROFILE]) != PORT_PROFILE_MAX) {
			memcpy(pp, &prev_pp, sizeof(*pp));
			return -EINVAL;
		}
		pp->set |= ENIC_SET_NAME;
		memcpy(pp->name, nla_data(port[IFLA_PORT_PROFILE]),
			PORT_PROFILE_MAX);
	}

	if (port[IFLA_PORT_INSTANCE_UUID]) {
		if (nla_len(port[IFLA_PORT_INSTANCE_UUID]) != PORT_UUID_MAX) {
			memcpy(pp, &prev_pp, sizeof(*pp));
			return -EINVAL;
		}
		pp->set |= ENIC_SET_INSTANCE;
		memcpy(pp->instance_uuid,
			nla_data(port[IFLA_PORT_INSTANCE_UUID]), PORT_UUID_MAX);
	}

	if (port[IFLA_PORT_HOST_UUID]) {
		if (nla_len(port[IFLA_PORT_HOST_UUID]) != PORT_UUID_MAX) {
			memcpy(pp, &prev_pp, sizeof(*pp));
			return -EINVAL;
		}
		pp->set |= ENIC_SET_HOST;
		memcpy(pp->host_uuid,
			nla_data(port[IFLA_PORT_HOST_UUID]), PORT_UUID_MAX);
	}

	if (vf == PORT_SELF_VF) {
		/* Special case handling: mac came from IFLA_VF_MAC */
		if (!is_zero_ether_addr(prev_pp.vf_mac))
			memcpy(pp->mac_addr, prev_pp.vf_mac, ETH_ALEN);

		if (is_zero_ether_addr(netdev->dev_addr))
			eth_hw_addr_random(netdev);
	} else {
		/* SR-IOV VF: get mac from adapter */
		ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic,
			vnic_dev_get_mac_addr, pp->mac_addr);
		if (err) {
			netdev_err(netdev, "Error getting mac for vf %d\n", vf);
			memcpy(pp, &prev_pp, sizeof(*pp));
			return enic_dev_status_to_errno(err);
		}
	}

	err = enic_process_set_pp_request(enic, vf, &prev_pp, &restore_pp);
	if (err) {
		if (restore_pp) {
			/* Things are still the way they were: Implicit
			 * DISASSOCIATE failed
			 */
			memcpy(pp, &prev_pp, sizeof(*pp));
		} else {
			memset(pp, 0, sizeof(*pp));
			if (vf == PORT_SELF_VF)
				eth_hw_addr_set(netdev, zero_addr);
		}
	} else {
		/* Set flag to indicate that the port assoc/disassoc
		 * request has been sent out to fw
		 */
		pp->set |= ENIC_PORT_REQUEST_APPLIED;

		/* If DISASSOCIATE, clean up all assigned/saved MAC addresses */
		if (pp->request == PORT_REQUEST_DISASSOCIATE) {
			eth_zero_addr(pp->mac_addr);
			if (vf == PORT_SELF_VF)
				eth_hw_addr_set(netdev, zero_addr);
		}
	}

	if (vf == PORT_SELF_VF)
		eth_zero_addr(pp->vf_mac);

	return err;
}

static int enic_get_vf_port(struct net_device *netdev, int vf,
	struct sk_buff *skb)
{
	struct enic *enic = netdev_priv(netdev);
	u16 response = PORT_PROFILE_RESPONSE_SUCCESS;
	struct enic_port_profile *pp;
	int err;

	ENIC_PP_BY_INDEX(enic, vf, pp, &err);
	if (err)
		return err;

	if (!(pp->set & ENIC_PORT_REQUEST_APPLIED))
		return -ENODATA;

	err = enic_process_get_pp_request(enic, vf, pp->request, &response);
	if (err)
		return err;

	if (nla_put_u16(skb, IFLA_PORT_REQUEST, pp->request) ||
	    nla_put_u16(skb, IFLA_PORT_RESPONSE, response) ||
	    ((pp->set & ENIC_SET_NAME) &&
	     nla_put(skb, IFLA_PORT_PROFILE, PORT_PROFILE_MAX, pp->name)) ||
	    ((pp->set & ENIC_SET_INSTANCE) &&
	     nla_put(skb, IFLA_PORT_INSTANCE_UUID, PORT_UUID_MAX,
		     pp->instance_uuid)) ||
	    ((pp->set & ENIC_SET_HOST) &&
	     nla_put(skb, IFLA_PORT_HOST_UUID, PORT_UUID_MAX, pp->host_uuid)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);

	if (!buf->os_buf)
		return;

	dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
			 DMA_FROM_DEVICE);
	dev_kfree_skb_any(buf->os_buf);
	buf->os_buf = NULL;
}

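/* Post one receive buffer to the RQ.  A buffer left over from a
 * previous pass (e.g. after an rx_copybreak copy) is re-posted as is;
 * otherwise allocate and DMA-map a fresh skb sized for the MTU plus
 * the VLAN Ethernet header.
 */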
static int enic_rq_alloc_buf(struct vnic_rq *rq)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct net_device *netdev = enic->netdev;
	struct sk_buff *skb;
	unsigned int len = netdev->mtu + VLAN_ETH_HLEN;
	unsigned int os_buf_index = 0;
	dma_addr_t dma_addr;
	struct vnic_rq_buf *buf = rq->to_use;

	if (buf->os_buf) {
		enic_queue_rq_desc(rq, buf->os_buf, os_buf_index, buf->dma_addr,
				   buf->len);

		return 0;
	}

	skb = netdev_alloc_skb_ip_align(netdev, len);
	if (!skb)
		return -ENOMEM;

	dma_addr = dma_map_single(&enic->pdev->dev, skb->data, len,
				  DMA_FROM_DEVICE);
	if (unlikely(enic_dma_map_check(enic, dma_addr))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	enic_queue_rq_desc(rq, skb, os_buf_index, dma_addr, len);

	return 0;
}

static void enic_intr_update_pkt_size(struct vnic_rx_bytes_counter *pkt_size,
				      u32 pkt_len)
{
	if (ENIC_LARGE_PKT_THRESHOLD <= pkt_len)
		pkt_size->large_pkt_bytes_cnt += pkt_len;
	else
		pkt_size->small_pkt_bytes_cnt += pkt_len;
}

static bool enic_rxcopybreak(struct net_device *netdev, struct sk_buff **skb,
			     struct vnic_rq_buf *buf, u16 len)
{
	struct enic *enic = netdev_priv(netdev);
	struct sk_buff *new_skb;

	if (len > enic->rx_copybreak)
		return false;
	new_skb = netdev_alloc_skb_ip_align(netdev, len);
	if (!new_skb)
		return false;
	dma_sync_single_for_cpu(&enic->pdev->dev, buf->dma_addr, len,
				DMA_FROM_DEVICE);
	memcpy(new_skb->data, (*skb)->data, len);
	*skb = new_skb;

	return true;
}

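/* RQ completion handler: decode the completion descriptor, count and
 * drop errored or truncated frames, and hand good frames to the stack
 * with the RSS hash, checksum status and stripped VLAN tag filled in.
 */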
static void enic_rq_indicate_buf(struct vnic_rq *rq,
	struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
	int skipped, void *opaque)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct net_device *netdev = enic->netdev;
	struct sk_buff *skb;
	struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];

	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
	u8 packet_error;
	u16 q_number, completed_index, bytes_written, vlan_tci, checksum;
	u32 rss_hash;
	bool outer_csum_ok = true, encap = false;

	if (skipped)
		return;

	skb = buf->os_buf;

	cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
		&type, &color, &q_number, &completed_index,
		&ingress_port, &fcoe, &eop, &sop, &rss_type,
		&csum_not_calc, &rss_hash, &bytes_written,
		&packet_error, &vlan_stripped, &vlan_tci, &checksum,
		&fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
		&fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
		&ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
		&fcs_ok);

	if (packet_error) {

		if (!fcs_ok) {
			if (bytes_written > 0)
				enic->rq_bad_fcs++;
			else if (bytes_written == 0)
				enic->rq_truncated_pkts++;
		}

		dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		buf->os_buf = NULL;

		return;
	}

	if (eop && bytes_written > 0) {

		/* Good receive
		 */

		if (!enic_rxcopybreak(netdev, &skb, buf, bytes_written)) {
			buf->os_buf = NULL;
			dma_unmap_single(&enic->pdev->dev, buf->dma_addr,
					 buf->len, DMA_FROM_DEVICE);
		}
		prefetch(skb->data - NET_IP_ALIGN);

		skb_put(skb, bytes_written);
		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, q_number);
		if ((netdev->features & NETIF_F_RXHASH) && rss_hash &&
		    (type == 3)) {
			switch (rss_type) {
			case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv4:
			case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6:
			case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6_EX:
				skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L4);
				break;
			case CQ_ENET_RQ_DESC_RSS_TYPE_IPv4:
			case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6:
			case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6_EX:
				skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L3);
				break;
			}
		}
		if (enic->vxlan.vxlan_udp_port_number) {
			switch (enic->vxlan.patch_level) {
			case 0:
				if (fcoe) {
					encap = true;
					outer_csum_ok = fcoe_fc_crc_ok;
				}
				break;
			case 2:
				if ((type == 7) &&
				    (rss_hash & BIT(0))) {
					encap = true;
					outer_csum_ok = (rss_hash & BIT(1)) &&
							(rss_hash & BIT(2));
				}
				break;
			}
		}

		/* Hardware does not provide the whole packet checksum, only
		 * a pseudo checksum.  Since hw validates the packet checksum
		 * but does not provide us the checksum value, use
		 * CHECKSUM_UNNECESSARY.
		 *
		 * In case of an encap pkt, tcp_udp_csum_ok is the inner
		 * csum_ok.  outer_csum_ok is set by hw when the outer udp
		 * csum is correct or is zero.
		 */
		if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc &&
		    tcp_udp_csum_ok && outer_csum_ok &&
		    (ipv4_csum_ok || ipv6)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			skb->csum_level = encap;
		}

		if (vlan_stripped)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);

		skb_mark_napi_id(skb, &enic->napi[rq->index]);
		if (!(netdev->features & NETIF_F_GRO))
			netif_receive_skb(skb);
		else
			napi_gro_receive(&enic->napi[q_number], skb);
		if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
			enic_intr_update_pkt_size(&cq->pkt_size_counter,
						  bytes_written);
	} else {

		/* Buffer overflow
		 */

		dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		buf->os_buf = NULL;
	}
}

static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	vnic_rq_service(&enic->rq[q_number], cq_desc,
		completed_index, VNIC_RQ_RETURN_DESC,
		enic_rq_indicate_buf, opaque);

	return 0;
}

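/* Program the coalescing timer target computed by
 * enic_calc_int_moderation() into the RQ's interrupt, but only when the
 * value actually changed.
 */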
static void enic_set_int_moderation(struct enic *enic, struct vnic_rq *rq)
{
	unsigned int intr = enic_msix_rq_intr(enic, rq->index);
	struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
	u32 timer = cq->tobe_rx_coal_timeval;

	if (cq->tobe_rx_coal_timeval != cq->cur_rx_coal_timeval) {
		vnic_intr_coalescing_timer_set(&enic->intr[intr], timer);
		cq->cur_rx_coal_timeval = cq->tobe_rx_coal_timeval;
	}
}

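/* Pick a new target coalescing timer from the observed receive rate.
 * Worked example: 1,250,000 bytes seen in a 2000 us window is
 * 10,000,000 bits / 2000 us = 5000 Mbps; the first mod_table entry
 * above that rate is {5060, 20}, so the timer lands 20% of the way
 * into the range chosen for the link speed (the small- or large-packet
 * range, depending on which byte counter dominates), then is damped by
 * averaging with the previous target.
 */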
static void enic_calc_int_moderation(struct enic *enic, struct vnic_rq *rq)
{
	struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
	struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
	struct vnic_rx_bytes_counter *pkt_size_counter = &cq->pkt_size_counter;
	int index;
	u32 timer;
	u32 range_start;
	u32 traffic;
	u64 delta;
	ktime_t now = ktime_get();

	delta = ktime_us_delta(now, cq->prev_ts);
	if (delta < ENIC_AIC_TS_BREAK)
		return;
	cq->prev_ts = now;

	traffic = pkt_size_counter->large_pkt_bytes_cnt +
		  pkt_size_counter->small_pkt_bytes_cnt;
	/* The table takes Mbps
	 * traffic *= 8    => bits
	 * traffic *= (10^6 / delta)    => bps
	 * traffic /= 10^6     => Mbps
	 *
	 * Combining, traffic *= (8 / delta)
	 */

	traffic <<= 3;
	traffic = delta > UINT_MAX ? 0 : traffic / (u32)delta;

	for (index = 0; index < ENIC_MAX_COALESCE_TIMERS; index++)
		if (traffic < mod_table[index].rx_rate)
			break;
	range_start = (pkt_size_counter->small_pkt_bytes_cnt >
		       pkt_size_counter->large_pkt_bytes_cnt << 1) ?
		      rx_coal->small_pkt_range_start :
		      rx_coal->large_pkt_range_start;
	timer = range_start + ((rx_coal->range_end - range_start) *
			       mod_table[index].range_percent / 100);
	/* Damping */
	cq->tobe_rx_coal_timeval = (timer + cq->tobe_rx_coal_timeval) >> 1;

	pkt_size_counter->large_pkt_bytes_cnt = 0;
	pkt_size_counter->small_pkt_bytes_cnt = 0;
}

static int enic_poll(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct enic *enic = netdev_priv(netdev);
	unsigned int cq_rq = enic_cq_rq(enic, 0);
	unsigned int cq_wq = enic_cq_wq(enic, 0);
	unsigned int intr = ENIC_LEGACY_IO_INTR;
	unsigned int rq_work_to_do = budget;
	unsigned int wq_work_to_do = ENIC_WQ_NAPI_BUDGET;
	unsigned int work_done, rq_work_done = 0, wq_work_done;
	int err;

	wq_work_done = vnic_cq_service(&enic->cq[cq_wq], wq_work_to_do,
				       enic_wq_service, NULL);

	if (budget > 0)
		rq_work_done = vnic_cq_service(&enic->cq[cq_rq],
			rq_work_to_do, enic_rq_service, NULL);

	/* Accumulate intr event credits for this polling
	 * cycle.  An intr event is the completion of a
	 * WQ or RQ packet.
	 */

	work_done = rq_work_done + wq_work_done;

	if (work_done > 0)
		vnic_intr_return_credits(&enic->intr[intr],
			work_done,
			0 /* don't unmask intr */,
			0 /* don't reset intr timer */);

	err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);

	/* Buffer allocation failed. Stay in polling
	 * mode so we can try to fill the ring again.
	 */

	if (err)
		rq_work_done = rq_work_to_do;
	if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
		/* Call the function which refreshes the intr coalescing timer
		 * value based on the traffic.
		 */
		enic_calc_int_moderation(enic, &enic->rq[0]);

	if ((rq_work_done < budget) && napi_complete_done(napi, rq_work_done)) {

		/* Some work done, but not enough to stay in polling,
		 * exit polling
		 */

		if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
			enic_set_int_moderation(enic, &enic->rq[0]);
		vnic_intr_unmask(&enic->intr[intr]);
	}

	return rq_work_done;
}

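/* Accelerated RFS: expose the per-RQ MSI-X vectors through a cpu_rmap
 * so the stack can steer each flow to the RQ whose interrupt is
 * affine to (or closest to) the CPU consuming that flow.
 */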
#ifdef CONFIG_RFS_ACCEL
static void enic_free_rx_cpu_rmap(struct enic *enic)
{
	free_irq_cpu_rmap(enic->netdev->rx_cpu_rmap);
	enic->netdev->rx_cpu_rmap = NULL;
}

static void enic_set_rx_cpu_rmap(struct enic *enic)
{
	int i, res;

	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) {
		enic->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(enic->rq_count);
		if (unlikely(!enic->netdev->rx_cpu_rmap))
			return;
		for (i = 0; i < enic->rq_count; i++) {
			res = irq_cpu_rmap_add(enic->netdev->rx_cpu_rmap,
					       enic->msix_entry[i].vector);
			if (unlikely(res)) {
				enic_free_rx_cpu_rmap(enic);
				return;
			}
		}
	}
}

#else

static void enic_free_rx_cpu_rmap(struct enic *enic)
{
}

static void enic_set_rx_cpu_rmap(struct enic *enic)
{
}

#endif /* CONFIG_RFS_ACCEL */

static int enic_poll_msix_wq(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct enic *enic = netdev_priv(netdev);
	unsigned int wq_index = (napi - &enic->napi[0]) - enic->rq_count;
	struct vnic_wq *wq = &enic->wq[wq_index];
	unsigned int cq;
	unsigned int intr;
	unsigned int wq_work_to_do = ENIC_WQ_NAPI_BUDGET;
	unsigned int wq_work_done;
	unsigned int wq_irq;

	wq_irq = wq->index;
	cq = enic_cq_wq(enic, wq_irq);
	intr = enic_msix_wq_intr(enic, wq_irq);
	wq_work_done = vnic_cq_service(&enic->cq[cq], wq_work_to_do,
				       enic_wq_service, NULL);

	vnic_intr_return_credits(&enic->intr[intr], wq_work_done,
				 0 /* don't unmask intr */,
				 1 /* reset intr timer */);
	if (!wq_work_done) {
		napi_complete(napi);
		vnic_intr_unmask(&enic->intr[intr]);
		return 0;
	}

	return budget;
}

static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct enic *enic = netdev_priv(netdev);
	unsigned int rq = (napi - &enic->napi[0]);
	unsigned int cq = enic_cq_rq(enic, rq);
	unsigned int intr = enic_msix_rq_intr(enic, rq);
	unsigned int work_to_do = budget;
	unsigned int work_done = 0;
	int err;

	/* Service RQ
	 */

	if (budget > 0)
		work_done = vnic_cq_service(&enic->cq[cq],
			work_to_do, enic_rq_service, NULL);

	/* Return intr event credits for this polling
	 * cycle.  An intr event is the completion of a
	 * RQ packet.
	 */

	if (work_done > 0)
		vnic_intr_return_credits(&enic->intr[intr],
			work_done,
			0 /* don't unmask intr */,
			0 /* don't reset intr timer */);

	err = vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf);

	/* Buffer allocation failed. Stay in polling mode
	 * so we can try to fill the ring again.
	 */

	if (err)
		work_done = work_to_do;
	if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
		/* Call the function which refreshes the intr coalescing timer
		 * value based on the traffic.
		 */
		enic_calc_int_moderation(enic, &enic->rq[rq]);

	if ((work_done < budget) && napi_complete_done(napi, work_done)) {

		/* Some work done, but not enough to stay in polling,
		 * exit polling
		 */

		if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
			enic_set_int_moderation(enic, &enic->rq[rq]);
		vnic_intr_unmask(&enic->intr[intr]);
	}

	return work_done;
}

enic_notify_timer(struct timer_list * t)1701 static void enic_notify_timer(struct timer_list *t)
1702 {
1703 	struct enic *enic = from_timer(enic, t, notify_timer);
1704 
1705 	enic_notify_check(enic);
1706 
1707 	mod_timer(&enic->notify_timer,
1708 		round_jiffies(jiffies + ENIC_NOTIFY_TIMER_PERIOD));
1709 }
1710 
static void enic_free_intr(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	unsigned int i;

	enic_free_rx_cpu_rmap(enic);
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		free_irq(enic->pdev->irq, netdev);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		free_irq(enic->pdev->irq, enic);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
			if (enic->msix[i].requested)
				free_irq(enic->msix_entry[i].vector,
					enic->msix[i].devid);
		break;
	default:
		break;
	}
}

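/* Request IRQs according to the interrupt mode negotiated at probe
 * time: one shared vector for INTx, a single vector for MSI, or
 * per-RQ, per-WQ, error and notify vectors for MSI-X.
 */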
static int enic_request_intr(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	unsigned int i, intr;
	int err = 0;

	enic_set_rx_cpu_rmap(enic);
	switch (vnic_dev_get_intr_mode(enic->vdev)) {

	case VNIC_DEV_INTR_MODE_INTX:

		err = request_irq(enic->pdev->irq, enic_isr_legacy,
			IRQF_SHARED, netdev->name, netdev);
		break;

	case VNIC_DEV_INTR_MODE_MSI:

		err = request_irq(enic->pdev->irq, enic_isr_msi,
			0, netdev->name, enic);
		break;

	case VNIC_DEV_INTR_MODE_MSIX:

		for (i = 0; i < enic->rq_count; i++) {
			intr = enic_msix_rq_intr(enic, i);
			snprintf(enic->msix[intr].devname,
				sizeof(enic->msix[intr].devname),
				"%s-rx-%u", netdev->name, i);
			enic->msix[intr].isr = enic_isr_msix;
			enic->msix[intr].devid = &enic->napi[i];
		}

		for (i = 0; i < enic->wq_count; i++) {
			int wq = enic_cq_wq(enic, i);

			intr = enic_msix_wq_intr(enic, i);
			snprintf(enic->msix[intr].devname,
				sizeof(enic->msix[intr].devname),
				"%s-tx-%u", netdev->name, i);
			enic->msix[intr].isr = enic_isr_msix;
			enic->msix[intr].devid = &enic->napi[wq];
		}

		intr = enic_msix_err_intr(enic);
		snprintf(enic->msix[intr].devname,
			sizeof(enic->msix[intr].devname),
			"%s-err", netdev->name);
		enic->msix[intr].isr = enic_isr_msix_err;
		enic->msix[intr].devid = enic;

		intr = enic_msix_notify_intr(enic);
		snprintf(enic->msix[intr].devname,
			sizeof(enic->msix[intr].devname),
			"%s-notify", netdev->name);
		enic->msix[intr].isr = enic_isr_msix_notify;
		enic->msix[intr].devid = enic;

		for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
			enic->msix[i].requested = 0;

		for (i = 0; i < enic->intr_count; i++) {
			err = request_irq(enic->msix_entry[i].vector,
				enic->msix[i].isr, 0,
				enic->msix[i].devname,
				enic->msix[i].devid);
			if (err) {
				enic_free_intr(enic);
				break;
			}
			enic->msix[i].requested = 1;
		}

		break;

	default:
		break;
	}

	return err;
}

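/* Wait for any in-flight handlers on every vector in use to finish;
 * called with interrupts masked during device stop.
 */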
static void enic_synchronize_irqs(struct enic *enic)
{
	unsigned int i;

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
	case VNIC_DEV_INTR_MODE_MSI:
		synchronize_irq(enic->pdev->irq);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < enic->intr_count; i++)
			synchronize_irq(enic->msix_entry[i].vector);
		break;
	default:
		break;
	}
}

static void enic_set_rx_coal_setting(struct enic *enic)
{
	unsigned int speed;
	int index = -1;
	struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;

	/* 1. Read the link speed from fw
	 * 2. Pick the default range for the speed
	 * 3. Update it in enic->rx_coalesce_setting
	 */
	speed = vnic_dev_port_speed(enic->vdev);
	if (ENIC_LINK_SPEED_10G < speed)
		index = ENIC_LINK_40G_INDEX;
	else if (ENIC_LINK_SPEED_4G < speed)
		index = ENIC_LINK_10G_INDEX;
	else
		index = ENIC_LINK_4G_INDEX;

	rx_coal->small_pkt_range_start = mod_range[index].small_pkt_range_start;
	rx_coal->large_pkt_range_start = mod_range[index].large_pkt_range_start;
	rx_coal->range_end = ENIC_RX_COALESCE_RANGE_END;

	/* Start with the value provided by UCSM */
	for (index = 0; index < enic->rq_count; index++)
		enic->cq[index].cur_rx_coal_timeval =
				enic->config.intr_timer_usec;

	rx_coal->use_adaptive_rx_coalesce = 1;
}

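/* Tell firmware which interrupt (if any) should signal notify events,
 * based on the current interrupt mode.
 */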
static int enic_dev_notify_set(struct enic *enic)
{
	int err;

	spin_lock_bh(&enic->devcmd_lock);
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		err = vnic_dev_notify_set(enic->vdev, ENIC_LEGACY_NOTIFY_INTR);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		err = vnic_dev_notify_set(enic->vdev,
			enic_msix_notify_intr(enic));
		break;
	default:
		err = vnic_dev_notify_set(enic->vdev, -1 /* no intr */);
		break;
	}
	spin_unlock_bh(&enic->devcmd_lock);

	return err;
}

static void enic_notify_timer_start(struct enic *enic)
{
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_MSI:
		mod_timer(&enic->notify_timer, jiffies);
		break;
	default:
		/* Using intr for notification for INTx/MSI-X */
		break;
	}
}

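/* Bring the interface up: request IRQs, set up the firmware notify
 * area, fill and enable the receive rings, enable the work queues and
 * NAPI contexts, then unmask interrupts and start the timers.
 */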
/* rtnl lock is held, process context */
static int enic_open(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	unsigned int i;
	int err, ret;

	err = enic_request_intr(enic);
	if (err) {
		netdev_err(netdev, "Unable to request irq.\n");
		return err;
	}
	enic_init_affinity_hint(enic);
	enic_set_affinity_hint(enic);

	err = enic_dev_notify_set(enic);
	if (err) {
		netdev_err(netdev,
			"Failed to alloc notify buffer, aborting.\n");
		goto err_out_free_intr;
	}

	for (i = 0; i < enic->rq_count; i++) {
		/* enable rq before updating rq desc */
		vnic_rq_enable(&enic->rq[i]);
		vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf);
		/* Need at least one buffer on ring to get going */
		if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
			netdev_err(netdev, "Unable to alloc receive buffers\n");
			err = -ENOMEM;
			goto err_out_free_rq;
		}
	}

	for (i = 0; i < enic->wq_count; i++)
		vnic_wq_enable(&enic->wq[i]);

	if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
		enic_dev_add_station_addr(enic);

	enic_set_rx_mode(netdev);

	netif_tx_wake_all_queues(netdev);

	for (i = 0; i < enic->rq_count; i++)
		napi_enable(&enic->napi[i]);

	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
		for (i = 0; i < enic->wq_count; i++)
			napi_enable(&enic->napi[enic_cq_wq(enic, i)]);
	enic_dev_enable(enic);

	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_unmask(&enic->intr[i]);

	enic_notify_timer_start(enic);
	enic_rfs_timer_start(enic);

	return 0;

err_out_free_rq:
	for (i = 0; i < enic->rq_count; i++) {
		ret = vnic_rq_disable(&enic->rq[i]);
		if (!ret)
			vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
	}
	enic_dev_notify_unset(enic);
err_out_free_intr:
	enic_unset_affinity_hint(enic);
	enic_free_intr(enic);

	return err;
}

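/* Tear the interface down in roughly the reverse order of enic_open():
 * mask and synchronize interrupts, quiesce NAPI and the queues, then
 * clean all rings and free the IRQs.
 */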
/* rtnl lock is held, process context */
static int enic_stop(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	unsigned int i;
	int err;

	for (i = 0; i < enic->intr_count; i++) {
		vnic_intr_mask(&enic->intr[i]);
		(void)vnic_intr_masked(&enic->intr[i]); /* flush write */
	}

	enic_synchronize_irqs(enic);

	del_timer_sync(&enic->notify_timer);
	enic_rfs_flw_tbl_free(enic);

	enic_dev_disable(enic);

	for (i = 0; i < enic->rq_count; i++)
		napi_disable(&enic->napi[i]);

	netif_carrier_off(netdev);
	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
		for (i = 0; i < enic->wq_count; i++)
			napi_disable(&enic->napi[enic_cq_wq(enic, i)]);
	netif_tx_disable(netdev);

	if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
		enic_dev_del_station_addr(enic);

	for (i = 0; i < enic->wq_count; i++) {
		err = vnic_wq_disable(&enic->wq[i]);
		if (err)
			return err;
	}
	for (i = 0; i < enic->rq_count; i++) {
		err = vnic_rq_disable(&enic->rq[i]);
		if (err)
			return err;
	}

	enic_dev_notify_unset(enic);
	enic_unset_affinity_hint(enic);
	enic_free_intr(enic);

	for (i = 0; i < enic->wq_count; i++)
		vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
	for (i = 0; i < enic->rq_count; i++)
		vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
	for (i = 0; i < enic->cq_count; i++)
		vnic_cq_clean(&enic->cq[i]);
	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_clean(&enic->intr[i]);

	return 0;
}

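/* Apply a new MTU by bouncing the interface if it is running; callers
 * must hold the rtnl lock.
 */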
static int _enic_change_mtu(struct net_device *netdev, int new_mtu)
{
	bool running = netif_running(netdev);
	int err = 0;

	ASSERT_RTNL();
	if (running) {
		err = enic_stop(netdev);
		if (err)
			return err;
	}

	netdev->mtu = new_mtu;

	if (running) {
		err = enic_open(netdev);
		if (err)
			return err;
	}

	return 0;
}

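/* .ndo_change_mtu handler.  MTU changes are not supported on dynamic or
 * SR-IOV VF vnics; warn if the interface MTU exceeds the port MTU
 * reported by firmware.
 */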
static int enic_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct enic *enic = netdev_priv(netdev);

	if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
		return -EOPNOTSUPP;

	if (netdev->mtu > enic->port_mtu)
		netdev_warn(netdev,
			    "interface MTU (%d) set higher than port MTU (%d)\n",
			    netdev->mtu, enic->port_mtu);

	return _enic_change_mtu(netdev, new_mtu);
}

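/* Work handler scheduled when firmware reports a new port MTU; applies
 * it under the rtnl lock.
 */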
static void enic_change_mtu_work(struct work_struct *work)
{
	struct enic *enic = container_of(work, struct enic, change_mtu_work);
	struct net_device *netdev = enic->netdev;
	int new_mtu = vnic_dev_mtu(enic->vdev);

	rtnl_lock();
	(void)_enic_change_mtu(netdev, new_mtu);
	rtnl_unlock();

	netdev_info(netdev, "interface MTU set as %d\n", netdev->mtu);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
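/* netpoll/netconsole entry point: directly invoke the interrupt
 * handler(s) for the current interrupt mode.
 */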
static void enic_poll_controller(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_dev *vdev = enic->vdev;
	unsigned int i, intr;

	switch (vnic_dev_get_intr_mode(vdev)) {
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < enic->rq_count; i++) {
			intr = enic_msix_rq_intr(enic, i);
			enic_isr_msix(enic->msix_entry[intr].vector,
				      &enic->napi[i]);
		}

		for (i = 0; i < enic->wq_count; i++) {
			intr = enic_msix_wq_intr(enic, i);
			enic_isr_msix(enic->msix_entry[intr].vector,
				      &enic->napi[enic_cq_wq(enic, i)]);
		}

		break;
	case VNIC_DEV_INTR_MODE_MSI:
		enic_isr_msi(enic->pdev->irq, enic);
		break;
	case VNIC_DEV_INTR_MODE_INTX:
		enic_isr_legacy(enic->pdev->irq, netdev);
		break;
	default:
		break;
	}
}
#endif

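/* Run a two-phase devcmd: kick it off with @start, then poll @finished
 * every 100ms for up to 2 seconds.
 */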
static int enic_dev_wait(struct vnic_dev *vdev,
	int (*start)(struct vnic_dev *, int),
	int (*finished)(struct vnic_dev *, int *),
	int arg)
{
	unsigned long time;
	int done;
	int err;

	err = start(vdev, arg);
	if (err)
		return err;

	/* Wait for func to complete...2 seconds max
	 */

	time = jiffies + (HZ * 2);
	do {

		err = finished(vdev, &done);
		if (err)
			return err;

		if (done)
			return 0;

		schedule_timeout_uninterruptible(HZ / 10);

	} while (time_after(time, jiffies));

	return -ETIMEDOUT;
}

static int enic_dev_open(struct enic *enic)
{
	int err;
	u32 flags = CMD_OPENF_IG_DESCCACHE;

	err = enic_dev_wait(enic->vdev, vnic_dev_open,
		vnic_dev_open_done, flags);
	if (err)
		dev_err(enic_get_dev(enic), "vNIC device open failed, err %d\n",
			err);

	return err;
}

static int enic_dev_soft_reset(struct enic *enic)
{
	int err;

	err = enic_dev_wait(enic->vdev, vnic_dev_soft_reset,
			    vnic_dev_soft_reset_done, 0);
	if (err)
		netdev_err(enic->netdev, "vNIC soft reset failed, err %d\n",
			   err);

	return err;
}

static int enic_dev_hang_reset(struct enic *enic)
{
	int err;

	err = enic_dev_wait(enic->vdev, vnic_dev_hang_reset,
		vnic_dev_hang_reset_done, 0);
	if (err)
		netdev_err(enic->netdev, "vNIC hang reset failed, err %d\n",
			err);

	return err;
}

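/* Push enic->rss_key to the adapter.  The key is staged in a
 * DMA-coherent buffer, split into ENIC_RSS_BYTES_PER_KEY chunks as the
 * hardware expects.
 */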
int __enic_set_rsskey(struct enic *enic)
{
	union vnic_rss_key *rss_key_buf_va;
	dma_addr_t rss_key_buf_pa;
	int i, kidx, bidx, err;

	rss_key_buf_va = dma_alloc_coherent(&enic->pdev->dev,
					    sizeof(union vnic_rss_key),
					    &rss_key_buf_pa, GFP_ATOMIC);
	if (!rss_key_buf_va)
		return -ENOMEM;

	for (i = 0; i < ENIC_RSS_LEN; i++) {
		kidx = i / ENIC_RSS_BYTES_PER_KEY;
		bidx = i % ENIC_RSS_BYTES_PER_KEY;
		rss_key_buf_va->key[kidx].b[bidx] = enic->rss_key[i];
	}
	spin_lock_bh(&enic->devcmd_lock);
	err = enic_set_rss_key(enic,
		rss_key_buf_pa,
		sizeof(union vnic_rss_key));
	spin_unlock_bh(&enic->devcmd_lock);

	dma_free_coherent(&enic->pdev->dev, sizeof(union vnic_rss_key),
			  rss_key_buf_va, rss_key_buf_pa);

	return err;
}

static int enic_set_rsskey(struct enic *enic)
{
	netdev_rss_key_fill(enic->rss_key, ENIC_RSS_LEN);

	return __enic_set_rsskey(enic);
}

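/* Program the RSS indirection table, spreading the 2^rss_hash_bits
 * entries round-robin across the enabled receive queues.
 */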
static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
{
	dma_addr_t rss_cpu_buf_pa;
	union vnic_rss_cpu *rss_cpu_buf_va = NULL;
	unsigned int i;
	int err;

	rss_cpu_buf_va = dma_alloc_coherent(&enic->pdev->dev,
					    sizeof(union vnic_rss_cpu),
					    &rss_cpu_buf_pa, GFP_ATOMIC);
	if (!rss_cpu_buf_va)
		return -ENOMEM;

	for (i = 0; i < (1 << rss_hash_bits); i++)
		(*rss_cpu_buf_va).cpu[i/4].b[i%4] = i % enic->rq_count;

	spin_lock_bh(&enic->devcmd_lock);
	err = enic_set_rss_cpu(enic,
		rss_cpu_buf_pa,
		sizeof(union vnic_rss_cpu));
	spin_unlock_bh(&enic->devcmd_lock);

	dma_free_coherent(&enic->pdev->dev, sizeof(union vnic_rss_cpu),
			  rss_cpu_buf_va, rss_cpu_buf_pa);

	return err;
}

static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu,
	u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable)
{
	const u8 tso_ipid_split_en = 0;
	const u8 ig_vlan_strip_en = 1;
	int err;

	/* Enable VLAN tag stripping.
	 */

	spin_lock_bh(&enic->devcmd_lock);
	err = enic_set_nic_cfg(enic,
		rss_default_cpu, rss_hash_type,
		rss_hash_bits, rss_base_cpu,
		rss_enable, tso_ipid_split_en,
		ig_vlan_strip_en);
	spin_unlock_bh(&enic->devcmd_lock);

	return err;
}

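/* Enable RSS when the adapter supports it and more than one RQ is in
 * use: query the capable hash types (falling back to IPv4/IPv6 TCP
 * defaults on old adapters), program the key and the indirection
 * table, then commit the NIC config.
 */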
static int enic_set_rss_nic_cfg(struct enic *enic)
{
	struct device *dev = enic_get_dev(enic);
	const u8 rss_default_cpu = 0;
	const u8 rss_hash_bits = 7;
	const u8 rss_base_cpu = 0;
	u8 rss_hash_type;
	int res;
	u8 rss_enable = ENIC_SETTING(enic, RSS) && (enic->rq_count > 1);

	spin_lock_bh(&enic->devcmd_lock);
	res = vnic_dev_capable_rss_hash_type(enic->vdev, &rss_hash_type);
	spin_unlock_bh(&enic->devcmd_lock);
	if (res) {
		/* defaults for old adapters
		 */
		rss_hash_type = NIC_CFG_RSS_HASH_TYPE_IPV4	|
				NIC_CFG_RSS_HASH_TYPE_TCP_IPV4	|
				NIC_CFG_RSS_HASH_TYPE_IPV6	|
				NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
	}

	if (rss_enable) {
		if (!enic_set_rsskey(enic)) {
			if (enic_set_rsscpu(enic, rss_hash_bits)) {
				rss_enable = 0;
				dev_warn(dev, "RSS disabled, "
					"Failed to set RSS cpu indirection table.");
			}
		} else {
			rss_enable = 0;
			dev_warn(dev, "RSS disabled, Failed to set RSS key.\n");
		}
	}

	return enic_set_niccfg(enic, rss_default_cpu, rss_hash_type,
		rss_hash_bits, rss_base_cpu, rss_enable);
}

static void enic_set_api_busy(struct enic *enic, bool busy)
{
	spin_lock(&enic->enic_api_lock);
	enic->enic_api_busy = busy;
	spin_unlock(&enic->enic_api_lock);
}

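/* Work handler for soft reset: if the interface is up, quiesce it,
 * reset the vNIC, reprogram its resources and bring it back up, all
 * under the rtnl lock.
 */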
static void enic_reset(struct work_struct *work)
{
	struct enic *enic = container_of(work, struct enic, reset);

	if (!netif_running(enic->netdev))
		return;

	rtnl_lock();

	/* Stop any activity from infiniband */
	enic_set_api_busy(enic, true);

	enic_stop(enic->netdev);
	enic_dev_soft_reset(enic);
	enic_reset_addr_lists(enic);
	enic_init_vnic_resources(enic);
	enic_set_rss_nic_cfg(enic);
	enic_dev_set_ig_vlan_rewrite_mode(enic);
	enic_open(enic->netdev);

	/* Allow infiniband to fiddle with the device again */
	enic_set_api_busy(enic, false);

	call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev);

	rtnl_unlock();
}

static void enic_tx_hang_reset(struct work_struct *work)
{
	struct enic *enic = container_of(work, struct enic, tx_hang_reset);

	rtnl_lock();

	/* Stop any activity from infiniband */
	enic_set_api_busy(enic, true);

	enic_dev_hang_notify(enic);
	enic_stop(enic->netdev);
	enic_dev_hang_reset(enic);
	enic_reset_addr_lists(enic);
	enic_init_vnic_resources(enic);
	enic_set_rss_nic_cfg(enic);
	enic_dev_set_ig_vlan_rewrite_mode(enic);
	enic_open(enic->netdev);

	/* Allow infiniband to fiddle with the device again */
	enic_set_api_busy(enic, false);

	call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev);

	rtnl_unlock();
}

static int enic_set_intr_mode(struct enic *enic)
{
	unsigned int n = min_t(unsigned int, enic->rq_count, ENIC_RQ_MAX);
	unsigned int m = min_t(unsigned int, enic->wq_count, ENIC_WQ_MAX);
	unsigned int i;

	/* Set interrupt mode (INTx, MSI, MSI-X) depending
	 * on system capabilities.
	 *
	 * Try MSI-X first
	 *
	 * We need n RQs, m WQs, n+m CQs, and n+m+2 INTRs
	 * (the second to last INTR is used for WQ/RQ errors)
	 * (the last INTR is used for notifications)
	 */

	BUG_ON(ARRAY_SIZE(enic->msix_entry) < n + m + 2);
	for (i = 0; i < n + m + 2; i++)
		enic->msix_entry[i].entry = i;

	/* Use multiple RQs if RSS is enabled
	 */

	if (ENIC_SETTING(enic, RSS) &&
	    enic->config.intr_mode < 1 &&
	    enic->rq_count >= n &&
	    enic->wq_count >= m &&
	    enic->cq_count >= n + m &&
	    enic->intr_count >= n + m + 2) {

		if (pci_enable_msix_range(enic->pdev, enic->msix_entry,
					  n + m + 2, n + m + 2) > 0) {

			enic->rq_count = n;
			enic->wq_count = m;
			enic->cq_count = n + m;
			enic->intr_count = n + m + 2;

			vnic_dev_set_intr_mode(enic->vdev,
				VNIC_DEV_INTR_MODE_MSIX);

			return 0;
		}
	}

	if (enic->config.intr_mode < 1 &&
	    enic->rq_count >= 1 &&
	    enic->wq_count >= m &&
	    enic->cq_count >= 1 + m &&
	    enic->intr_count >= 1 + m + 2) {
		if (pci_enable_msix_range(enic->pdev, enic->msix_entry,
					  1 + m + 2, 1 + m + 2) > 0) {

			enic->rq_count = 1;
			enic->wq_count = m;
			enic->cq_count = 1 + m;
			enic->intr_count = 1 + m + 2;

			vnic_dev_set_intr_mode(enic->vdev,
				VNIC_DEV_INTR_MODE_MSIX);

			return 0;
		}
	}

	/* Next try MSI
	 *
	 * We need 1 RQ, 1 WQ, 2 CQs, and 1 INTR
	 */

	if (enic->config.intr_mode < 2 &&
	    enic->rq_count >= 1 &&
	    enic->wq_count >= 1 &&
	    enic->cq_count >= 2 &&
	    enic->intr_count >= 1 &&
	    !pci_enable_msi(enic->pdev)) {

		enic->rq_count = 1;
		enic->wq_count = 1;
		enic->cq_count = 2;
		enic->intr_count = 1;

		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSI);

		return 0;
	}

	/* Next try INTx
	 *
	 * We need 1 RQ, 1 WQ, 2 CQs, and 3 INTRs
	 * (the first INTR is used for WQ/RQ)
	 * (the second INTR is used for WQ/RQ errors)
	 * (the last INTR is used for notifications)
	 */

	if (enic->config.intr_mode < 3 &&
	    enic->rq_count >= 1 &&
	    enic->wq_count >= 1 &&
	    enic->cq_count >= 2 &&
	    enic->intr_count >= 3) {

		enic->rq_count = 1;
		enic->wq_count = 1;
		enic->cq_count = 2;
		enic->intr_count = 3;

		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_INTX);

		return 0;
	}

	vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);

	return -EINVAL;
}

static void enic_clear_intr_mode(struct enic *enic)
{
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_MSIX:
		pci_disable_msix(enic->pdev);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		pci_disable_msi(enic->pdev);
		break;
	default:
		break;
	}

	vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
}

static const struct net_device_ops enic_netdev_dynamic_ops = {
	.ndo_open		= enic_open,
	.ndo_stop		= enic_stop,
	.ndo_start_xmit		= enic_hard_start_xmit,
	.ndo_get_stats64	= enic_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= enic_set_rx_mode,
	.ndo_set_mac_address	= enic_set_mac_address_dynamic,
	.ndo_change_mtu		= enic_change_mtu,
	.ndo_vlan_rx_add_vid	= enic_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= enic_vlan_rx_kill_vid,
	.ndo_tx_timeout		= enic_tx_timeout,
	.ndo_set_vf_port	= enic_set_vf_port,
	.ndo_get_vf_port	= enic_get_vf_port,
	.ndo_set_vf_mac		= enic_set_vf_mac,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= enic_poll_controller,
#endif
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= enic_rx_flow_steer,
#endif
	.ndo_features_check	= enic_features_check,
};

static const struct net_device_ops enic_netdev_ops = {
	.ndo_open		= enic_open,
	.ndo_stop		= enic_stop,
	.ndo_start_xmit		= enic_hard_start_xmit,
	.ndo_get_stats64	= enic_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= enic_set_mac_address,
	.ndo_set_rx_mode	= enic_set_rx_mode,
	.ndo_change_mtu		= enic_change_mtu,
	.ndo_vlan_rx_add_vid	= enic_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= enic_vlan_rx_kill_vid,
	.ndo_tx_timeout		= enic_tx_timeout,
	.ndo_set_vf_port	= enic_set_vf_port,
	.ndo_get_vf_port	= enic_get_vf_port,
	.ndo_set_vf_mac		= enic_set_vf_mac,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= enic_poll_controller,
#endif
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= enic_rx_flow_steer,
#endif
	.ndo_features_check	= enic_features_check,
};

static void enic_dev_deinit(struct enic *enic)
{
	unsigned int i;

	for (i = 0; i < enic->rq_count; i++)
		__netif_napi_del(&enic->napi[i]);

	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
		for (i = 0; i < enic->wq_count; i++)
			__netif_napi_del(&enic->napi[enic_cq_wq(enic, i)]);

	/* observe RCU grace period after __netif_napi_del() calls */
	synchronize_net();

	enic_free_vnic_resources(enic);
	enic_clear_intr_mode(enic);
	enic_free_affinity_hint(enic);
}

static void enic_kdump_kernel_config(struct enic *enic)
{
	if (is_kdump_kernel()) {
		dev_info(enic_get_dev(enic), "Running from within kdump kernel. Using minimal resources\n");
		enic->rq_count = 1;
		enic->wq_count = 1;
		enic->config.rq_desc_count = ENIC_MIN_RQ_DESCS;
		enic->config.wq_desc_count = ENIC_MIN_WQ_DESCS;
		enic->config.mtu = min_t(u16, 1500, enic->config.mtu);
	}
}

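/* Discover the vNIC configuration and resource counts, pick an
 * interrupt mode, allocate and initialize the queue resources, and
 * register the NAPI poll handlers that match the chosen mode.
 */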
static int enic_dev_init(struct enic *enic)
{
	struct device *dev = enic_get_dev(enic);
	struct net_device *netdev = enic->netdev;
	unsigned int i;
	int err;

	/* Get interrupt coalesce timer info */
	err = enic_dev_intr_coal_timer_info(enic);
	if (err) {
		dev_warn(dev, "Using default conversion factor for "
			"interrupt coalesce timer\n");
		vnic_dev_intr_coal_timer_info_default(enic->vdev);
	}

	/* Get vNIC configuration
	 */

	err = enic_get_vnic_config(enic);
	if (err) {
		dev_err(dev, "Get vNIC configuration failed, aborting\n");
		return err;
	}

	/* Get available resource counts
	 */

	enic_get_res_counts(enic);

	/* modify resource count if we are in kdump_kernel
	 */
	enic_kdump_kernel_config(enic);

	/* Set interrupt mode based on resource counts and system
	 * capabilities
	 */

	err = enic_set_intr_mode(enic);
	if (err) {
		dev_err(dev, "Failed to set intr mode based on resource "
			"counts and system capabilities, aborting\n");
		return err;
	}

	/* Allocate and configure vNIC resources
	 */

	err = enic_alloc_vnic_resources(enic);
	if (err) {
		dev_err(dev, "Failed to alloc vNIC resources, aborting\n");
		goto err_out_free_vnic_resources;
	}

	enic_init_vnic_resources(enic);

	err = enic_set_rss_nic_cfg(enic);
	if (err) {
		dev_err(dev, "Failed to config nic, aborting\n");
		goto err_out_free_vnic_resources;
	}

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	default:
		netif_napi_add(netdev, &enic->napi[0], enic_poll);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < enic->rq_count; i++) {
			netif_napi_add(netdev, &enic->napi[i],
				       enic_poll_msix_rq);
		}
		for (i = 0; i < enic->wq_count; i++)
			netif_napi_add(netdev,
				       &enic->napi[enic_cq_wq(enic, i)],
				       enic_poll_msix_wq);
		break;
	}

	return 0;

err_out_free_vnic_resources:
	enic_free_affinity_hint(enic);
	enic_clear_intr_mode(enic);
	enic_free_vnic_resources(enic);

	return err;
}

static void enic_iounmap(struct enic *enic)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(enic->bar); i++)
		if (enic->bar[i].vaddr)
			iounmap(enic->bar[i].vaddr);
}

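/* PCI probe: set up PCI and DMA, map the BARs, register and open the
 * vNIC device, initialize its resources, and register the net device
 * with the feature set the adapter advertises.
 */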
static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct net_device *netdev;
	struct enic *enic;
	int using_dac = 0;
	unsigned int i;
	int err;
#ifdef CONFIG_PCI_IOV
	int pos = 0;
#endif
	int num_pps = 1;

	/* Allocate net device structure and initialize.  Private
	 * instance data is initialized to zero.
	 */

	netdev = alloc_etherdev_mqs(sizeof(struct enic),
				    ENIC_RQ_MAX, ENIC_WQ_MAX);
	if (!netdev)
		return -ENOMEM;

	pci_set_drvdata(pdev, netdev);

	SET_NETDEV_DEV(netdev, &pdev->dev);

	enic = netdev_priv(netdev);
	enic->netdev = netdev;
	enic->pdev = pdev;

	/* Setup PCI resources
	 */

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(dev, "Cannot enable PCI device, aborting\n");
		goto err_out_free_netdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "Cannot request PCI regions, aborting\n");
		goto err_out_disable_device;
	}

	pci_set_master(pdev);

	/* Query PCI controller on system for DMA addressing
	 * limitation for the device.  Try 47-bit first, and
	 * fail to 32-bit.
	 */

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(47));
	if (err) {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(dev, "No usable DMA configuration, aborting\n");
			goto err_out_release_regions;
		}
	} else {
		using_dac = 1;
	}

	/* Map vNIC resources from BAR0-5
	 */

	for (i = 0; i < ARRAY_SIZE(enic->bar); i++) {
		if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM))
			continue;
		enic->bar[i].len = pci_resource_len(pdev, i);
		enic->bar[i].vaddr = pci_iomap(pdev, i, enic->bar[i].len);
		if (!enic->bar[i].vaddr) {
			dev_err(dev, "Cannot memory-map BAR %d, aborting\n", i);
			err = -ENODEV;
			goto err_out_iounmap;
		}
		enic->bar[i].bus_addr = pci_resource_start(pdev, i);
	}

	/* Register vNIC device
	 */

	enic->vdev = vnic_dev_register(NULL, enic, pdev, enic->bar,
		ARRAY_SIZE(enic->bar));
	if (!enic->vdev) {
		dev_err(dev, "vNIC registration failed, aborting\n");
		err = -ENODEV;
		goto err_out_iounmap;
	}

	err = vnic_devcmd_init(enic->vdev);

	if (err)
		goto err_out_vnic_unregister;

#ifdef CONFIG_PCI_IOV
	/* Get number of subvnics */
	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos) {
		pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF,
			&enic->num_vfs);
		if (enic->num_vfs) {
			err = pci_enable_sriov(pdev, enic->num_vfs);
			if (err) {
				dev_err(dev, "SRIOV enable failed, aborting."
					" pci_enable_sriov() returned %d\n",
					err);
				goto err_out_vnic_unregister;
			}
			enic->priv_flags |= ENIC_SRIOV_ENABLED;
			num_pps = enic->num_vfs;
		}
	}
#endif

	/* Allocate structure for port profiles */
	enic->pp = kcalloc(num_pps, sizeof(*enic->pp), GFP_KERNEL);
	if (!enic->pp) {
		err = -ENOMEM;
		goto err_out_disable_sriov_pp;
	}

	/* Issue device open to get device in known state
	 */

	err = enic_dev_open(enic);
	if (err) {
		dev_err(dev, "vNIC dev open failed, aborting\n");
		goto err_out_disable_sriov;
	}

	/* Setup devcmd lock
	 */

	spin_lock_init(&enic->devcmd_lock);
	spin_lock_init(&enic->enic_api_lock);

	/*
	 * Set ingress vlan rewrite mode before vnic initialization
	 */

	err = enic_dev_set_ig_vlan_rewrite_mode(enic);
	if (err) {
		dev_err(dev,
			"Failed to set ingress vlan rewrite mode, aborting.\n");
		goto err_out_dev_close;
	}

	/* Issue device init to initialize the vnic-to-switch link.
	 * We'll start with carrier off and wait for link UP
	 * notification later to turn on carrier.  We don't need
	 * to wait here for the vnic-to-switch link initialization
	 * to complete; link UP notification is the indication that
	 * the process is complete.
	 */

	netif_carrier_off(netdev);

	/* Do not call dev_init for a dynamic vnic.
	 * For a dynamic vnic, init_prov_info will be
	 * called later by an upper layer.
	 */

	if (!enic_is_dynamic(enic)) {
		err = vnic_dev_init(enic->vdev, 0);
		if (err) {
			dev_err(dev, "vNIC dev init failed, aborting\n");
			goto err_out_dev_close;
		}
	}

	err = enic_dev_init(enic);
	if (err) {
		dev_err(dev, "Device initialization failed, aborting\n");
		goto err_out_dev_close;
	}

	netif_set_real_num_tx_queues(netdev, enic->wq_count);
	netif_set_real_num_rx_queues(netdev, enic->rq_count);

	/* Setup notification timer, HW reset task, and wq locks
	 */

	timer_setup(&enic->notify_timer, enic_notify_timer, 0);

	enic_rfs_flw_tbl_init(enic);
	enic_set_rx_coal_setting(enic);
	INIT_WORK(&enic->reset, enic_reset);
	INIT_WORK(&enic->tx_hang_reset, enic_tx_hang_reset);
	INIT_WORK(&enic->change_mtu_work, enic_change_mtu_work);

	for (i = 0; i < enic->wq_count; i++)
		spin_lock_init(&enic->wq_lock[i]);

	/* Register net device
	 */

	enic->port_mtu = enic->config.mtu;

	err = enic_set_mac_addr(netdev, enic->mac_addr);
	if (err) {
		dev_err(dev, "Invalid MAC address, aborting\n");
		goto err_out_dev_deinit;
	}

	enic->tx_coalesce_usecs = enic->config.intr_timer_usec;
	/* rx coalesce time already got initialized. This gets used
	 * if adaptive coal is turned off
	 */
	enic->rx_coalesce_usecs = enic->tx_coalesce_usecs;

	if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
		netdev->netdev_ops = &enic_netdev_dynamic_ops;
	else
		netdev->netdev_ops = &enic_netdev_ops;

	netdev->watchdog_timeo = 2 * HZ;
	enic_set_ethtool_ops(netdev);

	netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
	if (ENIC_SETTING(enic, LOOP)) {
		netdev->features &= ~NETIF_F_HW_VLAN_CTAG_TX;
		enic->loop_enable = 1;
		enic->loop_tag = enic->config.loop_tag;
		dev_info(dev, "loopback tag=0x%04x\n", enic->loop_tag);
	}
	if (ENIC_SETTING(enic, TXCSUM))
		netdev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM;
	if (ENIC_SETTING(enic, TSO))
		netdev->hw_features |= NETIF_F_TSO |
			NETIF_F_TSO6 | NETIF_F_TSO_ECN;
	if (ENIC_SETTING(enic, RSS))
		netdev->hw_features |= NETIF_F_RXHASH;
	if (ENIC_SETTING(enic, RXCSUM))
		netdev->hw_features |= NETIF_F_RXCSUM;
	if (ENIC_SETTING(enic, VXLAN)) {
		u64 patch_level;
		u64 a1 = 0;

		netdev->hw_enc_features |= NETIF_F_RXCSUM		|
					   NETIF_F_TSO			|
					   NETIF_F_TSO6			|
					   NETIF_F_TSO_ECN		|
					   NETIF_F_GSO_UDP_TUNNEL	|
					   NETIF_F_HW_CSUM		|
					   NETIF_F_GSO_UDP_TUNNEL_CSUM;
		netdev->hw_features |= netdev->hw_enc_features;
		/* get bit mask from hw about supported offload bit level
		 * BIT(0) = fw supports patch_level 0
		 *	    fcoe bit = encap
		 *	    fcoe_fc_crc_ok = outer csum ok
		 * BIT(1) = always set by fw
		 * BIT(2) = fw supports patch_level 2
		 *	    BIT(0) in rss_hash = encap
		 *	    BIT(1,2) in rss_hash = outer_ip_csum_ok/
		 *				   outer_tcp_csum_ok
		 * used in enic_rq_indicate_buf
		 */
		err = vnic_dev_get_supported_feature_ver(enic->vdev,
							 VIC_FEATURE_VXLAN,
							 &patch_level, &a1);
		if (err)
			patch_level = 0;
		enic->vxlan.flags = (u8)a1;
		/* mask bits that are supported by driver
		 */
		patch_level &= BIT_ULL(0) | BIT_ULL(2);
		patch_level = fls(patch_level);
		patch_level = patch_level ? patch_level - 1 : 0;
		enic->vxlan.patch_level = patch_level;

		if (vnic_dev_get_res_count(enic->vdev, RES_TYPE_WQ) == 1 ||
		    enic->vxlan.flags & ENIC_VXLAN_MULTI_WQ) {
			netdev->udp_tunnel_nic_info = &enic_udp_tunnels_v4;
			if (enic->vxlan.flags & ENIC_VXLAN_OUTER_IPV6)
				netdev->udp_tunnel_nic_info = &enic_udp_tunnels;
		}
	}

	netdev->features |= netdev->hw_features;
	netdev->vlan_features |= netdev->features;

#ifdef CONFIG_RFS_ACCEL
	netdev->hw_features |= NETIF_F_NTUPLE;
#endif

	if (using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	/* MTU range: 68 - 9000 */
	netdev->min_mtu = ENIC_MIN_MTU;
	netdev->max_mtu = ENIC_MAX_MTU;
	netdev->mtu	= enic->port_mtu;

	err = register_netdev(netdev);
	if (err) {
		dev_err(dev, "Cannot register net device, aborting\n");
		goto err_out_dev_deinit;
	}
	enic->rx_copybreak = RX_COPYBREAK_DEFAULT;

	return 0;

err_out_dev_deinit:
	enic_dev_deinit(enic);
err_out_dev_close:
	vnic_dev_close(enic->vdev);
err_out_disable_sriov:
	kfree(enic->pp);
err_out_disable_sriov_pp:
#ifdef CONFIG_PCI_IOV
	if (enic_sriov_enabled(enic)) {
		pci_disable_sriov(pdev);
		enic->priv_flags &= ~ENIC_SRIOV_ENABLED;
	}
#endif
err_out_vnic_unregister:
	vnic_dev_unregister(enic->vdev);
err_out_iounmap:
	enic_iounmap(enic);
err_out_release_regions:
	pci_release_regions(pdev);
err_out_disable_device:
	pci_disable_device(pdev);
err_out_free_netdev:
	free_netdev(netdev);

	return err;
}

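/* PCI remove: undo enic_probe() in reverse order. */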
static void enic_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	if (netdev) {
		struct enic *enic = netdev_priv(netdev);

		cancel_work_sync(&enic->reset);
		cancel_work_sync(&enic->change_mtu_work);
		unregister_netdev(netdev);
		enic_dev_deinit(enic);
		vnic_dev_close(enic->vdev);
#ifdef CONFIG_PCI_IOV
		if (enic_sriov_enabled(enic)) {
			pci_disable_sriov(pdev);
			enic->priv_flags &= ~ENIC_SRIOV_ENABLED;
		}
#endif
		kfree(enic->pp);
		vnic_dev_unregister(enic->vdev);
		enic_iounmap(enic);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		free_netdev(netdev);
	}
}

static struct pci_driver enic_driver = {
	.name = DRV_NAME,
	.id_table = enic_id_table,
	.probe = enic_probe,
	.remove = enic_remove,
};

module_pci_driver(enic_driver);