1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2005 - 2016 Broadcom
4  * All rights reserved.
5  *
6  * Contact Information:
7  * linux-drivers@emulex.com
8  *
9  * Emulex
10  * 3333 Susan Street
11  * Costa Mesa, CA 92626
12  */
13 
14 #include <linux/prefetch.h>
15 #include <linux/module.h>
16 #include "be.h"
17 #include "be_cmds.h"
18 #include <asm/div64.h>
19 #include <linux/if_bridge.h>
20 #include <net/busy_poll.h>
21 #include <net/vxlan.h>
22 
23 MODULE_DESCRIPTION(DRV_DESC);
24 MODULE_AUTHOR("Emulex Corporation");
25 MODULE_LICENSE("GPL");
26 
/* num_vfs module param is obsolete.
 * Use sysfs method to enable/disable VFs.
 */
static unsigned int num_vfs;
module_param(num_vfs, uint, 0444);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

/* Size of each RX buffer fragment posted to the HW; read-only at runtime */
static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, 0444);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

/* Per-module error detection/recovery workq shared across all functions.
 * Each function schedules its own work request on this shared workq.
 */
static struct workqueue_struct *be_err_recovery_workq;
42 
/* PCI IDs this driver binds to. Support for each ASIC family can be
 * compiled out individually via the corresponding CONFIG_BE2NET_* option.
 */
static const struct pci_device_id be_dev_ids[] = {
#ifdef CONFIG_BE2NET_BE2
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
#endif /* CONFIG_BE2NET_BE2 */
#ifdef CONFIG_BE2NET_BE3
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
#endif /* CONFIG_BE2NET_BE3 */
#ifdef CONFIG_BE2NET_LANCER
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
#endif /* CONFIG_BE2NET_LANCER */
#ifdef CONFIG_BE2NET_SKYHAWK
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
#endif /* CONFIG_BE2NET_SKYHAWK */
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
63 
/* Workqueue used by all functions for deferring cmd calls to the adapter */
65 static struct workqueue_struct *be_wq;
66 
/* UE Status Low CSR.
 * Human-readable names printed when an Unrecoverable Error is reported;
 * NOTE(review): entries appear to map 1:1 to bit positions in the CSR —
 * confirm against the UE-detection code that indexes this table.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"ERX2 ",
	"SPARE ",
	"JTAG ",
	"MPU_INTPEND "
};
102 
/* UE Status High CSR.
 * Companion table to ue_status_low_desc for the high 32 error bits;
 * the final "Unknown" entry is a catch-all.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"ECRC",
	"Poison TLP",
	"NETC",
	"PERIPH",
	"LLTXULP",
	"D2P",
	"RCON",
	"LDMA",
	"LLTXP",
	"LLTXPB",
	"Unknown"
};
138 
/* Interface capability flags enabled on VF interfaces.
 * NOTE(review): presumably the minimal RX-filter feature set a VF needs —
 * confirm against the if_create path that consumes this mask.
 */
#define BE_VF_IF_EN_FLAGS	(BE_IF_FLAGS_UNTAGGED | \
				 BE_IF_FLAGS_BROADCAST | \
				 BE_IF_FLAGS_MULTICAST | \
				 BE_IF_FLAGS_PASS_L3L4_ERRORS)
143 
144 static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
145 {
146 	struct be_dma_mem *mem = &q->dma_mem;
147 
148 	if (mem->va) {
149 		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
150 				  mem->dma);
151 		mem->va = NULL;
152 	}
153 }
154 
155 static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
156 			  u16 len, u16 entry_size)
157 {
158 	struct be_dma_mem *mem = &q->dma_mem;
159 
160 	memset(q, 0, sizeof(*q));
161 	q->len = len;
162 	q->entry_size = entry_size;
163 	mem->size = len * entry_size;
164 	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
165 				     &mem->dma, GFP_KERNEL);
166 	if (!mem->va)
167 		return -ENOMEM;
168 	return 0;
169 }
170 
171 static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
172 {
173 	u32 reg, enabled;
174 
175 	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
176 			      &reg);
177 	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
178 
179 	if (!enabled && enable)
180 		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
181 	else if (enabled && !enable)
182 		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
183 	else
184 		return;
185 
186 	pci_write_config_dword(adapter->pdev,
187 			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
188 }
189 
190 static void be_intr_set(struct be_adapter *adapter, bool enable)
191 {
192 	int status = 0;
193 
194 	/* On lancer interrupts can't be controlled via this register */
195 	if (lancer_chip(adapter))
196 		return;
197 
198 	if (be_check_error(adapter, BE_ERROR_EEH))
199 		return;
200 
201 	status = be_cmd_intr_set(adapter, enable);
202 	if (status)
203 		be_reg_intr_set(adapter, enable);
204 }
205 
206 static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
207 {
208 	u32 val = 0;
209 
210 	if (be_check_error(adapter, BE_ERROR_HW))
211 		return;
212 
213 	val |= qid & DB_RQ_RING_ID_MASK;
214 	val |= posted << DB_RQ_NUM_POSTED_SHIFT;
215 
216 	wmb();
217 	iowrite32(val, adapter->db + DB_RQ_OFFSET);
218 }
219 
220 static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
221 			  u16 posted)
222 {
223 	u32 val = 0;
224 
225 	if (be_check_error(adapter, BE_ERROR_HW))
226 		return;
227 
228 	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
229 	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
230 
231 	wmb();
232 	iowrite32(val, adapter->db + txo->db_offset);
233 }
234 
235 static void be_eq_notify(struct be_adapter *adapter, u16 qid,
236 			 bool arm, bool clear_int, u16 num_popped,
237 			 u32 eq_delay_mult_enc)
238 {
239 	u32 val = 0;
240 
241 	val |= qid & DB_EQ_RING_ID_MASK;
242 	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
243 
244 	if (be_check_error(adapter, BE_ERROR_HW))
245 		return;
246 
247 	if (arm)
248 		val |= 1 << DB_EQ_REARM_SHIFT;
249 	if (clear_int)
250 		val |= 1 << DB_EQ_CLR_SHIFT;
251 	val |= 1 << DB_EQ_EVNT_SHIFT;
252 	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
253 	val |= eq_delay_mult_enc << DB_EQ_R2I_DLY_SHIFT;
254 	iowrite32(val, adapter->db + DB_EQ_OFFSET);
255 }
256 
257 void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
258 {
259 	u32 val = 0;
260 
261 	val |= qid & DB_CQ_RING_ID_MASK;
262 	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
263 			DB_CQ_RING_ID_EXT_MASK_SHIFT);
264 
265 	if (be_check_error(adapter, BE_ERROR_HW))
266 		return;
267 
268 	if (arm)
269 		val |= 1 << DB_CQ_REARM_SHIFT;
270 	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
271 	iowrite32(val, adapter->db + DB_CQ_OFFSET);
272 }
273 
274 static int be_dev_mac_add(struct be_adapter *adapter, const u8 *mac)
275 {
276 	int i;
277 
278 	/* Check if mac has already been added as part of uc-list */
279 	for (i = 0; i < adapter->uc_macs; i++) {
280 		if (ether_addr_equal(adapter->uc_list[i].mac, mac)) {
281 			/* mac already added, skip addition */
282 			adapter->pmac_id[0] = adapter->pmac_id[i + 1];
283 			return 0;
284 		}
285 	}
286 
287 	return be_cmd_pmac_add(adapter, mac, adapter->if_handle,
288 			       &adapter->pmac_id[0], 0);
289 }
290 
291 static void be_dev_mac_del(struct be_adapter *adapter, int pmac_id)
292 {
293 	int i;
294 
295 	/* Skip deletion if the programmed mac is
296 	 * being used in uc-list
297 	 */
298 	for (i = 0; i < adapter->uc_macs; i++) {
299 		if (adapter->pmac_id[i + 1] == pmac_id)
300 			return;
301 	}
302 	be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
303 }
304 
/* ndo_set_mac_address handler: program a new MAC address on the device.
 * Validates the address, programs it via PMAC_ADD under rx_filter_lock,
 * then confirms activation with the FW before committing to netdev.
 * Returns 0 on success or a negative errno.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0];

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if, User provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(addr->sa_data, adapter->dev_mac))
		return 0;

	/* BE3 VFs without FILTMGMT privilege are not allowed to set its MAC
	 * address
	 */
	if (BEx_chip(adapter) && be_virtfn(adapter) &&
	    !check_privilege(adapter, BE_PRIV_FILTMGMT))
		return -EPERM;

	/* if device is not running, copy MAC to netdev->dev_addr */
	if (!netif_running(netdev))
		goto done;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	mutex_lock(&adapter->rx_filter_lock);
	status = be_dev_mac_add(adapter, (u8 *)addr->sa_data);
	if (!status) {

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_dev_mac_del(adapter, old_pmac_id);
	}

	mutex_unlock(&adapter->rx_filter_lock);
	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, adapter->pmac_id[0], mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}

	/* Remember currently programmed MAC */
	ether_addr_copy(adapter->dev_mac, addr->sa_data);
done:
	eth_hw_addr_set(netdev, addr->sa_data);
	dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}
378 
379 /* BE2 supports only v0 cmd */
380 static void *hw_stats_from_cmd(struct be_adapter *adapter)
381 {
382 	if (BE2_chip(adapter)) {
383 		struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
384 
385 		return &cmd->hw_stats;
386 	} else if (BE3_chip(adapter)) {
387 		struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
388 
389 		return &cmd->hw_stats;
390 	} else {
391 		struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
392 
393 		return &cmd->hw_stats;
394 	}
395 }
396 
397 /* BE2 supports only v0 cmd */
398 static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
399 {
400 	if (BE2_chip(adapter)) {
401 		struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
402 
403 		return &hw_stats->erx;
404 	} else if (BE3_chip(adapter)) {
405 		struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
406 
407 		return &hw_stats->erx;
408 	} else {
409 		struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
410 
411 		return &hw_stats->erx;
412 	}
413 }
414 
/* Copy the v0 (BE2) HW stats layout into the chip-agnostic driver stats */
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* FW returns stats in LE dwords; convert in place before reading */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 reports address- and vlan-filter drops separately; sum them */
	drvs->rx_address_filtered =
					port_stats->rx_address_filtered +
					port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* v0 keeps per-port jabber counts in the rxf block, not per-port */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
463 
/* Copy the v1 (BE3) HW stats layout into the chip-agnostic driver stats */
static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* FW returns stats in LE dwords; convert in place before reading */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	/* Unlike v0, v1 tracks jabber events per port */
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
509 
/* Copy the v2 (Skyhawk-era) HW stats layout into the chip-agnostic driver
 * stats; additionally picks up RoCE counters when RoCE is supported.
 */
static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* FW returns stats in LE dwords; convert in place before reading */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}
563 
/* Copy Lancer per-physical-port (pport) stats into the chip-agnostic
 * driver stats. Several Lancer counters are 64-bit split into lo/hi
 * halves; only the low word is consumed here.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	/* FW returns stats in LE dwords; convert in place before reading */
	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* Lancer reports address- and vlan-filter drops separately; sum them */
	drvs->rx_address_filtered =
					pport_stats->rx_address_filtered +
					pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
600 
601 static void accumulate_16bit_val(u32 *acc, u16 val)
602 {
603 #define lo(x)			(x & 0xFFFF)
604 #define hi(x)			(x & 0xFFFF0000)
605 	bool wrapped = val < lo(*acc);
606 	u32 newacc = hi(*acc) + val;
607 
608 	if (wrapped)
609 		newacc += 65536;
610 	WRITE_ONCE(*acc, newacc);
611 }
612 
613 static void populate_erx_stats(struct be_adapter *adapter,
614 			       struct be_rx_obj *rxo, u32 erx_stat)
615 {
616 	if (!BEx_chip(adapter))
617 		rx_stats(rxo)->rx_drops_no_frags = erx_stat;
618 	else
619 		/* below erx HW counter can actually wrap around after
620 		 * 65535. Driver accumulates a 32-bit value
621 		 */
622 		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
623 				     (u16)erx_stat);
624 }
625 
626 void be_parse_stats(struct be_adapter *adapter)
627 {
628 	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
629 	struct be_rx_obj *rxo;
630 	int i;
631 	u32 erx_stat;
632 
633 	if (lancer_chip(adapter)) {
634 		populate_lancer_stats(adapter);
635 	} else {
636 		if (BE2_chip(adapter))
637 			populate_be_v0_stats(adapter);
638 		else if (BE3_chip(adapter))
639 			/* for BE3 */
640 			populate_be_v1_stats(adapter);
641 		else
642 			populate_be_v2_stats(adapter);
643 
644 		/* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
645 		for_all_rx_queues(adapter, rxo, i) {
646 			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
647 			populate_erx_stats(adapter, rxo, erx_stat);
648 		}
649 	}
650 }
651 
/* ndo_get_stats64 handler: aggregate per-ring packet/byte counters and
 * the FW-derived error counters into @stats. Per-ring counters are read
 * under u64_stats seqcount retry loops for a consistent 64-bit snapshot
 * on 32-bit hosts.
 */
static void be_get_stats64(struct net_device *netdev,
			   struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);

		/* retry until the writer was not mid-update */
		do {
			start = u64_stats_fetch_begin(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);

		/* retry until the writer was not mid-update */
		do {
			start = u64_stats_fetch_begin(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
}
718 
719 void be_link_status_update(struct be_adapter *adapter, u8 link_status)
720 {
721 	struct net_device *netdev = adapter->netdev;
722 
723 	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
724 		netif_carrier_off(netdev);
725 		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
726 	}
727 
728 	if (link_status)
729 		netif_carrier_on(netdev);
730 	else
731 		netif_carrier_off(netdev);
732 
733 	netdev_info(netdev, "Link is %s\n", link_status ? "Up" : "Down");
734 }
735 
736 static int be_gso_hdr_len(struct sk_buff *skb)
737 {
738 	if (skb->encapsulation)
739 		return skb_inner_tcp_all_headers(skb);
740 
741 	return skb_tcp_all_headers(skb);
742 }
743 
744 static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
745 {
746 	struct be_tx_stats *stats = tx_stats(txo);
747 	u32 tx_pkts = skb_shinfo(skb)->gso_segs ? : 1;
748 	/* Account for headers which get duplicated in TSO pkt */
749 	u32 dup_hdr_len = tx_pkts > 1 ? be_gso_hdr_len(skb) * (tx_pkts - 1) : 0;
750 
751 	u64_stats_update_begin(&stats->sync);
752 	stats->tx_reqs++;
753 	stats->tx_bytes += skb->len + dup_hdr_len;
754 	stats->tx_pkts += tx_pkts;
755 	if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
756 		stats->tx_vxlan_offload_pkts += tx_pkts;
757 	u64_stats_update_end(&stats->sync);
758 }
759 
760 /* Returns number of WRBs needed for the skb */
761 static u32 skb_wrb_cnt(struct sk_buff *skb)
762 {
763 	/* +1 for the header wrb */
764 	return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
765 }
766 
767 static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
768 {
769 	wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
770 	wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
771 	wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
772 	wrb->rsvd0 = 0;
773 }
774 
775 /* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
776  * to avoid the swap and shift/mask operations in wrb_fill().
777  */
778 static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
779 {
780 	wrb->frag_pa_hi = 0;
781 	wrb->frag_pa_lo = 0;
782 	wrb->frag_len = 0;
783 	wrb->rsvd0 = 0;
784 }
785 
786 static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
787 				     struct sk_buff *skb)
788 {
789 	u8 vlan_prio;
790 	u16 vlan_tag;
791 
792 	vlan_tag = skb_vlan_tag_get(skb);
793 	vlan_prio = skb_vlan_tag_get_prio(skb);
794 	/* If vlan priority provided by OS is NOT in available bmap */
795 	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
796 		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
797 				adapter->recommended_prio_bits;
798 
799 	return vlan_tag;
800 }
801 
802 /* Used only for IP tunnel packets */
803 static u16 skb_inner_ip_proto(struct sk_buff *skb)
804 {
805 	return (inner_ip_hdr(skb)->version == 4) ?
806 		inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
807 }
808 
809 static u16 skb_ip_proto(struct sk_buff *skb)
810 {
811 	return (ip_hdr(skb)->version == 4) ?
812 		ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
813 }
814 
815 static inline bool be_is_txq_full(struct be_tx_obj *txo)
816 {
817 	return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len;
818 }
819 
820 static inline bool be_can_txq_wake(struct be_tx_obj *txo)
821 {
822 	return atomic_read(&txo->q.used) < txo->q.len / 2;
823 }
824 
825 static inline bool be_is_tx_compl_pending(struct be_tx_obj *txo)
826 {
827 	return atomic_read(&txo->q.used) > txo->pend_wrb_cnt;
828 }
829 
830 static void be_get_wrb_params_from_skb(struct be_adapter *adapter,
831 				       struct sk_buff *skb,
832 				       struct be_wrb_params *wrb_params)
833 {
834 	u16 proto;
835 
836 	if (skb_is_gso(skb)) {
837 		BE_WRB_F_SET(wrb_params->features, LSO, 1);
838 		wrb_params->lso_mss = skb_shinfo(skb)->gso_size;
839 		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
840 			BE_WRB_F_SET(wrb_params->features, LSO6, 1);
841 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
842 		if (skb->encapsulation) {
843 			BE_WRB_F_SET(wrb_params->features, IPCS, 1);
844 			proto = skb_inner_ip_proto(skb);
845 		} else {
846 			proto = skb_ip_proto(skb);
847 		}
848 		if (proto == IPPROTO_TCP)
849 			BE_WRB_F_SET(wrb_params->features, TCPCS, 1);
850 		else if (proto == IPPROTO_UDP)
851 			BE_WRB_F_SET(wrb_params->features, UDPCS, 1);
852 	}
853 
854 	if (skb_vlan_tag_present(skb)) {
855 		BE_WRB_F_SET(wrb_params->features, VLAN, 1);
856 		wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb);
857 	}
858 
859 	BE_WRB_F_SET(wrb_params->features, CRC, 1);
860 }
861 
/* Populate the header WRB of a TX request from the precomputed
 * @wrb_params feature flags and the skb's length/fragment count.
 */
static void wrb_fill_hdr(struct be_adapter *adapter,
			 struct be_eth_hdr_wrb *hdr,
			 struct be_wrb_params *wrb_params,
			 struct sk_buff *skb)
{
	memset(hdr, 0, sizeof(*hdr));

	SET_TX_WRB_HDR_BITS(crc, hdr,
			    BE_WRB_F_GET(wrb_params->features, CRC));
	SET_TX_WRB_HDR_BITS(ipcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, IPCS));
	SET_TX_WRB_HDR_BITS(tcpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, TCPCS));
	SET_TX_WRB_HDR_BITS(udpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, UDPCS));

	SET_TX_WRB_HDR_BITS(lso, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO));
	SET_TX_WRB_HDR_BITS(lso6, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO6));
	SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss);

	/* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this
	 * hack is not needed, the evt bit is set while ringing DB.
	 */
	SET_TX_WRB_HDR_BITS(event, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW));
	SET_TX_WRB_HDR_BITS(vlan, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN));
	SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag);

	SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
	SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
	SET_TX_WRB_HDR_BITS(mgmt, hdr,
			    BE_WRB_F_GET(wrb_params->features, OS2BMC));
}
898 
899 static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
900 			  bool unmap_single)
901 {
902 	dma_addr_t dma;
903 	u32 frag_len = le32_to_cpu(wrb->frag_len);
904 
905 
906 	dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
907 		(u64)le32_to_cpu(wrb->frag_pa_lo);
908 	if (frag_len) {
909 		if (unmap_single)
910 			dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
911 		else
912 			dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
913 	}
914 }
915 
916 /* Grab a WRB header for xmit */
917 static u32 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
918 {
919 	u32 head = txo->q.head;
920 
921 	queue_head_inc(&txo->q);
922 	return head;
923 }
924 
/* Set up the WRB header for xmit: fill the header wrb at slot @head,
 * remember the skb for completion processing and account all of the
 * packet's WRBs against the ring.
 */
static void be_tx_setup_wrb_hdr(struct be_adapter *adapter,
				struct be_tx_obj *txo,
				struct be_wrb_params *wrb_params,
				struct sk_buff *skb, u16 head)
{
	u32 num_frags = skb_wrb_cnt(skb);
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head);

	wrb_fill_hdr(adapter, hdr, wrb_params, skb);
	/* HW expects the header in LE dword order */
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	/* slot must be free; a stale skb here means a bookkeeping bug */
	BUG_ON(txo->sent_skb_list[head]);
	txo->sent_skb_list[head] = skb;
	txo->last_req_hdr = head;
	atomic_add(num_frags, &txq->used);
	txo->last_req_wrb_cnt = num_frags;
	txo->pend_wrb_cnt += num_frags;
}
945 
946 /* Setup a WRB fragment (buffer descriptor) for xmit */
947 static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr,
948 				 int len)
949 {
950 	struct be_eth_wrb *wrb;
951 	struct be_queue_info *txq = &txo->q;
952 
953 	wrb = queue_head_node(txq);
954 	wrb_fill(wrb, busaddr, len);
955 	queue_head_inc(txq);
956 }
957 
/* Bring the queue back to the state it was in before be_xmit_enqueue() routine
 * was invoked. The producer index is restored to the previous packet and the
 * WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
 */
static void be_xmit_restore(struct be_adapter *adapter,
			    struct be_tx_obj *txo, u32 head, bool map_single,
			    u32 copied)
{
	struct device *dev;
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	dev = &adapter->pdev->dev;
	/* Rewind the producer to the hdr WRB of the failed packet so that the
	 * frag WRBs written after it can be walked below.
	 */
	txq->head = head;

	/* skip the first wrb (hdr); it's not mapped */
	queue_head_inc(txq);
	/* 'copied' is the total number of bytes DMA-mapped so far; walk the
	 * frag WRBs, unmapping each. Only the first frag (the skb linear
	 * data, if any) may have been mapped with dma_map_single(), hence
	 * map_single is cleared after the first iteration.
	 */
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= le32_to_cpu(wrb->frag_len);
		queue_head_inc(txq);
	}

	/* Leave the producer where it was before the failed enqueue */
	txq->head = head;
}
985 
/* Enqueue the given packet for transmit. This routine allocates WRBs for the
 * packet, dma maps the packet buffers and sets up the WRBs. Returns the number
 * of WRBs used up by the packet.
 */
static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
			   struct sk_buff *skb,
			   struct be_wrb_params *wrb_params)
{
	u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
	struct device *dev = &adapter->pdev->dev;
	bool map_single = false;
	u32 head;
	dma_addr_t busaddr;
	int len;

	/* Reserve the hdr WRB slot first; it is filled last, in
	 * be_tx_setup_wrb_hdr(), once all frags have been mapped.
	 */
	head = be_tx_get_wrb_hdr(txo);

	/* Map the skb linear data, if any, with dma_map_single() */
	if (skb->len > skb->data_len) {
		len = skb_headlen(skb);

		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		be_tx_setup_wrb_frag(txo, busaddr, len);
		copied += len;
	}

	/* Map each paged frag and emit one WRB per frag */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		len = skb_frag_size(frag);

		busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		be_tx_setup_wrb_frag(txo, busaddr, len);
		copied += len;
	}

	be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head);

	be_tx_stats_update(txo, skb);
	return wrb_cnt;

dma_err:
	/* Unwind: unmap whatever was mapped ('copied' bytes) and restore the
	 * producer index. Returning 0 tells the caller nothing was enqueued.
	 */
	adapter->drv_stats.dma_map_errors++;
	be_xmit_restore(adapter, txo, head, map_single, copied);
	return 0;
}
1035 
/* Non-zero once the QnQ async event has been received from FW
 * (BE_FLAGS_QNQ_ASYNC_EVT_RCVD set in adapter->flags).
 */
static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
{
	return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
}
1040 
1041 static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
1042 					     struct sk_buff *skb,
1043 					     struct be_wrb_params
1044 					     *wrb_params)
1045 {
1046 	bool insert_vlan = false;
1047 	u16 vlan_tag = 0;
1048 
1049 	skb = skb_share_check(skb, GFP_ATOMIC);
1050 	if (unlikely(!skb))
1051 		return skb;
1052 
1053 	if (skb_vlan_tag_present(skb)) {
1054 		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
1055 		insert_vlan = true;
1056 	}
1057 
1058 	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
1059 		if (!insert_vlan) {
1060 			vlan_tag = adapter->pvid;
1061 			insert_vlan = true;
1062 		}
1063 		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
1064 		 * skip VLAN insertion
1065 		 */
1066 		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
1067 	}
1068 
1069 	if (insert_vlan) {
1070 		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
1071 						vlan_tag);
1072 		if (unlikely(!skb))
1073 			return skb;
1074 		__vlan_hwaccel_clear_tag(skb);
1075 	}
1076 
1077 	/* Insert the outer VLAN, if any */
1078 	if (adapter->qnq_vid) {
1079 		vlan_tag = adapter->qnq_vid;
1080 		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
1081 						vlan_tag);
1082 		if (unlikely(!skb))
1083 			return skb;
1084 		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
1085 	}
1086 
1087 	return skb;
1088 }
1089 
1090 static bool be_ipv6_exthdr_check(struct sk_buff *skb)
1091 {
1092 	struct ethhdr *eh = (struct ethhdr *)skb->data;
1093 	u16 offset = ETH_HLEN;
1094 
1095 	if (eh->h_proto == htons(ETH_P_IPV6)) {
1096 		struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
1097 
1098 		offset += sizeof(struct ipv6hdr);
1099 		if (ip6h->nexthdr != NEXTHDR_TCP &&
1100 		    ip6h->nexthdr != NEXTHDR_UDP) {
1101 			struct ipv6_opt_hdr *ehdr =
1102 				(struct ipv6_opt_hdr *)(skb->data + offset);
1103 
1104 			/* offending pkt: 2nd byte following IPv6 hdr is 0xff */
1105 			if (ehdr->hdrlen == 0xff)
1106 				return true;
1107 		}
1108 	}
1109 	return false;
1110 }
1111 
1112 static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
1113 {
1114 	return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
1115 }
1116 
1117 static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
1118 {
1119 	return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
1120 }
1121 
/* Apply BEx/Lancer TX HW-bug workarounds to the skb before it is queued.
 * May trim, re-tag or re-allocate the skb. Returns the (possibly new) skb,
 * or NULL if the pkt was dropped/consumed.
 */
static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  struct be_wrb_params
						  *wrb_params)
{
	struct vlan_ethhdr *veh = skb_vlan_eth_hdr(skb);
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrecly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	/* Trim off any pad bytes so the HW/Lancer bugs above are not hit */
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    skb_vlan_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	/* tx_drop frees the skb; err is reached when a helper has already
	 * consumed it.
	 */
	dev_kfree_skb_any(skb);
err:
	return NULL;
}
1190 
/* Apply all chip-family TX workarounds. Returns the (possibly new) skb, or
 * NULL if the pkt could not be prepared (in which case the skb has been
 * consumed — presumably skb_put_padto() frees it on failure; confirm
 * against its kernel-doc).
 */
static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   struct be_wrb_params *wrb_params)
{
	int err;

	/* Lancer, SH and BE3 in SRIOV mode have a bug wherein
	 * packets that are 32b or less may cause a transmit stall
	 * on that port. The workaround is to pad such packets
	 * (len <= 32 bytes) to a minimum length of 36b.
	 */
	if (skb->len <= 32) {
		if (skb_put_padto(skb, 36))
			return NULL;
	}

	/* BEx/Lancer need additional HW-bug workarounds */
	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params);
		if (!skb)
			return NULL;
	}

	/* The stack can send us skbs with length greater than
	 * what the HW can handle. Trim the extra bytes.
	 */
	WARN_ON_ONCE(skb->len > BE_MAX_GSO_SIZE);
	err = pskb_trim(skb, BE_MAX_GSO_SIZE);
	WARN_ON(err);

	return skb;
}
1222 
/* Notify the HW of all WRBs queued since the last flush (pend_wrb_cnt),
 * making sure the last request will generate an event/completion.
 */
static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);

	/* Mark the last request eventable if it hasn't been marked already */
	if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
		hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);

	/* compose a dummy wrb if there are odd set of wrbs to notify */
	if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
		wrb_fill_dummy(queue_head_node(txq));
		queue_head_inc(txq);
		atomic_inc(&txq->used);
		txo->pend_wrb_cnt++;
		/* Patch the num_wrb field of the last request's hdr WRB to
		 * account for the extra dummy WRB just appended.
		 */
		hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
					   TX_HDR_WRB_NUM_SHIFT);
		hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
					  TX_HDR_WRB_NUM_SHIFT);
	}
	be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
	txo->pend_wrb_cnt = 0;
}
1246 
/* OS2BMC related */

/* Well-known L4 destination ports used to classify pkts that the BMC may
 * want to see (see be_send_pkt_to_bmc()).
 */
#define DHCP_CLIENT_PORT	68
#define DHCP_SERVER_PORT	67
#define NET_BIOS_PORT1		137
#define NET_BIOS_PORT2		138
#define DHCPV6_RAS_PORT		547

/* Note the asymmetry below: mc/bc pkts are forwarded to the BMC when the
 * corresponding filter is NOT enabled, while ARP pkts are forwarded when
 * the broadcast-ARP filter IS enabled.
 */
#define is_mc_allowed_on_bmc(adapter, eh)	\
	(!is_multicast_filt_enabled(adapter) &&	\
	 is_multicast_ether_addr(eh->h_dest) &&	\
	 !is_broadcast_ether_addr(eh->h_dest))

#define is_bc_allowed_on_bmc(adapter, eh)	\
	(!is_broadcast_filt_enabled(adapter) &&	\
	 is_broadcast_ether_addr(eh->h_dest))

#define is_arp_allowed_on_bmc(adapter, skb)	\
	(is_arp(skb) && is_arp_filt_enabled(adapter))

#define is_arp(skb)	(skb->protocol == htons(ETH_P_ARP))

/* Each of the following tests one filter bit in adapter->bmc_filt_mask */
#define is_arp_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask & (BMC_FILT_BROADCAST_ARP))

#define is_dhcp_client_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_CLIENT)

#define is_dhcp_srvr_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_SERVER)

#define is_nbios_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_NET_BIOS)

#define is_ipv6_na_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask &	\
			BMC_FILT_MULTICAST_IPV6_NEIGH_ADVER)

#define is_ipv6_ra_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RA)

#define is_ipv6_ras_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RAS)

#define is_broadcast_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask & BMC_FILT_BROADCAST)

#define is_multicast_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask & BMC_FILT_MULTICAST)
1296 
/* Decide whether this TX pkt must also be passed through to the BMC
 * (OS2BMC). Returns true if the caller should enqueue the pkt a second
 * time with the mgmt bit set.
 *
 * NOTE(review): when os2bmc applies, *skb is replaced via
 * be_insert_vlan_in_pkt() with a NULL wrb_params — verify that helper
 * tolerates NULL wrb_params in its qnq paths, and that *skb cannot come
 * back NULL into the caller's subsequent enqueue.
 */
static bool be_send_pkt_to_bmc(struct be_adapter *adapter,
			       struct sk_buff **skb)
{
	struct ethhdr *eh = (struct ethhdr *)(*skb)->data;
	bool os2bmc = false;

	if (!be_is_os2bmc_enabled(adapter))
		goto done;

	/* Only mc/bc frames are candidates for the BMC */
	if (!is_multicast_ether_addr(eh->h_dest))
		goto done;

	if (is_mc_allowed_on_bmc(adapter, eh) ||
	    is_bc_allowed_on_bmc(adapter, eh) ||
	    is_arp_allowed_on_bmc(adapter, (*skb))) {
		os2bmc = true;
		goto done;
	}

	/* IPv6 RA/NA pkts: forward per the corresponding filter bits */
	if ((*skb)->protocol == htons(ETH_P_IPV6)) {
		struct ipv6hdr *hdr = ipv6_hdr((*skb));
		u8 nexthdr = hdr->nexthdr;

		if (nexthdr == IPPROTO_ICMPV6) {
			struct icmp6hdr *icmp6 = icmp6_hdr((*skb));

			switch (icmp6->icmp6_type) {
			case NDISC_ROUTER_ADVERTISEMENT:
				os2bmc = is_ipv6_ra_filt_enabled(adapter);
				goto done;
			case NDISC_NEIGHBOUR_ADVERTISEMENT:
				os2bmc = is_ipv6_na_filt_enabled(adapter);
				goto done;
			default:
				break;
			}
		}
	}

	/* DHCP / NetBIOS / DHCPv6-RAS: classify by UDP destination port */
	if (is_udp_pkt((*skb))) {
		struct udphdr *udp = udp_hdr((*skb));

		switch (ntohs(udp->dest)) {
		case DHCP_CLIENT_PORT:
			os2bmc = is_dhcp_client_filt_enabled(adapter);
			goto done;
		case DHCP_SERVER_PORT:
			os2bmc = is_dhcp_srvr_filt_enabled(adapter);
			goto done;
		case NET_BIOS_PORT1:
		case NET_BIOS_PORT2:
			os2bmc = is_nbios_filt_enabled(adapter);
			goto done;
		case DHCPV6_RAS_PORT:
			os2bmc = is_ipv6_ras_filt_enabled(adapter);
			goto done;
		default:
			break;
		}
	}
done:
	/* For packets over a vlan, which are destined
	 * to BMC, asic expects the vlan to be inline in the packet.
	 */
	if (os2bmc)
		*skb = be_insert_vlan_in_pkt(adapter, *skb, NULL);

	return os2bmc;
}
1366 
/* ndo_start_xmit handler: apply workarounds, enqueue the pkt (twice when
 * it must also reach the BMC), and ring the doorbell when no more pkts are
 * expected (netdev_xmit_more()) or the queue just filled up.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	u16 q_idx = skb_get_queue_mapping(skb);
	struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
	struct be_wrb_params wrb_params = { 0 };
	bool flush = !netdev_xmit_more();
	u16 wrb_cnt;

	skb = be_xmit_workarounds(adapter, skb, &wrb_params);
	if (unlikely(!skb))
		goto drop;

	be_get_wrb_params_from_skb(adapter, skb, &wrb_params);

	wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
	if (unlikely(!wrb_cnt)) {
		/* DMA mapping failed; the queue was already restored */
		dev_kfree_skb_any(skb);
		goto drop;
	}

	/* if os2bmc is enabled and if the pkt is destined to bmc,
	 * enqueue the pkt a 2nd time with mgmt bit set.
	 * NOTE(review): be_send_pkt_to_bmc() may replace skb; if its internal
	 * re-allocation fails, skb could be NULL here — confirm the enqueue
	 * below cannot see a NULL skb.
	 */
	if (be_send_pkt_to_bmc(adapter, &skb)) {
		BE_WRB_F_SET(wrb_params.features, OS2BMC, 1);
		wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
		if (unlikely(!wrb_cnt))
			goto drop;
		else
			skb_get(skb);
	}

	if (be_is_txq_full(txo)) {
		netif_stop_subqueue(netdev, q_idx);
		tx_stats(txo)->tx_stops++;
	}

	if (flush || __netif_subqueue_stopped(netdev, q_idx))
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
drop:
	tx_stats(txo)->tx_drv_drops++;
	/* Flush the already enqueued tx requests */
	if (flush && txo->pend_wrb_cnt)
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
}
1417 
1418 static void be_tx_timeout(struct net_device *netdev, unsigned int txqueue)
1419 {
1420 	struct be_adapter *adapter = netdev_priv(netdev);
1421 	struct device *dev = &adapter->pdev->dev;
1422 	struct be_tx_obj *txo;
1423 	struct sk_buff *skb;
1424 	struct tcphdr *tcphdr;
1425 	struct udphdr *udphdr;
1426 	u32 *entry;
1427 	int status;
1428 	int i, j;
1429 
1430 	for_all_tx_queues(adapter, txo, i) {
1431 		dev_info(dev, "TXQ Dump: %d H: %d T: %d used: %d, qid: 0x%x\n",
1432 			 i, txo->q.head, txo->q.tail,
1433 			 atomic_read(&txo->q.used), txo->q.id);
1434 
1435 		entry = txo->q.dma_mem.va;
1436 		for (j = 0; j < TX_Q_LEN * 4; j += 4) {
1437 			if (entry[j] != 0 || entry[j + 1] != 0 ||
1438 			    entry[j + 2] != 0 || entry[j + 3] != 0) {
1439 				dev_info(dev, "Entry %d 0x%x 0x%x 0x%x 0x%x\n",
1440 					 j, entry[j], entry[j + 1],
1441 					 entry[j + 2], entry[j + 3]);
1442 			}
1443 		}
1444 
1445 		entry = txo->cq.dma_mem.va;
1446 		dev_info(dev, "TXCQ Dump: %d  H: %d T: %d used: %d\n",
1447 			 i, txo->cq.head, txo->cq.tail,
1448 			 atomic_read(&txo->cq.used));
1449 		for (j = 0; j < TX_CQ_LEN * 4; j += 4) {
1450 			if (entry[j] != 0 || entry[j + 1] != 0 ||
1451 			    entry[j + 2] != 0 || entry[j + 3] != 0) {
1452 				dev_info(dev, "Entry %d 0x%x 0x%x 0x%x 0x%x\n",
1453 					 j, entry[j], entry[j + 1],
1454 					 entry[j + 2], entry[j + 3]);
1455 			}
1456 		}
1457 
1458 		for (j = 0; j < TX_Q_LEN; j++) {
1459 			if (txo->sent_skb_list[j]) {
1460 				skb = txo->sent_skb_list[j];
1461 				if (ip_hdr(skb)->protocol == IPPROTO_TCP) {
1462 					tcphdr = tcp_hdr(skb);
1463 					dev_info(dev, "TCP source port %d\n",
1464 						 ntohs(tcphdr->source));
1465 					dev_info(dev, "TCP dest port %d\n",
1466 						 ntohs(tcphdr->dest));
1467 					dev_info(dev, "TCP sequence num %d\n",
1468 						 ntohs(tcphdr->seq));
1469 					dev_info(dev, "TCP ack_seq %d\n",
1470 						 ntohs(tcphdr->ack_seq));
1471 				} else if (ip_hdr(skb)->protocol ==
1472 					   IPPROTO_UDP) {
1473 					udphdr = udp_hdr(skb);
1474 					dev_info(dev, "UDP source port %d\n",
1475 						 ntohs(udphdr->source));
1476 					dev_info(dev, "UDP dest port %d\n",
1477 						 ntohs(udphdr->dest));
1478 				}
1479 				dev_info(dev, "skb[%d] %p len %d proto 0x%x\n",
1480 					 j, skb, skb->len, skb->protocol);
1481 			}
1482 		}
1483 	}
1484 
1485 	if (lancer_chip(adapter)) {
1486 		dev_info(dev, "Initiating reset due to tx timeout\n");
1487 		dev_info(dev, "Resetting adapter\n");
1488 		status = lancer_physdev_ctrl(adapter,
1489 					     PHYSDEV_CONTROL_FW_RESET_MASK);
1490 		if (status)
1491 			dev_err(dev, "Reset failed .. Reboot server\n");
1492 	}
1493 }
1494 
1495 static inline bool be_in_all_promisc(struct be_adapter *adapter)
1496 {
1497 	return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
1498 			BE_IF_FLAGS_ALL_PROMISCUOUS;
1499 }
1500 
1501 static int be_set_vlan_promisc(struct be_adapter *adapter)
1502 {
1503 	struct device *dev = &adapter->pdev->dev;
1504 	int status;
1505 
1506 	if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
1507 		return 0;
1508 
1509 	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
1510 	if (!status) {
1511 		dev_info(dev, "Enabled VLAN promiscuous mode\n");
1512 		adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
1513 	} else {
1514 		dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
1515 	}
1516 	return status;
1517 }
1518 
1519 static int be_clear_vlan_promisc(struct be_adapter *adapter)
1520 {
1521 	struct device *dev = &adapter->pdev->dev;
1522 	int status;
1523 
1524 	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
1525 	if (!status) {
1526 		dev_info(dev, "Disabling VLAN promiscuous mode\n");
1527 		adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
1528 	}
1529 	return status;
1530 }
1531 
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to change the VLAN state if the I/F is in promiscuous */
	if (adapter->netdev->flags & IFF_PROMISC)
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		return be_set_vlan_promisc(adapter);

	/* Leaving vlan-promisc: clear it before programming the filter */
	if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
		status = be_clear_vlan_promisc(adapter);
		if (status)
			return status;
	}
	/* Construct VLAN Table to give to HW */
	/* NOTE(review): entries are stored LE (cpu_to_le16) although vids[]
	 * is declared u16 — presumably be_cmd_vlan_config() passes them to
	 * FW verbatim; confirm against that cmd's definition.
	 */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
	if (status) {
		dev_err(dev, "Setting HW VLAN filtering failed\n");
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
		    addl_status(status) ==
				MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			return be_set_vlan_promisc(adapter);
	}
	return status;
}
1570 
1571 static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
1572 {
1573 	struct be_adapter *adapter = netdev_priv(netdev);
1574 	int status = 0;
1575 
1576 	mutex_lock(&adapter->rx_filter_lock);
1577 
1578 	/* Packets with VID 0 are always received by Lancer by default */
1579 	if (lancer_chip(adapter) && vid == 0)
1580 		goto done;
1581 
1582 	if (test_bit(vid, adapter->vids))
1583 		goto done;
1584 
1585 	set_bit(vid, adapter->vids);
1586 	adapter->vlans_added++;
1587 
1588 	status = be_vid_config(adapter);
1589 done:
1590 	mutex_unlock(&adapter->rx_filter_lock);
1591 	return status;
1592 }
1593 
1594 static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
1595 {
1596 	struct be_adapter *adapter = netdev_priv(netdev);
1597 	int status = 0;
1598 
1599 	mutex_lock(&adapter->rx_filter_lock);
1600 
1601 	/* Packets with VID 0 are always received by Lancer by default */
1602 	if (lancer_chip(adapter) && vid == 0)
1603 		goto done;
1604 
1605 	if (!test_bit(vid, adapter->vids))
1606 		goto done;
1607 
1608 	clear_bit(vid, adapter->vids);
1609 	adapter->vlans_added--;
1610 
1611 	status = be_vid_config(adapter);
1612 done:
1613 	mutex_unlock(&adapter->rx_filter_lock);
1614 	return status;
1615 }
1616 
/* Turn on uc+mc+vlan promiscuous mode in HW.
 * NOTE(review): the be_cmd_rx_filter() status is discarded and if_flags is
 * updated unconditionally — unlike be_set_mc_promisc()/be_set_uc_promisc(),
 * which update flags only on success. Confirm this is intentional.
 */
static void be_set_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
	adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
}
1622 
1623 static void be_set_mc_promisc(struct be_adapter *adapter)
1624 {
1625 	int status;
1626 
1627 	if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
1628 		return;
1629 
1630 	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
1631 	if (!status)
1632 		adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
1633 }
1634 
1635 static void be_set_uc_promisc(struct be_adapter *adapter)
1636 {
1637 	int status;
1638 
1639 	if (adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS)
1640 		return;
1641 
1642 	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_PROMISCUOUS, ON);
1643 	if (!status)
1644 		adapter->if_flags |= BE_IF_FLAGS_PROMISCUOUS;
1645 }
1646 
1647 static void be_clear_uc_promisc(struct be_adapter *adapter)
1648 {
1649 	int status;
1650 
1651 	if (!(adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS))
1652 		return;
1653 
1654 	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_PROMISCUOUS, OFF);
1655 	if (!status)
1656 		adapter->if_flags &= ~BE_IF_FLAGS_PROMISCUOUS;
1657 }
1658 
1659 /* The below 2 functions are the callback args for __dev_mc_sync/dev_uc_sync().
1660  * We use a single callback function for both sync and unsync. We really don't
1661  * add/remove addresses through this callback. But, we use it to detect changes
1662  * to the uc/mc lists. The entire uc/mc list is programmed in be_set_rx_mode().
1663  */
/* __dev_uc_sync() callback: only records that the uc-list changed; the
 * list itself is (re)programmed in be_set_uc_list().
 */
static int be_uc_list_update(struct net_device *netdev,
			     const unsigned char *addr)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->update_uc_list = true;
	return 0;
}
1672 
/* __dev_mc_sync() callback: only records that the mc-list changed; the
 * list itself is (re)programmed in be_set_mc_list().
 */
static int be_mc_list_update(struct net_device *netdev,
			     const unsigned char *addr)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->update_mc_list = true;
	return 0;
}
1681 
/* Program the HW multicast filter from the netdev mc-list, falling back to
 * mc-promisc mode when the list is too large (or IFF_ALLMULTI is set).
 * The list is cached under netif_addr_lock; FW cmds are issued after the
 * lock is dropped.
 */
static void be_set_mc_list(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct netdev_hw_addr *ha;
	bool mc_promisc = false;
	int status;

	netif_addr_lock_bh(netdev);
	__dev_mc_sync(netdev, be_mc_list_update, be_mc_list_update);

	if (netdev->flags & IFF_PROMISC) {
		adapter->update_mc_list = false;
	} else if (netdev->flags & IFF_ALLMULTI ||
		   netdev_mc_count(netdev) > be_max_mc(adapter)) {
		/* Enable multicast promisc if num configured exceeds
		 * what we support
		 */
		mc_promisc = true;
		adapter->update_mc_list = false;
	} else if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS) {
		/* Update mc-list unconditionally if the iface was previously
		 * in mc-promisc mode and now is out of that mode.
		 */
		adapter->update_mc_list = true;
	}

	if (adapter->update_mc_list) {
		int i = 0;

		/* cache the mc-list in adapter (the branch above guarantees
		 * the count fits within be_max_mc())
		 */
		netdev_for_each_mc_addr(ha, netdev) {
			ether_addr_copy(adapter->mc_list[i].mac, ha->addr);
			i++;
		}
		adapter->mc_count = netdev_mc_count(netdev);
	}
	netif_addr_unlock_bh(netdev);

	if (mc_promisc) {
		be_set_mc_promisc(adapter);
	} else if (adapter->update_mc_list) {
		/* On FW cmd failure fall back to mc-promisc mode */
		status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
		if (!status)
			adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
		else
			be_set_mc_promisc(adapter);

		adapter->update_mc_list = false;
	}
}
1732 
/* Drop all mc-addrs from the netdev sync list, turn off the HW multicast
 * filter and forget the cached count.
 */
static void be_clear_mc_list(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	__dev_mc_unsync(netdev, NULL);
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, OFF);
	adapter->mc_count = 0;
}
1741 
1742 static int be_uc_mac_add(struct be_adapter *adapter, int uc_idx)
1743 {
1744 	if (ether_addr_equal(adapter->uc_list[uc_idx].mac, adapter->dev_mac)) {
1745 		adapter->pmac_id[uc_idx + 1] = adapter->pmac_id[0];
1746 		return 0;
1747 	}
1748 
1749 	return be_cmd_pmac_add(adapter, adapter->uc_list[uc_idx].mac,
1750 			       adapter->if_handle,
1751 			       &adapter->pmac_id[uc_idx + 1], 0);
1752 }
1753 
1754 static void be_uc_mac_del(struct be_adapter *adapter, int pmac_id)
1755 {
1756 	if (pmac_id == adapter->pmac_id[0])
1757 		return;
1758 
1759 	be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
1760 }
1761 
/* Program the HW unicast filters from the netdev uc-list, falling back to
 * uc-promisc when the list exceeds be_max_uc()-1 entries (one slot is
 * reserved for the primary MAC). The list is cached under netif_addr_lock;
 * FW cmds are issued after the lock is dropped.
 */
static void be_set_uc_list(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct netdev_hw_addr *ha;
	bool uc_promisc = false;
	int curr_uc_macs = 0, i;

	netif_addr_lock_bh(netdev);
	__dev_uc_sync(netdev, be_uc_list_update, be_uc_list_update);

	if (netdev->flags & IFF_PROMISC) {
		adapter->update_uc_list = false;
	} else if (netdev_uc_count(netdev) > (be_max_uc(adapter) - 1)) {
		uc_promisc = true;
		adapter->update_uc_list = false;
	}  else if (adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS) {
		/* Update uc-list unconditionally if the iface was previously
		 * in uc-promisc mode and now is out of that mode.
		 */
		adapter->update_uc_list = true;
	}

	if (adapter->update_uc_list) {
		/* cache the uc-list in adapter array (the branch above
		 * guarantees the count fits)
		 */
		i = 0;
		netdev_for_each_uc_addr(ha, netdev) {
			ether_addr_copy(adapter->uc_list[i].mac, ha->addr);
			i++;
		}
		curr_uc_macs = netdev_uc_count(netdev);
	}
	netif_addr_unlock_bh(netdev);

	if (uc_promisc) {
		be_set_uc_promisc(adapter);
	} else if (adapter->update_uc_list) {
		be_clear_uc_promisc(adapter);

		/* Reprogram from scratch: delete all old entries, then add
		 * the cached ones (pmac_id[0] is the primary MAC's id).
		 */
		for (i = 0; i < adapter->uc_macs; i++)
			be_uc_mac_del(adapter, adapter->pmac_id[i + 1]);

		for (i = 0; i < curr_uc_macs; i++)
			be_uc_mac_add(adapter, i);
		adapter->uc_macs = curr_uc_macs;
		adapter->update_uc_list = false;
	}
}
1809 
1810 static void be_clear_uc_list(struct be_adapter *adapter)
1811 {
1812 	struct net_device *netdev = adapter->netdev;
1813 	int i;
1814 
1815 	__dev_uc_unsync(netdev, NULL);
1816 	for (i = 0; i < adapter->uc_macs; i++)
1817 		be_uc_mac_del(adapter, adapter->pmac_id[i + 1]);
1818 
1819 	adapter->uc_macs = 0;
1820 }
1821 
/* Reprogram the interface's RX filters (promisc state, uc-list, mc-list)
 * from the netdev flags/lists, serialized by rx_filter_lock.
 */
static void __be_set_rx_mode(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	mutex_lock(&adapter->rx_filter_lock);

	if (netdev->flags & IFF_PROMISC) {
		if (!be_in_all_promisc(adapter))
			be_set_all_promisc(adapter);
	} else if (be_in_all_promisc(adapter)) {
		/* We need to re-program the vlan-list or clear
		 * vlan-promisc mode (if needed) when the interface
		 * comes out of promisc mode.
		 */
		be_vid_config(adapter);
	}

	be_set_uc_list(adapter);
	be_set_mc_list(adapter);

	mutex_unlock(&adapter->rx_filter_lock);
}
1844 
/* Deferred-work wrapper around __be_set_rx_mode(); frees the containing
 * be_cmd_work request when done.
 */
static void be_work_set_rx_mode(struct work_struct *work)
{
	struct be_cmd_work *cmd_work =
				container_of(work, struct be_cmd_work, work);

	__be_set_rx_mode(cmd_work->adapter);
	kfree(cmd_work);
}
1853 
/* ndo_set_vf_mac handler: program a new MAC for VF 'vf'. BEx chips program
 * it via pmac del+add on the VF's i/f; later chips use the set_mac cmd.
 * FW cmds are issued with domain 'vf + 1'. Returns 0 or a -errno.
 */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	/* Proceed further only if user provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(mac, vf_cfg->mac_addr))
		return 0;

	if (BEx_chip(adapter)) {
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status) {
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
			mac, vf, status);
		return be_cmd_status(status);
	}

	/* Only cache the new MAC once FW accepted it */
	ether_addr_copy(vf_cfg->mac_addr, mac);

	return 0;
}
1893 
1894 static int be_get_vf_config(struct net_device *netdev, int vf,
1895 			    struct ifla_vf_info *vi)
1896 {
1897 	struct be_adapter *adapter = netdev_priv(netdev);
1898 	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1899 
1900 	if (!sriov_enabled(adapter))
1901 		return -EPERM;
1902 
1903 	if (vf >= adapter->num_vfs)
1904 		return -EINVAL;
1905 
1906 	vi->vf = vf;
1907 	vi->max_tx_rate = vf_cfg->tx_rate;
1908 	vi->min_tx_rate = 0;
1909 	vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1910 	vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
1911 	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
1912 	vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
1913 	vi->spoofchk = adapter->vf_cfg[vf].spoofchk;
1914 
1915 	return 0;
1916 }
1917 
/* Enable Transparent VLAN Tagging (TVT) with tag 'vlan' on VF 'vf', clear
 * any guest-programmed VLAN filters, and revoke the VF's FILTMGMT
 * privilege. The last two steps are best-effort: their status is only
 * logged/recorded and the function still returns 0.
 */
static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	int vf_if_id = vf_cfg->if_handle;
	int status;

	/* Enable Transparent VLAN Tagging */
	status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0, 0);
	if (status)
		return status;

	/* Clear pre-programmed VLAN filters on VF if any, if TVT is enabled */
	vids[0] = 0;
	status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
	if (!status)
		dev_info(&adapter->pdev->dev,
			 "Cleared guest VLANs on VF%d", vf);

	/* After TVT is enabled, disallow VFs to program VLAN filters */
	if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
						  ~BE_PRIV_FILTMGMT, vf + 1);
		if (!status)
			vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
	}
	return 0;
}
1946 
/* Disable Transparent VLAN Tagging on VF 'vf' and (best-effort) restore
 * the VF's FILTMGMT privilege so it can program VLAN filters again.
 * Returns non-zero only if the hsw_config reset itself fails.
 */
static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	struct device *dev = &adapter->pdev->dev;
	int status;

	/* Reset Transparent VLAN Tagging. */
	status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
				       vf_cfg->if_handle, 0, 0);
	if (status)
		return status;

	/* Allow VFs to program VLAN filtering */
	if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
						  BE_PRIV_FILTMGMT, vf + 1);
		if (!status) {
			vf_cfg->privileges |= BE_PRIV_FILTMGMT;
			dev_info(dev, "VF%d: FILTMGMT priv enabled", vf);
		}
	}

	dev_info(dev,
		 "Disable/re-enable i/f in VM to clear Transparent VLAN tag");
	return 0;
}
1973 
1974 static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
1975 			  __be16 vlan_proto)
1976 {
1977 	struct be_adapter *adapter = netdev_priv(netdev);
1978 	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1979 	int status;
1980 
1981 	if (!sriov_enabled(adapter))
1982 		return -EPERM;
1983 
1984 	if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
1985 		return -EINVAL;
1986 
1987 	if (vlan_proto != htons(ETH_P_8021Q))
1988 		return -EPROTONOSUPPORT;
1989 
1990 	if (vlan || qos) {
1991 		vlan |= qos << VLAN_PRIO_SHIFT;
1992 		status = be_set_vf_tvt(adapter, vf, vlan);
1993 	} else {
1994 		status = be_clear_vf_tvt(adapter, vf);
1995 	}
1996 
1997 	if (status) {
1998 		dev_err(&adapter->pdev->dev,
1999 			"VLAN %d config on VF %d failed : %#x\n", vlan, vf,
2000 			status);
2001 		return be_cmd_status(status);
2002 	}
2003 
2004 	vf_cfg->vlan_tag = vlan;
2005 	return 0;
2006 }
2007 
/* ndo_set_vf_rate handler: cap VF @vf's TX rate at @max_tx_rate Mbps.
 * A @max_tx_rate of 0 removes the limit. Minimum-rate guarantees
 * (@min_tx_rate) are rejected as unsupported.
 */
static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int percent_rate, status = 0;
	u16 link_speed = 0;
	u8 link_status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (min_tx_rate)
		return -EINVAL;

	/* Removing the limit needs no link-speed validation; link_speed
	 * deliberately stays 0 on this path.
	 */
	if (!max_tx_rate)
		goto config_qos;

	status = be_cmd_link_status_query(adapter, &link_speed,
					  &link_status, 0);
	if (status)
		goto err;

	if (!link_status) {
		dev_err(dev, "TX-rate setting not allowed when link is down\n");
		status = -ENETDOWN;
		goto err;
	}

	if (max_tx_rate < 100 || max_tx_rate > link_speed) {
		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
			link_speed);
		status = -EINVAL;
		goto err;
	}

	/* On Skyhawk the QOS setting must be done only as a % value */
	percent_rate = link_speed / 100;
	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
			percent_rate);
		status = -EINVAL;
		goto err;
	}

config_qos:
	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
	if (status)
		goto err;

	adapter->vf_cfg[vf].tx_rate = max_tx_rate;
	return 0;

err:
	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
		max_tx_rate, vf);
	return be_cmd_status(status);
}
2069 
2070 static int be_set_vf_link_state(struct net_device *netdev, int vf,
2071 				int link_state)
2072 {
2073 	struct be_adapter *adapter = netdev_priv(netdev);
2074 	int status;
2075 
2076 	if (!sriov_enabled(adapter))
2077 		return -EPERM;
2078 
2079 	if (vf >= adapter->num_vfs)
2080 		return -EINVAL;
2081 
2082 	status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
2083 	if (status) {
2084 		dev_err(&adapter->pdev->dev,
2085 			"Link state change on VF %d failed: %#x\n", vf, status);
2086 		return be_cmd_status(status);
2087 	}
2088 
2089 	adapter->vf_cfg[vf].plink_tracking = link_state;
2090 
2091 	return 0;
2092 }
2093 
2094 static int be_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
2095 {
2096 	struct be_adapter *adapter = netdev_priv(netdev);
2097 	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
2098 	u8 spoofchk;
2099 	int status;
2100 
2101 	if (!sriov_enabled(adapter))
2102 		return -EPERM;
2103 
2104 	if (vf >= adapter->num_vfs)
2105 		return -EINVAL;
2106 
2107 	if (BEx_chip(adapter))
2108 		return -EOPNOTSUPP;
2109 
2110 	if (enable == vf_cfg->spoofchk)
2111 		return 0;
2112 
2113 	spoofchk = enable ? ENABLE_MAC_SPOOFCHK : DISABLE_MAC_SPOOFCHK;
2114 
2115 	status = be_cmd_set_hsw_config(adapter, 0, vf + 1, vf_cfg->if_handle,
2116 				       0, spoofchk);
2117 	if (status) {
2118 		dev_err(&adapter->pdev->dev,
2119 			"Spoofchk change on VF %d failed: %#x\n", vf, status);
2120 		return be_cmd_status(status);
2121 	}
2122 
2123 	vf_cfg->spoofchk = enable;
2124 	return 0;
2125 }
2126 
2127 static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
2128 			  ulong now)
2129 {
2130 	aic->rx_pkts_prev = rx_pkts;
2131 	aic->tx_reqs_prev = tx_pkts;
2132 	aic->jiffies = now;
2133 }
2134 
/* Compute a new EQ-delay (interrupt coalescing) value for @eqo from the
 * rx+tx packet rate observed since the previous sample.
 * Returns the static, user-set eqd when adaptive coalescing is off, and
 * the previous eqd when the sample interval is unusable.
 */
static int be_get_new_eqd(struct be_eq_obj *eqo)
{
	struct be_adapter *adapter = eqo->adapter;
	int eqd, start;
	struct be_aic_obj *aic;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts = 0, tx_pkts = 0;
	ulong now;
	u32 pps, delta;
	int i;

	aic = &adapter->aic_obj[eqo->idx];
	if (!adapter->aic_enabled) {
		/* Reset the baseline so that re-enabling AIC starts fresh */
		if (aic->jiffies)
			aic->jiffies = 0;
		eqd = aic->et_eqd;
		return eqd;
	}

	/* Sum rx pkt counts of all RXQs serviced by this EQ */
	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		do {
			start = u64_stats_fetch_begin(&rxo->stats.sync);
			rx_pkts += rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry(&rxo->stats.sync, start));
	}

	/* Sum tx reqs of all TXQs serviced by this EQ */
	for_all_tx_queues_on_eq(adapter, eqo, txo, i) {
		do {
			start = u64_stats_fetch_begin(&txo->stats.sync);
			tx_pkts += txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry(&txo->stats.sync, start));
	}

	/* Skip, if wrapped around or first calculation */
	now = jiffies;
	if (!aic->jiffies || time_before(now, aic->jiffies) ||
	    rx_pkts < aic->rx_pkts_prev ||
	    tx_pkts < aic->tx_reqs_prev) {
		be_aic_update(aic, rx_pkts, tx_pkts, now);
		return aic->prev_eqd;
	}

	delta = jiffies_to_msecs(now - aic->jiffies);
	/* Avoid a divide-by-zero on a sub-millisecond interval */
	if (delta == 0)
		return aic->prev_eqd;

	/* Combined rx+tx packets-per-second over the sample interval */
	pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
		(((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
	eqd = (pps / 15000) << 2;

	/* Below this threshold do not delay interrupts at all */
	if (eqd < 8)
		eqd = 0;
	eqd = min_t(u32, eqd, aic->max_eqd);
	eqd = max_t(u32, eqd, aic->min_eqd);

	be_aic_update(aic, rx_pkts, tx_pkts, now);

	return eqd;
}
2195 
2196 /* For Skyhawk-R only */
2197 static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo)
2198 {
2199 	struct be_adapter *adapter = eqo->adapter;
2200 	struct be_aic_obj *aic = &adapter->aic_obj[eqo->idx];
2201 	ulong now = jiffies;
2202 	int eqd;
2203 	u32 mult_enc;
2204 
2205 	if (!adapter->aic_enabled)
2206 		return 0;
2207 
2208 	if (jiffies_to_msecs(now - aic->jiffies) < 1)
2209 		eqd = aic->prev_eqd;
2210 	else
2211 		eqd = be_get_new_eqd(eqo);
2212 
2213 	if (eqd > 100)
2214 		mult_enc = R2I_DLY_ENC_1;
2215 	else if (eqd > 60)
2216 		mult_enc = R2I_DLY_ENC_2;
2217 	else if (eqd > 20)
2218 		mult_enc = R2I_DLY_ENC_3;
2219 	else
2220 		mult_enc = R2I_DLY_ENC_0;
2221 
2222 	aic->prev_eqd = eqd;
2223 
2224 	return mult_enc;
2225 }
2226 
2227 void be_eqd_update(struct be_adapter *adapter, bool force_update)
2228 {
2229 	struct be_set_eqd set_eqd[MAX_EVT_QS];
2230 	struct be_aic_obj *aic;
2231 	struct be_eq_obj *eqo;
2232 	int i, num = 0, eqd;
2233 
2234 	for_all_evt_queues(adapter, eqo, i) {
2235 		aic = &adapter->aic_obj[eqo->idx];
2236 		eqd = be_get_new_eqd(eqo);
2237 		if (force_update || eqd != aic->prev_eqd) {
2238 			set_eqd[num].delay_multiplier = (eqd * 65)/100;
2239 			set_eqd[num].eq_id = eqo->q.id;
2240 			aic->prev_eqd = eqd;
2241 			num++;
2242 		}
2243 	}
2244 
2245 	if (num)
2246 		be_cmd_modify_eqd(adapter, set_eqd, num);
2247 }
2248 
2249 static void be_rx_stats_update(struct be_rx_obj *rxo,
2250 			       struct be_rx_compl_info *rxcp)
2251 {
2252 	struct be_rx_stats *stats = rx_stats(rxo);
2253 
2254 	u64_stats_update_begin(&stats->sync);
2255 	stats->rx_compl++;
2256 	stats->rx_bytes += rxcp->pkt_size;
2257 	stats->rx_pkts++;
2258 	if (rxcp->tunneled)
2259 		stats->rx_vxlan_offload_pkts++;
2260 	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
2261 		stats->rx_mcast_pkts++;
2262 	if (rxcp->err)
2263 		stats->rx_compl_err++;
2264 	u64_stats_update_end(&stats->sync);
2265 }
2266 
2267 static inline bool csum_passed(struct be_rx_compl_info *rxcp)
2268 {
2269 	/* L4 checksum is not reliable for non TCP/UDP packets.
2270 	 * Also ignore ipcksm for ipv6 pkts
2271 	 */
2272 	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
2273 		(rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
2274 }
2275 
/* Consume the page-info entry at the RXQ tail and make its data visible
 * to the CPU. The big (compound) page backing the frags is unmapped only
 * when its last frag is reaped; earlier frags just need a cache sync.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u32 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_frag) {
		/* Last frag of this big page: drop the whole DMA mapping */
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_frag = false;
	} else {
		/* Page mapping is still shared: sync just this frag */
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}

	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}
2301 
2302 /* Throwaway the data in the Rx completion */
2303 static void be_rx_compl_discard(struct be_rx_obj *rxo,
2304 				struct be_rx_compl_info *rxcp)
2305 {
2306 	struct be_rx_page_info *page_info;
2307 	u16 i, num_rcvd = rxcp->num_rcvd;
2308 
2309 	for (i = 0; i < num_rcvd; i++) {
2310 		page_info = get_rx_page_info(rxo);
2311 		put_page(page_info->page);
2312 		memset(page_info, 0, sizeof(*page_info));
2313 	}
2314 }
2315 
2316 /*
2317  * skb_fill_rx_data forms a complete skb for an ether frame
2318  * indicated by rxcp.
2319  */
2320 static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
2321 			     struct be_rx_compl_info *rxcp)
2322 {
2323 	struct be_rx_page_info *page_info;
2324 	u16 i, j;
2325 	u16 hdr_len, curr_frag_len, remaining;
2326 	u8 *start;
2327 
2328 	page_info = get_rx_page_info(rxo);
2329 	start = page_address(page_info->page) + page_info->page_offset;
2330 	prefetch(start);
2331 
2332 	/* Copy data in the first descriptor of this completion */
2333 	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
2334 
2335 	skb->len = curr_frag_len;
2336 	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
2337 		memcpy(skb->data, start, curr_frag_len);
2338 		/* Complete packet has now been moved to data */
2339 		put_page(page_info->page);
2340 		skb->data_len = 0;
2341 		skb->tail += curr_frag_len;
2342 	} else {
2343 		hdr_len = ETH_HLEN;
2344 		memcpy(skb->data, start, hdr_len);
2345 		skb_shinfo(skb)->nr_frags = 1;
2346 		skb_frag_set_page(skb, 0, page_info->page);
2347 		skb_frag_off_set(&skb_shinfo(skb)->frags[0],
2348 				 page_info->page_offset + hdr_len);
2349 		skb_frag_size_set(&skb_shinfo(skb)->frags[0],
2350 				  curr_frag_len - hdr_len);
2351 		skb->data_len = curr_frag_len - hdr_len;
2352 		skb->truesize += rx_frag_size;
2353 		skb->tail += hdr_len;
2354 	}
2355 	page_info->page = NULL;
2356 
2357 	if (rxcp->pkt_size <= rx_frag_size) {
2358 		BUG_ON(rxcp->num_rcvd != 1);
2359 		return;
2360 	}
2361 
2362 	/* More frags present for this completion */
2363 	remaining = rxcp->pkt_size - curr_frag_len;
2364 	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
2365 		page_info = get_rx_page_info(rxo);
2366 		curr_frag_len = min(remaining, rx_frag_size);
2367 
2368 		/* Coalesce all frags from the same physical page in one slot */
2369 		if (page_info->page_offset == 0) {
2370 			/* Fresh page */
2371 			j++;
2372 			skb_frag_set_page(skb, j, page_info->page);
2373 			skb_frag_off_set(&skb_shinfo(skb)->frags[j],
2374 					 page_info->page_offset);
2375 			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
2376 			skb_shinfo(skb)->nr_frags++;
2377 		} else {
2378 			put_page(page_info->page);
2379 		}
2380 
2381 		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
2382 		skb->len += curr_frag_len;
2383 		skb->data_len += curr_frag_len;
2384 		skb->truesize += rx_frag_size;
2385 		remaining -= curr_frag_len;
2386 		page_info->page = NULL;
2387 	}
2388 	BUG_ON(j > MAX_SKB_FRAGS);
2389 }
2390 
/* Process the RX completion indicated by rxcp when GRO is disabled:
 * allocate an skb, fill it from the posted frags, set checksum/hash/vlan
 * metadata and hand it to the network stack.
 */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		/* No memory for an skb: drop the frame but recycle frags */
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}
2426 
/* Process the RX completion indicated by rxcp when GRO is enabled:
 * attach the posted frags directly to napi's GRO skb (no copy) and pass
 * it up via napi_gro_frags().
 */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* j is u16: starting at (u16)-1 makes the first iteration's j++
	 * wrap to frag slot 0
	 */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_frag_off_set(&skb_shinfo(skb)->frags[j],
					 page_info->page_offset);
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->csum_level = rxcp->tunneled;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
2483 
/* Decode a v1 RX completion (used when adapter->be3_native) into the
 * driver's sw completion struct @rxcp.
 */
static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
	rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
	rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
	rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
	rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
	rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
	rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
	rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
	rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
	rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
	rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
	/* vlan fields are only meaningful when the vlanf flag is set */
	if (rxcp->vlanf) {
		rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
		rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
	}
	rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
	rxcp->tunneled =
		GET_RX_COMPL_V1_BITS(tunneled, compl);
}
2506 
/* Decode a v0 RX completion (used when !adapter->be3_native) into the
 * driver's sw completion struct @rxcp.
 */
static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
	rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
	rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
	rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
	rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
	rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
	rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
	rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
	rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
	rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
	rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
	/* vlan fields are only meaningful when the vlanf flag is set */
	if (rxcp->vlanf) {
		rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
		rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
	}
	rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
	rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
}
2528 
/* Fetch the next valid RX completion from @rxo's CQ, parse it into
 * rxo->rxcp and apply vlan fix-ups. Returns NULL when no completion is
 * pending.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Order the valid-bit read above against the reads of the rest of
	 * the completion below
	 */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* The HW L4 checksum is not valid for IP fragments */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* In QNQ modes, if qnq bit is not set, then the packet was
		 * tagged only with the transparent outer vlan-tag and must
		 * not be treated as a vlan packet by host
		 */
		if (be_is_qnq_mode(adapter) && !rxcp->qnq)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Hide the pvid tag from the stack unless that vid was
		 * explicitly configured by the user
		 */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !test_bit(rxcp->vlan_tag, adapter->vids))
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
2573 
2574 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
2575 {
2576 	u32 order = get_order(size);
2577 
2578 	if (order > 0)
2579 		gfp |= __GFP_COMP;
2580 	return  alloc_pages(gfp, order);
2581 }
2582 
2583 /*
2584  * Allocate a page, split it to fragments of size rx_frag_size and post as
2585  * receive buffers to BE
2586  */
2587 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
2588 {
2589 	struct be_adapter *adapter = rxo->adapter;
2590 	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
2591 	struct be_queue_info *rxq = &rxo->q;
2592 	struct page *pagep = NULL;
2593 	struct device *dev = &adapter->pdev->dev;
2594 	struct be_eth_rx_d *rxd;
2595 	u64 page_dmaaddr = 0, frag_dmaaddr;
2596 	u32 posted, page_offset = 0, notify = 0;
2597 
2598 	page_info = &rxo->page_info_tbl[rxq->head];
2599 	for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
2600 		if (!pagep) {
2601 			pagep = be_alloc_pages(adapter->big_page_size, gfp);
2602 			if (unlikely(!pagep)) {
2603 				rx_stats(rxo)->rx_post_fail++;
2604 				break;
2605 			}
2606 			page_dmaaddr = dma_map_page(dev, pagep, 0,
2607 						    adapter->big_page_size,
2608 						    DMA_FROM_DEVICE);
2609 			if (dma_mapping_error(dev, page_dmaaddr)) {
2610 				put_page(pagep);
2611 				pagep = NULL;
2612 				adapter->drv_stats.dma_map_errors++;
2613 				break;
2614 			}
2615 			page_offset = 0;
2616 		} else {
2617 			get_page(pagep);
2618 			page_offset += rx_frag_size;
2619 		}
2620 		page_info->page_offset = page_offset;
2621 		page_info->page = pagep;
2622 
2623 		rxd = queue_head_node(rxq);
2624 		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
2625 		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
2626 		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
2627 
2628 		/* Any space left in the current big page for another frag? */
2629 		if ((page_offset + rx_frag_size + rx_frag_size) >
2630 					adapter->big_page_size) {
2631 			pagep = NULL;
2632 			page_info->last_frag = true;
2633 			dma_unmap_addr_set(page_info, bus, page_dmaaddr);
2634 		} else {
2635 			dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
2636 		}
2637 
2638 		prev_page_info = page_info;
2639 		queue_head_inc(rxq);
2640 		page_info = &rxo->page_info_tbl[rxq->head];
2641 	}
2642 
2643 	/* Mark the last frag of a page when we break out of the above loop
2644 	 * with no more slots available in the RXQ
2645 	 */
2646 	if (pagep) {
2647 		prev_page_info->last_frag = true;
2648 		dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
2649 	}
2650 
2651 	if (posted) {
2652 		atomic_add(posted, &rxq->used);
2653 		if (rxo->rx_post_starved)
2654 			rxo->rx_post_starved = false;
2655 		do {
2656 			notify = min(MAX_NUM_POST_ERX_DB, posted);
2657 			be_rxq_notify(adapter, rxq->id, notify);
2658 			posted -= notify;
2659 		} while (posted);
2660 	} else if (atomic_read(&rxq->used) == 0) {
2661 		/* Let be_worker replenish when memory is available */
2662 		rxo->rx_post_starved = true;
2663 	}
2664 }
2665 
2666 static inline void be_update_tx_err(struct be_tx_obj *txo, u8 status)
2667 {
2668 	switch (status) {
2669 	case BE_TX_COMP_HDR_PARSE_ERR:
2670 		tx_stats(txo)->tx_hdr_parse_err++;
2671 		break;
2672 	case BE_TX_COMP_NDMA_ERR:
2673 		tx_stats(txo)->tx_dma_err++;
2674 		break;
2675 	case BE_TX_COMP_ACL_ERR:
2676 		tx_stats(txo)->tx_spoof_check_err++;
2677 		break;
2678 	}
2679 }
2680 
2681 static inline void lancer_update_tx_err(struct be_tx_obj *txo, u8 status)
2682 {
2683 	switch (status) {
2684 	case LANCER_TX_COMP_LSO_ERR:
2685 		tx_stats(txo)->tx_tso_err++;
2686 		break;
2687 	case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
2688 	case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
2689 		tx_stats(txo)->tx_spoof_check_err++;
2690 		break;
2691 	case LANCER_TX_COMP_QINQ_ERR:
2692 		tx_stats(txo)->tx_qinq_err++;
2693 		break;
2694 	case LANCER_TX_COMP_PARITY_ERR:
2695 		tx_stats(txo)->tx_internal_parity_err++;
2696 		break;
2697 	case LANCER_TX_COMP_DMA_ERR:
2698 		tx_stats(txo)->tx_dma_err++;
2699 		break;
2700 	case LANCER_TX_COMP_SGE_ERR:
2701 		tx_stats(txo)->tx_sge_err++;
2702 		break;
2703 	}
2704 }
2705 
/* Fetch the next valid TX completion from @txo's CQ, or NULL if none.
 * Updates TX error stats and may latch BE_ERROR_TX on fatal Lancer
 * errors, in which case the completion is left unconsumed.
 */
static struct be_tx_compl_info *be_tx_compl_get(struct be_adapter *adapter,
						struct be_tx_obj *txo)
{
	struct be_queue_info *tx_cq = &txo->cq;
	struct be_tx_compl_info *txcp = &txo->txcp;
	struct be_eth_tx_compl *compl = queue_tail_node(tx_cq);

	if (compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Ensure load ordering of valid bit dword and other dwords below */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	txcp->status = GET_TX_COMPL_BITS(status, compl);
	txcp->end_index = GET_TX_COMPL_BITS(wrb_index, compl);

	if (txcp->status) {
		if (lancer_chip(adapter)) {
			lancer_update_tx_err(txo, txcp->status);
			/* Reset the adapter incase of TSO,
			 * SGE or Parity error
			 */
			if (txcp->status == LANCER_TX_COMP_LSO_ERR ||
			    txcp->status == LANCER_TX_COMP_PARITY_ERR ||
			    txcp->status == LANCER_TX_COMP_SGE_ERR)
				be_set_error(adapter, BE_ERROR_TX);
		} else {
			be_update_tx_err(txo, txcp->status);
		}
	}

	/* In TX error state stop consuming; error recovery will clean up */
	if (be_check_error(adapter, BE_ERROR_TX))
		return NULL;

	compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
	queue_tail_inc(tx_cq);
	return txcp;
}
2745 
/* Reap TX wrbs from @txo's TXQ tail up to and including @last_index,
 * unmapping their DMA buffers and freeing the completed skbs.
 * Returns the number of wrbs processed.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct be_queue_info *txq = &txo->q;
	struct sk_buff *skb = NULL;
	bool unmap_skb_hdr = false;
	struct be_eth_wrb *wrb;
	u16 num_wrbs = 0;
	u32 frag_index;

	do {
		/* A non-NULL entry marks the hdr wrb of a new request */
		if (sent_skbs[txq->tail]) {
			/* Free skb from prev req */
			if (skb)
				dev_consume_skb_any(skb);
			skb = sent_skbs[txq->tail];
			sent_skbs[txq->tail] = NULL;
			queue_tail_inc(txq);  /* skip hdr wrb */
			num_wrbs++;
			unmap_skb_hdr = true;
		}
		wrb = queue_tail_node(txq);
		frag_index = txq->tail;
		/* First data wrb of a req maps the skb header; unmap it
		 * only when the header occupies the linear area
		 */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(skb)));
		unmap_skb_hdr = false;
		queue_tail_inc(txq);
		num_wrbs++;
	} while (frag_index != last_index);
	/* Free the skb of the final request */
	dev_consume_skb_any(skb);

	return num_wrbs;
}
2780 
2781 /* Return the number of events in the event queue */
2782 static inline int events_get(struct be_eq_obj *eqo)
2783 {
2784 	struct be_eq_entry *eqe;
2785 	int num = 0;
2786 
2787 	do {
2788 		eqe = queue_tail_node(&eqo->q);
2789 		if (eqe->evt == 0)
2790 			break;
2791 
2792 		rmb();
2793 		eqe->evt = 0;
2794 		num++;
2795 		queue_tail_inc(&eqo->q);
2796 	} while (true);
2797 
2798 	return num;
2799 }
2800 
2801 /* Leaves the EQ is disarmed state */
2802 static void be_eq_clean(struct be_eq_obj *eqo)
2803 {
2804 	int num = events_get(eqo);
2805 
2806 	be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
2807 }
2808 
2809 /* Free posted rx buffers that were not used */
2810 static void be_rxq_clean(struct be_rx_obj *rxo)
2811 {
2812 	struct be_queue_info *rxq = &rxo->q;
2813 	struct be_rx_page_info *page_info;
2814 
2815 	while (atomic_read(&rxq->used) > 0) {
2816 		page_info = get_rx_page_info(rxo);
2817 		put_page(page_info->page);
2818 		memset(page_info, 0, sizeof(*page_info));
2819 	}
2820 	BUG_ON(atomic_read(&rxq->used));
2821 	rxq->tail = 0;
2822 	rxq->head = 0;
2823 }
2824 
/* Drain @rxo's completion queue, discarding all pending completions,
 * and leave the CQ unarmed.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~50ms or if the HW is in error */
			if (flush_wait++ > 50 ||
			    be_check_error(adapter,
					   BE_ERROR_HW)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);
}
2864 
/* Drain all TX completion queues, then free any TX requests that were
 * queued but never notified to the HW, resetting the TXQ indices.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 cmpl = 0, timeo = 0, num_wrbs = 0;
	struct be_tx_compl_info *txcp;
	struct be_queue_info *txq;
	u32 end_idx, notified_idx;
	struct be_tx_obj *txo;
	int i, pending_txqs;

	/* Stop polling for compls when HW has been silent for 10ms */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			cmpl = 0;
			num_wrbs = 0;
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(adapter, txo))) {
				num_wrbs +=
					be_tx_compl_process(adapter, txo,
							    txcp->end_index);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				/* progress was made: restart the silence
				 * timeout
				 */
				timeo = 0;
			}
			if (!be_is_tx_compl_pending(txo))
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 10 ||
		    be_check_error(adapter, BE_ERROR_HW))
			break;

		mdelay(1);
	} while (true);

	/* Free enqueued TX that was never notified to HW */
	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;

		if (atomic_read(&txq->used)) {
			dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
				 i, atomic_read(&txq->used));
			notified_idx = txq->tail;
			end_idx = txq->tail;
			index_adv(&end_idx, atomic_read(&txq->used) - 1,
				  txq->len);
			/* Use the tx-compl process logic to handle requests
			 * that were not sent to the HW.
			 */
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
			BUG_ON(atomic_read(&txq->used));
			txo->pend_wrb_cnt = 0;
			/* Since hw was never notified of these requests,
			 * reset TXQ indices
			 */
			txq->head = notified_idx;
			txq->tail = notified_idx;
		}
	}
}
2931 
2932 static void be_evt_queues_destroy(struct be_adapter *adapter)
2933 {
2934 	struct be_eq_obj *eqo;
2935 	int i;
2936 
2937 	for_all_evt_queues(adapter, eqo, i) {
2938 		if (eqo->q.created) {
2939 			be_eq_clean(eqo);
2940 			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
2941 			netif_napi_del(&eqo->napi);
2942 			free_cpumask_var(eqo->affinity_mask);
2943 		}
2944 		be_queue_free(adapter, &eqo->q);
2945 	}
2946 }
2947 
/* Allocate and create the adapter's event queues (one per IRQ needed to
 * cover the RX and TX queues), enable adaptive coalescing, set up NAPI
 * and a NUMA-spread IRQ affinity hint per EQ.
 * NOTE(review): error paths return without freeing queues created so
 * far — presumably the caller invokes be_evt_queues_destroy(); verify.
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	struct be_aic_obj *aic;
	int i, rc;

	/* need enough EQs to service both RX and TX queues */
	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
				    max(adapter->cfg_num_rx_irqs,
					adapter->cfg_num_tx_irqs));

	adapter->aic_enabled = true;

	for_all_evt_queues(adapter, eqo, i) {
		int numa_node = dev_to_node(&adapter->pdev->dev);

		aic = &adapter->aic_obj[i];
		eqo->adapter = adapter;
		eqo->idx = i;
		aic->max_eqd = BE_MAX_EQD;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eqo);
		if (rc)
			return rc;

		if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
			return -ENOMEM;
		/* Spread EQ interrupts over CPUs local to the device */
		cpumask_set_cpu(cpumask_local_spread(i, numa_node),
				eqo->affinity_mask);
		netif_napi_add(adapter->netdev, &eqo->napi, be_poll);
	}
	return 0;
}
2988 
2989 static void be_mcc_queues_destroy(struct be_adapter *adapter)
2990 {
2991 	struct be_queue_info *q;
2992 
2993 	q = &adapter->mcc_obj.q;
2994 	if (q->created)
2995 		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
2996 	be_queue_free(adapter, q);
2997 
2998 	q = &adapter->mcc_obj.cq;
2999 	if (q->created)
3000 		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
3001 	be_queue_free(adapter, q);
3002 }
3003 
/* Create the MCC (management command) completion queue and work queue.
 * Must be called only after TX qs are created as MCC shares TX EQ.
 * The error labels unwind in reverse order of creation.
 * Returns 0 on success, -1 on any failure.
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
3036 
3037 static void be_tx_queues_destroy(struct be_adapter *adapter)
3038 {
3039 	struct be_queue_info *q;
3040 	struct be_tx_obj *txo;
3041 	u8 i;
3042 
3043 	for_all_tx_queues(adapter, txo, i) {
3044 		q = &txo->q;
3045 		if (q->created)
3046 			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
3047 		be_queue_free(adapter, q);
3048 
3049 		q = &txo->cq;
3050 		if (q->created)
3051 			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
3052 		be_queue_free(adapter, q);
3053 	}
3054 }
3055 
/* Create TX completion queues and TX work queues, binding each TX CQ to
 * an EQ (round-robin when there are fewer EQs than TXQs) and steering XPS
 * to the EQ's CPU affinity. Returns 0 on success or a negative status;
 * partially created queues are cleaned up by the caller.
 */
static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq;
	struct be_tx_obj *txo;
	struct be_eq_obj *eqo;
	int status, i;

	adapter->num_tx_qs = min(adapter->num_evt_qs, adapter->cfg_num_tx_irqs);

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		u64_stats_init(&txo->stats.sync);
		u64_stats_init(&txo->stats.sync_compl);

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eqo = &adapter->eq_obj[i % adapter->num_evt_qs];
		status = be_cmd_cq_create(adapter, cq, &eqo->q, false, 3);
		if (status)
			return status;

		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, txo);
		if (status)
			return status;

		/* Transmit on the CPUs that service this queue's EQ */
		netif_set_xps_queue(adapter->netdev, eqo->affinity_mask,
				    eqo->idx);
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}
3100 
3101 static void be_rx_cqs_destroy(struct be_adapter *adapter)
3102 {
3103 	struct be_queue_info *q;
3104 	struct be_rx_obj *rxo;
3105 	int i;
3106 
3107 	for_all_rx_queues(adapter, rxo, i) {
3108 		q = &rxo->cq;
3109 		if (q->created)
3110 			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
3111 		be_queue_free(adapter, q);
3112 	}
3113 }
3114 
/* Decide the number of RX queues (RSS rings plus an optional default RXQ)
 * and create a completion queue for each, bound round-robin to the EQs.
 * Returns 0 on success or a negative status.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	adapter->num_rss_qs =
			min(adapter->num_evt_qs, adapter->cfg_num_rx_irqs);

	/* We'll use RSS only if atleast 2 RSS rings are supported. */
	if (adapter->num_rss_qs < 2)
		adapter->num_rss_qs = 0;

	adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq;

	/* When the interface is not capable of RSS rings (and there is no
	 * need to create a default RXQ) we'll still need one RXQ
	 */
	if (adapter->num_rx_qs == 0)
		adapter->num_rx_qs = 1;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				    sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		u64_stats_init(&rxo->stats.sync);
		/* RX CQs are spread round-robin across the EQs */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RX queue(s)\n", adapter->num_rx_qs);
	return 0;
}
3156 
/* Legacy INTx interrupt handler: count pending EQ events and kick NAPI.
 * Events are counted only when NAPI can actually be scheduled here, so
 * that an in-progress NAPI run does not lose them.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionaly
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts, 0);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
3188 
/* MSI-x interrupt handler: acknowledge the EQ interrupt without re-arming
 * it (be_poll() re-arms when polling completes) and schedule NAPI.
 */
static irqreturn_t be_msix(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;

	be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
	napi_schedule(&eqo->napi);
	return IRQ_HANDLED;
}
3197 
3198 static inline bool do_gro(struct be_rx_compl_info *rxcp)
3199 {
3200 	return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
3201 }
3202 
/* NAPI RX processing for one RX ring: consume up to @budget completions,
 * hand packets to GRO or the regular receive path, then notify the CQ and
 * replenish RX buffers if the ring is running low.
 * Returns the number of completions processed (<= budget).
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			 int budget)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;
	u32 frags_consumed = 0;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, napi, rxcp);

loop_continue:
		/* stats are updated even for discarded/flush completions */
		frags_consumed += rxcp->num_rcvd;
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* When an rx-obj gets into post_starved state, just
		 * let be_worker do the posting.
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
		    !rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_ATOMIC,
					 max_t(u32, MAX_RX_POST,
					       frags_consumed));
	}

	return work_done;
}
3261 
3262 
/* NAPI TX processing for one TX ring: reap all available TX completions,
 * free the corresponding wrbs, and wake the netdev subqueue @idx if it was
 * stopped for lack of wrbs.
 */
static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int idx)
{
	int num_wrbs = 0, work_done = 0;
	struct be_tx_compl_info *txcp;

	while ((txcp = be_tx_compl_get(adapter, txo))) {
		num_wrbs += be_tx_compl_process(adapter, txo, txcp->end_index);
		work_done++;
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs.
		 */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
		    be_can_txq_wake(txo)) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
}
3290 
/* NAPI poll handler for one EQ: services all TX and RX queues bound to
 * this EQ (and MCC completions on the MCC EQ). If the budget was not
 * exhausted, completes NAPI and re-arms the EQ; otherwise only counts and
 * clears events so polling continues.
 * Returns the max RX work done across this EQ's RX queues.
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u32 mult_enc = 0;

	num_evts = events_get(eqo);

	for_all_tx_queues_on_eq(adapter, eqo, txo, i)
		be_process_tx(adapter, txo, i);

	/* This loop will iterate twice for EQ0 in which
	 * completions of the last RXQ (default one) are also processed
	 * For other EQs the loop iterates only once
	 */
	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		work = be_process_rx(rxo, napi, budget);
		max_work = max(work, max_work);
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete_done(napi, max_work);

		/* Skyhawk EQ_DB has a provision to set the rearm to interrupt
		 * delay via a delay multiplier encoding value
		 */
		if (skyhawk_chip(adapter))
			mult_enc = be_get_eq_delay_mult_enc(eqo);

		be_eq_notify(adapter, eqo->q.id, true, false, num_evts,
			     mult_enc);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts, 0);
	}
	return max_work;
}
3334 
/* Check the adapter for unrecoverable/hardware errors and latch them via
 * be_set_error(). On Lancer the SLIPORT status registers are consulted;
 * on BEx/Skyhawk the UE (unrecoverable error) status registers are read
 * and masked. No-op if a HW error was already flagged.
 */
void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	struct device *dev = &adapter->pdev->dev;
	u16 val;
	u32 i;

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			be_set_error(adapter, BE_ERROR_UE);
			sliport_err1 = ioread32(adapter->db +
						SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
						SLIPORT_ERROR2_OFFSET);
			/* Do not log error messages if its a FW reset */
			if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
			    sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
				dev_info(dev, "Reset is in progress\n");
			} else {
				dev_err(dev, "Error detected in the card\n");
				dev_err(dev, "ERR: sliport status 0x%x\n",
					sliport_status);
				dev_err(dev, "ERR: sliport error1 0x%x\n",
					sliport_err1);
				dev_err(dev, "ERR: sliport error2 0x%x\n",
					sliport_err2);
			}
		}
	} else {
		ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW);
		ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH);
		ue_lo_mask = ioread32(adapter->pcicfg +
				      PCICFG_UE_STATUS_LOW_MASK);
		ue_hi_mask = ioread32(adapter->pcicfg +
				      PCICFG_UE_STATUS_HI_MASK);

		/* masked-off bits are not real errors */
		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);

		if (ue_lo || ue_hi) {
			/* On certain platforms BE3 hardware can indicate
			 * spurious UEs. In case of a UE in the chip,
			 * the POST register correctly reports either a
			 * FAT_LOG_START state (FW is currently dumping
			 * FAT log data) or a ARMFW_UE state. Check for the
			 * above states to ascertain if the UE is valid or not.
			 */
			if (BE3_chip(adapter)) {
				val = be_POST_stage_get(adapter);
				if ((val & POST_STAGE_FAT_LOG_START)
				     != POST_STAGE_FAT_LOG_START &&
				    (val & POST_STAGE_ARMFW_UE)
				     != POST_STAGE_ARMFW_UE &&
				    (val & POST_STAGE_RECOVERABLE_ERR)
				     != POST_STAGE_RECOVERABLE_ERR)
					return;
			}

			dev_err(dev, "Error detected in the adapter");
			be_set_error(adapter, BE_ERROR_UE);

			/* log a description for every UE bit that is set */
			for (i = 0; ue_lo; ue_lo >>= 1, i++) {
				if (ue_lo & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_low_desc[i]);
			}
			for (i = 0; ue_hi; ue_hi >>= 1, i++) {
				if (ue_hi & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_hi_desc[i]);
			}
		}
	}
}
3414 
3415 static void be_msix_disable(struct be_adapter *adapter)
3416 {
3417 	if (msix_enabled(adapter)) {
3418 		pci_disable_msix(adapter->pdev);
3419 		adapter->num_msix_vec = 0;
3420 		adapter->num_msix_roce_vec = 0;
3421 	}
3422 }
3423 
/* Enable MSI-x vectors for NIC (and RoCE when supported), splitting the
 * granted vectors between the two. Returns 0 on success. On failure,
 * returns the error only for VFs (which cannot fall back to INTx);
 * for PFs, 0 is returned so the caller can continue with INTx.
 */
static int be_msix_enable(struct be_adapter *adapter)
{
	unsigned int i, max_roce_eqs;
	struct device *dev = &adapter->pdev->dev;
	int num_vec;

	/* If RoCE is supported, program the max number of vectors that
	 * could be used for NIC and RoCE, else, just program the number
	 * we'll use initially.
	 */
	if (be_roce_supported(adapter)) {
		max_roce_eqs =
			be_max_func_eqs(adapter) - be_max_nic_eqs(adapter);
		max_roce_eqs = min(max_roce_eqs, num_online_cpus());
		num_vec = be_max_any_irqs(adapter) + max_roce_eqs;
	} else {
		num_vec = max(adapter->cfg_num_rx_irqs,
			      adapter->cfg_num_tx_irqs);
	}

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	/* may grant fewer vectors than requested (>= MIN_MSIX_VECTORS) */
	num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					MIN_MSIX_VECTORS, num_vec);
	if (num_vec < 0)
		goto fail;

	if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
		adapter->num_msix_roce_vec = num_vec / 2;
		dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
			 adapter->num_msix_roce_vec);
	}

	adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;

	dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
		 adapter->num_msix_vec);
	return 0;

fail:
	dev_warn(dev, "MSIx enable failed\n");

	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (be_virtfn(adapter))
		return num_vec;
	return 0;
}
3472 
/* Return the Linux IRQ number backing this EQ's MSI-x table slot */
static inline int be_msix_vec_get(struct be_adapter *adapter,
				  struct be_eq_obj *eqo)
{
	return adapter->msix_entries[eqo->msix_idx].vector;
}
3478 
/* Request one MSI-x IRQ per event queue and set its CPU affinity hint.
 * On failure, frees the IRQs requested so far and disables MSI-x.
 * Returns 0 on success or the request_irq() error.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;

		irq_update_affinity_hint(vec, eqo->affinity_mask);
	}

	return 0;
err_msix:
	/* unwind: release only the IRQs successfully requested above */
	for (i--; i >= 0; i--) {
		eqo = &adapter->eq_obj[i];
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	}
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		 status);
	be_msix_disable(adapter);
	return status;
}
3506 
/* Register interrupt handlers: prefer MSI-x; fall back to shared INTx on
 * PFs (VFs have no INTx). Sets adapter->isr_registered on success.
 * Returns 0 on success or a negative errno.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (be_virtfn(adapter))
			return status;
	}

	/* INTx: only the first EQ is used */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			     &adapter->eq_obj[0]);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
3534 
/* Undo be_irq_register(): free the INTx IRQ or each MSI-x IRQ (clearing
 * its affinity hint first). No-op if no ISR was registered.
 */
static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int i, vec;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, &adapter->eq_obj[0]);
		goto done;
	}

	/* MSIx */
	for_all_evt_queues(adapter, eqo, i) {
		vec = be_msix_vec_get(adapter, eqo);
		irq_update_affinity_hint(vec, NULL);
		free_irq(vec, eqo);
	}

done:
	adapter->isr_registered = false;
}
3560 
/* Destroy all RX queues, drain their completions/buffers, and disable RSS
 * if it was configured. Includes a Lancer-specific workaround that posts
 * buffers before destroying an empty RXQ to avoid an HW stall.
 */
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct rss_info *rss = &adapter->rss_info;
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			/* If RXQs are destroyed while in an "out of buffer"
			 * state, there is a possibility of an HW stall on
			 * Lancer. So, post 64 buffers to each queue to relieve
			 * the "out of buffer" condition.
			 * Make sure there's space in the RXQ before posting.
			 */
			if (lancer_chip(adapter)) {
				be_rx_cq_clean(rxo);
				if (atomic_read(&q->used) == 0)
					be_post_rx_frags(rxo, GFP_KERNEL,
							 MAX_RX_POST);
			}

			be_cmd_rxq_destroy(adapter, q);
			be_rx_cq_clean(rxo);
			be_rxq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}

	if (rss->rss_flags) {
		/* turn off RSS in FW before the RXQs disappear */
		rss->rss_flags = RSS_ENABLE_NONE;
		be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
				  128, rss->rss_hkey);
	}
}
3597 
/* Remove interface filters on close: delete the programmed MAC (where
 * privileges allow), clear unicast/multicast lists, and - on Lancer only -
 * clear the RX-filter IFACE flags (see the FW-bug note below).
 */
static void be_disable_if_filters(struct be_adapter *adapter)
{
	/* Don't delete MAC on BE3 VFs without FILTMGMT privilege  */
	if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
	    check_privilege(adapter, BE_PRIV_FILTMGMT)) {
		be_dev_mac_del(adapter, adapter->pmac_id[0]);
		eth_zero_addr(adapter->dev_mac);
	}

	be_clear_uc_list(adapter);
	be_clear_mc_list(adapter);

	/* The IFACE flags are enabled in the open path and cleared
	 * in the close path. When a VF gets detached from the host and
	 * assigned to a VM the following happens:
	 *	- VF's IFACE flags get cleared in the detach path
	 *	- IFACE create is issued by the VF in the attach path
	 * Due to a bug in the BE3/Skyhawk-R FW
	 * (Lancer FW doesn't have the bug), the IFACE capability flags
	 * specified along with the IFACE create cmd issued by a VF are not
	 * honoured by FW.  As a consequence, if a *new* driver
	 * (that enables/disables IFACE flags in open/close)
	 * is loaded in the host and an *old* driver is * used by a VM/VF,
	 * the IFACE gets created *without* the needed flags.
	 * To avoid this, disable RX-filter flags only for Lancer.
	 */
	if (lancer_chip(adapter)) {
		be_cmd_rx_filter(adapter, BE_IF_ALL_FILT_FLAGS, OFF);
		adapter->if_flags &= ~BE_IF_ALL_FILT_FLAGS;
	}
}
3629 
/* ndo_stop handler: quiesce the interface - disable filters, NAPI and MCC,
 * drain TX completions, destroy RX queues, flush/clean EQs and unregister
 * IRQs. Safe to call when setup was never completed. Always returns 0.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* This protection is needed as be_close() may be called even when the
	 * adapter is in cleared state (after eeh perm failure)
	 */
	if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
		return 0;

	/* Before attempting cleanup ensure all the pending cmds in the
	 * config_wq have finished execution
	 */
	flush_workqueue(be_wq);

	be_disable_if_filters(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i) {
			napi_disable(&eqo->napi);
		}
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	netif_tx_disable(netdev);
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);

	/* make sure no in-flight interrupt/NAPI work races the teardown */
	for_all_evt_queues(adapter, eqo, i) {
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
3678 
/* Create the RX rings (default RXQ and/or RSS rings), program the RSS
 * indirection table and hash key when more than one ring exists, and
 * post initial receive buffers. Returns 0 on success or a negative
 * status; partial creations are cleaned up by the caller.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct rss_info *rss = &adapter->rss_info;
	u8 rss_key[RSS_HASH_KEY_LEN];
	struct be_rx_obj *rxo;
	int rc, i, j;

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	if (adapter->need_def_rxq || !adapter->num_rss_qs) {
		rxo = default_rxo(adapter);
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       false, &rxo->rss_id);
		if (rc)
			return rc;
	}

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* fill the indirection table round-robin with the RSS rings */
		for (j = 0; j < RSS_INDIR_TABLE_LEN; j += adapter->num_rss_qs) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= RSS_INDIR_TABLE_LEN)
					break;
				rss->rsstable[j + i] = rxo->rss_id;
				rss->rss_queue[j + i] = i;
			}
		}
		rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
			RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;

		/* UDP RSS is not supported on BE2/BE3 */
		if (!BEx_chip(adapter))
			rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
				RSS_ENABLE_UDP_IPV6;

		netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
		rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
				       RSS_INDIR_TABLE_LEN, rss_key);
		if (rc) {
			rss->rss_flags = RSS_ENABLE_NONE;
			return rc;
		}

		memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
	} else {
		/* Disable RSS, if only default RX Q is created */
		rss->rss_flags = RSS_ENABLE_NONE;
	}


	/* Post 1 less than RXQ-len to avoid head being equal to tail,
	 * which is a queue empty condition
	 */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL, RX_Q_LEN - 1);

	return 0;
}
3749 
/* Re-enable interface filters on open: basic RX filters, the device MAC
 * (replacing any stale pre-programmed one), VLANs and the RX mode.
 * Returns 0 on success or a negative status.
 */
static int be_enable_if_filters(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_rx_filter(adapter, BE_IF_FILT_FLAGS_BASIC, ON);
	if (status)
		return status;

	/* Normally this condition usually true as the ->dev_mac is zeroed.
	 * But on BE3 VFs the initial MAC is pre-programmed by PF and
	 * subsequent be_dev_mac_add() can fail (after fresh boot)
	 */
	if (!ether_addr_equal(adapter->dev_mac, adapter->netdev->dev_addr)) {
		int old_pmac_id = -1;

		/* Remember old programmed MAC if any - can happen on BE3 VF */
		if (!is_zero_ether_addr(adapter->dev_mac))
			old_pmac_id = adapter->pmac_id[0];

		status = be_dev_mac_add(adapter, adapter->netdev->dev_addr);
		if (status)
			return status;

		/* Delete the old programmed MAC as we successfully programmed
		 * a new MAC
		 */
		if (old_pmac_id >= 0 && old_pmac_id != adapter->pmac_id[0])
			be_dev_mac_del(adapter, old_pmac_id);

		ether_addr_copy(adapter->dev_mac, adapter->netdev->dev_addr);
	}

	if (adapter->vlans_added)
		be_vid_config(adapter);

	__be_set_rx_mode(adapter);

	return 0;
}
3789 
/* ndo_open handler: create RX queues, enable filters, register IRQs,
 * arm all CQs/EQs, enable NAPI and MCC, report link state and start the
 * TX queues. On any failure the partial setup is undone via be_close()
 * and -EIO is returned.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_enable_if_filters(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	/* arm the RX and TX completion queues */
	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_eq_notify(adapter, eqo->q.id, true, true, 0, 0);
	}
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);

	/* replay any UDP tunnel (VxLAN) port offload configuration */
	udp_tunnel_nic_reset_ntf(netdev);

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
3838 
3839 static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
3840 {
3841 	u32 addr;
3842 
3843 	addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
3844 
3845 	mac[5] = (u8)(addr & 0xFF);
3846 	mac[4] = (u8)((addr >> 8) & 0xFF);
3847 	mac[3] = (u8)((addr >> 16) & 0xFF);
3848 	/* Use the OUI from the current MAC address */
3849 	memcpy(mac, adapter->netdev->dev_addr, 3);
3850 }
3851 
3852 /*
3853  * Generate a seed MAC address from the PF MAC Address using jhash.
3854  * MAC Address for VFs are assigned incrementally starting from the seed.
3855  * These addresses are programmed in the ASIC by the PF and the VF driver
3856  * queries for the MAC address during its probe.
3857  */
3858 static int be_vf_eth_addr_config(struct be_adapter *adapter)
3859 {
3860 	u32 vf;
3861 	int status = 0;
3862 	u8 mac[ETH_ALEN];
3863 	struct be_vf_cfg *vf_cfg;
3864 
3865 	be_vf_eth_addr_generate(adapter, mac);
3866 
3867 	for_all_vfs(adapter, vf_cfg, vf) {
3868 		if (BEx_chip(adapter))
3869 			status = be_cmd_pmac_add(adapter, mac,
3870 						 vf_cfg->if_handle,
3871 						 &vf_cfg->pmac_id, vf + 1);
3872 		else
3873 			status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
3874 						vf + 1);
3875 
3876 		if (status)
3877 			dev_err(&adapter->pdev->dev,
3878 				"Mac address assignment failed for VF %d\n",
3879 				vf);
3880 		else
3881 			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3882 
3883 		mac[5] += 1;
3884 	}
3885 	return status;
3886 }
3887 
3888 static int be_vfs_mac_query(struct be_adapter *adapter)
3889 {
3890 	int status, vf;
3891 	u8 mac[ETH_ALEN];
3892 	struct be_vf_cfg *vf_cfg;
3893 
3894 	for_all_vfs(adapter, vf_cfg, vf) {
3895 		status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3896 					       mac, vf_cfg->if_handle,
3897 					       false, vf+1);
3898 		if (status)
3899 			return status;
3900 		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3901 	}
3902 	return 0;
3903 }
3904 
/* Tear down SR-IOV state: disable SR-IOV (unless VFs are still assigned
 * to VMs), delete each VF's MAC and interface, restore BE3 port-forwarding
 * mode, and free the per-VF config array.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (pci_vfs_assigned(adapter->pdev)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		if (BEx_chip(adapter))
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);
		else
			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
				       vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}

	if (BE3_chip(adapter))
		be_cmd_set_hsw_config(adapter, 0, 0,
				      adapter->if_handle,
				      PORT_FWD_TYPE_PASSTHRU, 0);
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
	adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
}
3938 
/* Destroy all queues in the reverse order of their creation:
 * MCC first, then RX CQs, TX queues and finally the event queues.
 */
static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}
3946 
3947 static void be_cancel_worker(struct be_adapter *adapter)
3948 {
3949 	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3950 		cancel_delayed_work_sync(&adapter->work);
3951 		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3952 	}
3953 }
3954 
3955 static void be_cancel_err_detection(struct be_adapter *adapter)
3956 {
3957 	struct be_error_recovery *err_rec = &adapter->error_recovery;
3958 
3959 	if (!be_err_recovery_workq)
3960 		return;
3961 
3962 	if (adapter->flags & BE_FLAGS_ERR_DETECTION_SCHEDULED) {
3963 		cancel_delayed_work_sync(&err_rec->err_detection_work);
3964 		adapter->flags &= ~BE_FLAGS_ERR_DETECTION_SCHEDULED;
3965 	}
3966 }
3967 
/* VxLAN offload Notes:
 *
 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
 * is expected to work across all types of IP tunnels once exported. Skyhawk
 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
 * offloads in hw_enc_features only when a VxLAN port is added. If other (non
 * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
 * those other tunnels are unexported on the fly through ndo_features_check().
 */
/* udp_tunnel_nic set_port callback: switch the interface to tunnel mode,
 * program the VxLAN UDP port in FW and export the tunnel offload features.
 * Returns 0 on success or the FW command status.
 */
static int be_vxlan_set_port(struct net_device *netdev, unsigned int table,
			     unsigned int entry, struct udp_tunnel_info *ti)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_cmd_manage_iface(adapter, adapter->if_handle,
				     OP_CONVERT_NORMAL_TO_TUNNEL);
	if (status) {
		dev_warn(dev, "Failed to convert normal interface to tunnel\n");
		return status;
	}
	adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;

	status = be_cmd_set_vxlan_port(adapter, ti->port);
	if (status) {
		dev_warn(dev, "Failed to add VxLAN port\n");
		return status;
	}
	adapter->vxlan_port = ti->port;

	netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
				   NETIF_F_TSO | NETIF_F_TSO6 |
				   NETIF_F_GSO_UDP_TUNNEL;

	dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(ti->port));
	return 0;
}
4008 
/* udp_tunnel_nic unset_port callback: revert the interface to normal mode,
 * clear the VxLAN port in FW and withdraw the tunnel offload features.
 * Always returns 0.
 */
static int be_vxlan_unset_port(struct net_device *netdev, unsigned int table,
			       unsigned int entry, struct udp_tunnel_info *ti)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
		be_cmd_manage_iface(adapter, adapter->if_handle,
				    OP_CONVERT_TUNNEL_TO_NORMAL);

	if (adapter->vxlan_port)
		be_cmd_set_vxlan_port(adapter, 0);

	adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = 0;

	netdev->hw_enc_features = 0;
	return 0;
}
4027 
/* A single VxLAN UDP port is supported (one table entry). Ports are only
 * (un)programmed while the netdev is open, and the callbacks may sleep
 * since they issue FW cmds.
 */
static const struct udp_tunnel_nic_info be_udp_tunnels = {
	.set_port	= be_vxlan_set_port,
	.unset_port	= be_vxlan_unset_port,
	.flags		= UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
			  UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
	.tables		= {
		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
	},
};
4037 
/* Compute the per-VF share (@vft_res) of the PF-pool resources when
 * @num_vfs VFs are to be enabled. Queue, MAC, VLAN, iface and MCCQ
 * counts are split evenly between the PF and its VFs; RSS is granted to
 * VFs only when enough RSS queues/tables remain and the IFACE RSS flags
 * are modifiable per the FW profile.
 */
static void be_calculate_vf_res(struct be_adapter *adapter, u16 num_vfs,
				struct be_resources *vft_res)
{
	struct be_resources res = adapter->pool_res;
	u32 vf_if_cap_flags = res.vf_if_cap_flags;
	struct be_resources res_mod = {0};
	u16 num_vf_qs = 1;

	/* Distribute the queue resources among the PF and its VFs */
	if (num_vfs) {
		/* Divide the rx queues evenly among the VFs and the PF, capped
		 * at VF-EQ-count. Any remainder queues belong to the PF.
		 */
		num_vf_qs = min(SH_VF_MAX_NIC_EQS,
				res.max_rss_qs / (num_vfs + 1));

		/* Skyhawk-R chip supports only MAX_PORT_RSS_TABLES
		 * RSS Tables per port. Provide RSS on VFs, only if number of
		 * VFs requested is less than its PF Pool's RSS Tables limit.
		 */
		if (num_vfs >= be_max_pf_pool_rss_tables(adapter))
			num_vf_qs = 1;
	}

	/* Resource with fields set to all '1's by GET_PROFILE_CONFIG cmd,
	 * which are modifiable using SET_PROFILE_CONFIG cmd.
	 */
	be_cmd_get_profile_config(adapter, &res_mod, NULL, ACTIVE_PROFILE_TYPE,
				  RESOURCE_MODIFIABLE, 0);

	/* If RSS IFACE capability flags are modifiable for a VF, set the
	 * capability flag as valid and set RSS and DEFQ_RSS IFACE flags if
	 * more than 1 RSSQ is available for a VF.
	 * Otherwise, provision only 1 queue pair for VF.
	 */
	if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_RSS) {
		vft_res->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
		if (num_vf_qs > 1) {
			vf_if_cap_flags |= BE_IF_FLAGS_RSS;
			if (res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS)
				vf_if_cap_flags |= BE_IF_FLAGS_DEFQ_RSS;
		} else {
			vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS |
					     BE_IF_FLAGS_DEFQ_RSS);
		}
	} else {
		num_vf_qs = 1;
	}

	/* Never let VFs enable VLAN promiscuous mode, when that capability
	 * bit is modifiable.
	 */
	if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
		vft_res->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
		vf_if_cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
	}

	vft_res->vf_if_cap_flags = vf_if_cap_flags;
	vft_res->max_rx_qs = num_vf_qs;
	vft_res->max_rss_qs = num_vf_qs;
	vft_res->max_tx_qs = res.max_tx_qs / (num_vfs + 1);
	vft_res->max_cq_count = res.max_cq_count / (num_vfs + 1);

	/* Distribute unicast MACs, VLANs, IFACE count and MCCQ count equally
	 * among the PF and its VFs, if the fields are changeable
	 */
	if (res_mod.max_uc_mac == FIELD_MODIFIABLE)
		vft_res->max_uc_mac = res.max_uc_mac / (num_vfs + 1);

	if (res_mod.max_vlans == FIELD_MODIFIABLE)
		vft_res->max_vlans = res.max_vlans / (num_vfs + 1);

	if (res_mod.max_iface_count == FIELD_MODIFIABLE)
		vft_res->max_iface_count = res.max_iface_count / (num_vfs + 1);

	if (res_mod.max_mcc_count == FIELD_MODIFIABLE)
		vft_res->max_mcc_count = res.max_mcc_count / (num_vfs + 1);
}
4113 
/* Destroy this function's iface in FW and free the filter bookkeeping
 * arrays allocated in be_if_create(). Pointers are NULLed so a repeated
 * call (e.g. from an error path) is safe.
 */
static void be_if_destroy(struct be_adapter *adapter)
{
	be_cmd_if_destroy(adapter, adapter->if_handle,  0);

	kfree(adapter->pmac_id);
	adapter->pmac_id = NULL;

	kfree(adapter->mc_list);
	adapter->mc_list = NULL;

	kfree(adapter->uc_list);
	adapter->uc_list = NULL;
}
4127 
/* Undo be_setup(): tear down VFs, VxLAN offloads, the iface, all queues
 * and MSI-x. On Skyhawk PFs (with no VFs currently assigned to guests)
 * FW is also told to re-spread the PF-pool resources across the max
 * supported VFs, ready for the next enablement. Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct  be_resources vft_res = {0};

	be_cancel_worker(adapter);

	/* Let any in-flight cmd work items finish before teardown */
	flush_workqueue(be_wq);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* Re-configure FW to distribute resources evenly across max-supported
	 * number of VFs, only when VFs are not already enabled.
	 */
	if (skyhawk_chip(adapter) && be_physfn(adapter) &&
	    !pci_vfs_assigned(pdev)) {
		be_calculate_vf_res(adapter,
				    pci_sriov_get_totalvfs(pdev),
				    &vft_res);
		be_cmd_set_sriov_config(adapter, adapter->pool_res,
					pci_sriov_get_totalvfs(pdev),
					&vft_res);
	}

	be_vxlan_unset_port(adapter->netdev, 0, 0, NULL);

	be_if_destroy(adapter);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	adapter->flags &= ~BE_FLAGS_SETUP_DONE;
	return 0;
}
4163 
/* Create an iface in FW on behalf of each VF (proxy if_create calls in
 * domain vf + 1). cap_flags come from the VF's FW profile on non-BE3
 * chips when available; VLAN promiscuous capability is always stripped.
 * Returns 0 or the first failing cmd's status.
 */
static int be_vfs_if_create(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	u32 cap_flags, en_flags, vf;
	struct be_vf_cfg *vf_cfg;
	int status;

	/* If a FW profile exists, then cap_flags are updated */
	cap_flags = BE_VF_IF_EN_FLAGS;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (!BE3_chip(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res, NULL,
							   ACTIVE_PROFILE_TYPE,
							   RESOURCE_LIMITS,
							   vf + 1);
			if (!status) {
				cap_flags = res.if_cap_flags;
				/* Prevent VFs from enabling VLAN promiscuous
				 * mode
				 */
				cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
			}
		}

		/* PF should enable IF flags during proxy if_create call */
		en_flags = cap_flags & BE_VF_IF_EN_FLAGS;
		status = be_cmd_if_create(adapter, cap_flags, en_flags,
					  &vf_cfg->if_handle, vf + 1);
		if (status)
			return status;
	}

	return 0;
}
4199 
4200 static int be_vf_setup_init(struct be_adapter *adapter)
4201 {
4202 	struct be_vf_cfg *vf_cfg;
4203 	int vf;
4204 
4205 	adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
4206 				  GFP_KERNEL);
4207 	if (!adapter->vf_cfg)
4208 		return -ENOMEM;
4209 
4210 	for_all_vfs(adapter, vf_cfg, vf) {
4211 		vf_cfg->if_handle = -1;
4212 		vf_cfg->pmac_id = -1;
4213 	}
4214 	return 0;
4215 }
4216 
/* Enable SR-IOV: create (or re-discover) per-VF ifaces and MACs, grant
 * filter-management privileges, configure QoS/link-state/spoof-check and
 * finally enable the VFs in PCI config space. When VFs were left enabled
 * by a previous driver load (old_vfs != 0), existing iface handles and
 * MACs are queried from FW instead of being re-created.
 * Returns 0 on success; on failure all VF state is cleared again.
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_vf_cfg *vf_cfg;
	int status, old_vfs, vf;
	bool spoofchk;

	old_vfs = pci_num_vf(adapter->pdev);

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	if (old_vfs) {
		/* VFs already exist: re-learn their iface ids and MACs */
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}

		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		/* Fresh enable: create ifaces and program initial MACs */
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;

		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to program MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges,
						  vf + 1);
		if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  vf_cfg->privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status) {
				vf_cfg->privileges |= BE_PRIV_FILTMGMT;
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
			}
		}

		/* Allow full available bandwidth */
		if (!old_vfs)
			be_cmd_config_qos(adapter, 0, 0, vf + 1);

		/* Cache the VF's current spoof-check setting */
		status = be_cmd_get_hsw_config(adapter, NULL, vf + 1,
					       vf_cfg->if_handle, NULL,
					       &spoofchk);
		if (!status)
			vf_cfg->spoofchk = spoofchk;

		if (!old_vfs) {
			be_cmd_enable_vf(adapter, vf + 1);
			be_cmd_set_logical_link_config(adapter,
						       IFLA_VF_LINK_STATE_AUTO,
						       vf+1);
		}
	}

	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}

	if (BE3_chip(adapter)) {
		/* On BE3, enable VEB only when SRIOV is enabled */
		status = be_cmd_set_hsw_config(adapter, 0, 0,
					       adapter->if_handle,
					       PORT_FWD_TYPE_VEB, 0);
		if (status)
			goto err;
	}

	adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
4309 
4310 /* Converting function_mode bits on BE3 to SH mc_type enums */
4311 
4312 static u8 be_convert_mc_type(u32 function_mode)
4313 {
4314 	if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
4315 		return vNIC1;
4316 	else if (function_mode & QNQ_MODE)
4317 		return FLEX10;
4318 	else if (function_mode & VNIC_MODE)
4319 		return vNIC2;
4320 	else if (function_mode & UMC_ENABLED)
4321 		return UMC;
4322 	else
4323 		return MC_NONE;
4324 }
4325 
4326 /* On BE2/BE3 FW does not suggest the supported limits */
4327 static void BEx_get_resources(struct be_adapter *adapter,
4328 			      struct be_resources *res)
4329 {
4330 	bool use_sriov = adapter->num_vfs ? 1 : 0;
4331 
4332 	if (be_physfn(adapter))
4333 		res->max_uc_mac = BE_UC_PMAC_COUNT;
4334 	else
4335 		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
4336 
4337 	adapter->mc_type = be_convert_mc_type(adapter->function_mode);
4338 
4339 	if (be_is_mc(adapter)) {
4340 		/* Assuming that there are 4 channels per port,
4341 		 * when multi-channel is enabled
4342 		 */
4343 		if (be_is_qnq_mode(adapter))
4344 			res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
4345 		else
4346 			/* In a non-qnq multichannel mode, the pvid
4347 			 * takes up one vlan entry
4348 			 */
4349 			res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
4350 	} else {
4351 		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
4352 	}
4353 
4354 	res->max_mcast_mac = BE_MAX_MC;
4355 
4356 	/* 1) For BE3 1Gb ports, FW does not support multiple TXQs
4357 	 * 2) Create multiple TX rings on a BE3-R multi-channel interface
4358 	 *    *only* if it is RSS-capable.
4359 	 */
4360 	if (BE2_chip(adapter) || use_sriov ||  (adapter->port_num > 1) ||
4361 	    be_virtfn(adapter) ||
4362 	    (be_is_mc(adapter) &&
4363 	     !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
4364 		res->max_tx_qs = 1;
4365 	} else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
4366 		struct be_resources super_nic_res = {0};
4367 
4368 		/* On a SuperNIC profile, the driver needs to use the
4369 		 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
4370 		 */
4371 		be_cmd_get_profile_config(adapter, &super_nic_res, NULL,
4372 					  ACTIVE_PROFILE_TYPE, RESOURCE_LIMITS,
4373 					  0);
4374 		/* Some old versions of BE3 FW don't report max_tx_qs value */
4375 		res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
4376 	} else {
4377 		res->max_tx_qs = BE3_MAX_TX_QS;
4378 	}
4379 
4380 	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
4381 	    !use_sriov && be_physfn(adapter))
4382 		res->max_rss_qs = (adapter->be3_native) ?
4383 					   BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
4384 	res->max_rx_qs = res->max_rss_qs + 1;
4385 
4386 	if (be_physfn(adapter))
4387 		res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
4388 					BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
4389 	else
4390 		res->max_evt_qs = 1;
4391 
4392 	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
4393 	res->if_cap_flags &= ~BE_IF_FLAGS_DEFQ_RSS;
4394 	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
4395 		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
4396 }
4397 
4398 static void be_setup_init(struct be_adapter *adapter)
4399 {
4400 	adapter->vlan_prio_bmap = 0xff;
4401 	adapter->phy.link_speed = -1;
4402 	adapter->if_handle = -1;
4403 	adapter->be3_native = false;
4404 	adapter->if_flags = 0;
4405 	adapter->phy_state = BE_UNKNOWN_PHY_STATE;
4406 	if (be_physfn(adapter))
4407 		adapter->cmd_privileges = MAX_PRIVILEGES;
4408 	else
4409 		adapter->cmd_privileges = MIN_PRIVILEGES;
4410 }
4411 
/* HW supports only MAX_PORT_RSS_TABLES RSS Policy Tables per port.
 * However, this HW limitation is not exposed to the host via any SLI cmd.
 * As a result, in the case of SRIOV and in particular multi-partition configs
 * the driver needs to calculate a proportional share of RSS Tables per PF-pool
 * for distribution between the VFs. This self-imposed limit will determine the
 * no: of VFs for which RSS can be enabled.
 */
static void be_calculate_pf_pool_rss_tables(struct be_adapter *adapter)
{
	struct be_port_resources port_res = {0};
	u8 rss_tables_on_port;
	u16 max_vfs = be_max_vfs(adapter);

	/* Query port-wide limits from the saved (power-on) profile */
	be_cmd_get_profile_config(adapter, NULL, &port_res, SAVED_PROFILE_TYPE,
				  RESOURCE_LIMITS, 0);

	/* Tables remaining after each NIC PF on the port takes one */
	rss_tables_on_port = MAX_PORT_RSS_TABLES - port_res.nic_pfs;

	/* Each PF Pool's RSS Tables limit =
	 * PF's Max VFs / Total_Max_VFs on Port * RSS Tables on Port
	 */
	adapter->pool_res.max_rss_tables =
		max_vfs * rss_tables_on_port / port_res.max_vfs;
}
4436 
/* Read the PF-pool SRIOV limits into adapter->pool_res and, on Skyhawk,
 * pre-compute the PF-pool RSS-tables share. If VFs were left enabled by
 * a previous driver load, the pci-dev TotalVFs value is trusted instead
 * of the FW-reported pool limit. Always returns 0.
 */
static int be_get_sriov_config(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	int max_vfs, old_vfs;

	be_cmd_get_profile_config(adapter, &res, NULL, ACTIVE_PROFILE_TYPE,
				  RESOURCE_LIMITS, 0);

	/* Some old versions of BE3 FW don't report max_vfs value */
	if (BE3_chip(adapter) && !res.max_vfs) {
		max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
		res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
	}

	adapter->pool_res = res;

	/* If during previous unload of the driver, the VFs were not disabled,
	 * then we cannot rely on the PF POOL limits for the TotalVFs value.
	 * Instead use the TotalVFs value stored in the pci-dev struct.
	 */
	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		dev_info(&adapter->pdev->dev, "%d VFs are already enabled\n",
			 old_vfs);

		adapter->pool_res.max_vfs =
			pci_sriov_get_totalvfs(adapter->pdev);
		adapter->num_vfs = old_vfs;
	}

	if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
		be_calculate_pf_pool_rss_tables(adapter);
		dev_info(&adapter->pdev->dev,
			 "RSS can be enabled for all VFs if num_vfs <= %d\n",
			 be_max_pf_pool_rss_tables(adapter));
	}
	return 0;
}
4475 
/* Discover the SRIOV configuration, advertise the max VF count to the
 * PCI core and (Skyhawk PFs only, when no VFs are enabled yet) ask FW to
 * pre-distribute the PF-pool resources for a later VF enablement.
 */
static void be_alloc_sriov_res(struct be_adapter *adapter)
{
	int old_vfs = pci_num_vf(adapter->pdev);
	struct  be_resources vft_res = {0};
	int status;

	be_get_sriov_config(adapter);

	if (!old_vfs)
		pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));

	/* When the HW is in SRIOV capable configuration, the PF-pool
	 * resources are given to PF during driver load, if there are no
	 * old VFs. This facility is not available in BE3 FW.
	 * Also, this is done by FW in Lancer chip.
	 */
	if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
		/* num_vfs == 0 here: give all pool resources to the PF */
		be_calculate_vf_res(adapter, 0, &vft_res);
		status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0,
						 &vft_res);
		if (status)
			dev_err(&adapter->pdev->dev,
				"Failed to optimize SRIOV resources\n");
	}
}
4501 
/* Populate adapter->res with this function's resource limits: derived
 * locally on BEx chips, queried from FW otherwise. Also reserves half
 * the EQs for RoCE when supported and sizes the initial (paired) RX/TX
 * irq counts. Returns 0 or the FW cmd status.
 */
static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
	 */
	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
	} else {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If a default RXQ must be created, we'll use up one RSSQ */
		if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs &&
		    !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS))
			res.max_rss_qs -= 1;
	}

	/* If RoCE is supported stash away half the EQs for RoCE */
	res.max_nic_evt_qs = be_roce_supported(adapter) ?
				res.max_evt_qs / 2 : res.max_evt_qs;
	adapter->res = res;

	/* If FW supports RSS default queue, then skip creating non-RSS
	 * queue for non-IP traffic.
	 */
	adapter->need_def_rxq = (be_if_cap_flags(adapter) &
				 BE_IF_FLAGS_DEFQ_RSS) ? 0 : 1;

	dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
		 be_max_txqs(adapter), be_max_rxqs(adapter),
		 be_max_rss(adapter), be_max_nic_eqs(adapter),
		 be_max_vfs(adapter));
	dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
		 be_max_uc(adapter), be_max_mc(adapter),
		 be_max_vlans(adapter));

	/* Ensure RX and TX queues are created in pairs at init time */
	adapter->cfg_num_rx_irqs =
				min_t(u16, netif_get_num_default_rss_queues(),
				      be_max_qp_irqs(adapter));
	adapter->cfg_num_tx_irqs = adapter->cfg_num_rx_irqs;
	return 0;
}
4551 
/* Query and cache static adapter configuration: controller attributes,
 * FW config, FAT dump size, FW log level (BEx), WOL capability, port
 * name and the active profile id (PF only).
 * Returns non-zero only when the controller-attribute or fw-cfg query
 * fails; later queries are best-effort.
 */
static int be_get_config(struct be_adapter *adapter)
{
	int status, level;
	u16 profile_id;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter);
	if (status)
		return status;

	if (!lancer_chip(adapter) && be_physfn(adapter))
		be_cmd_get_fat_dump_len(adapter, &adapter->fat_dump_len);

	if (BEx_chip(adapter)) {
		level = be_cmd_get_fw_log_level(adapter);
		adapter->msg_enable =
			level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
	}

	/* Mirror the WOL capability into the PCI wake configuration */
	be_cmd_get_acpi_wol_cap(adapter);
	pci_enable_wake(adapter->pdev, PCI_D3hot, adapter->wol_en);
	pci_enable_wake(adapter->pdev, PCI_D3cold, adapter->wol_en);

	be_cmd_query_port_name(adapter);

	if (be_physfn(adapter)) {
		status = be_cmd_get_active_profile(adapter, &profile_id);
		if (!status)
			dev_info(&adapter->pdev->dev,
				 "Using profile 0x%x\n", profile_id);
	}

	return 0;
}
4589 
4590 static int be_mac_setup(struct be_adapter *adapter)
4591 {
4592 	u8 mac[ETH_ALEN];
4593 	int status;
4594 
4595 	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
4596 		status = be_cmd_get_perm_mac(adapter, mac);
4597 		if (status)
4598 			return status;
4599 
4600 		eth_hw_addr_set(adapter->netdev, mac);
4601 		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
4602 
4603 		/* Initial MAC for BE3 VFs is already programmed by PF */
4604 		if (BEx_chip(adapter) && be_virtfn(adapter))
4605 			memcpy(adapter->dev_mac, mac, ETH_ALEN);
4606 	}
4607 
4608 	return 0;
4609 }
4610 
/* Queue the function's housekeeping work on be_wq, to run in 1 second,
 * and record that it is scheduled so be_cancel_worker() can undo it.
 */
static void be_schedule_worker(struct be_adapter *adapter)
{
	queue_delayed_work(be_wq, &adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}
4616 
4617 static void be_destroy_err_recovery_workq(void)
4618 {
4619 	if (!be_err_recovery_workq)
4620 		return;
4621 
4622 	destroy_workqueue(be_err_recovery_workq);
4623 	be_err_recovery_workq = NULL;
4624 }
4625 
/* Queue this function's error-detection work on the shared recovery
 * workq, to run after @delay msecs, and mark it as scheduled. No-op
 * when the module-level workq was never created.
 */
static void be_schedule_err_detection(struct be_adapter *adapter, u32 delay)
{
	struct be_error_recovery *err_rec = &adapter->error_recovery;

	if (!be_err_recovery_workq)
		return;

	queue_delayed_work(be_err_recovery_workq, &err_rec->err_detection_work,
			   msecs_to_jiffies(delay));
	adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED;
}
4637 
/* Create all queues needed by the function - EQs, TX queues, RX CQs and
 * MCC queues - then publish the actual RX/TX queue counts to the stack.
 * On failure, partially created queues are left for the caller's error
 * path to release. Returns 0 or the failing step's status.
 */
static int be_setup_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
	if (status)
		goto err;

	status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
	if (status)
		goto err;

	return 0;
err:
	dev_err(&adapter->pdev->dev, "queue_setup failed\n");
	return status;
}
4672 
/* Allocate the MAC/multicast/unicast filter bookkeeping arrays and
 * create this function's iface in FW. RSS capability is dropped when
 * only a single RX irq is configured. On -ENOMEM, arrays allocated so
 * far are freed later by be_if_destroy() on the caller's error path.
 * Returns 0, -ENOMEM or the FW cmd status.
 */
static int be_if_create(struct be_adapter *adapter)
{
	u32 en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
	u32 cap_flags = be_if_cap_flags(adapter);

	/* alloc required memory for other filtering fields */
	adapter->pmac_id = kcalloc(be_max_uc(adapter),
				   sizeof(*adapter->pmac_id), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	adapter->mc_list = kcalloc(be_max_mc(adapter),
				   sizeof(*adapter->mc_list), GFP_KERNEL);
	if (!adapter->mc_list)
		return -ENOMEM;

	adapter->uc_list = kcalloc(be_max_uc(adapter),
				   sizeof(*adapter->uc_list), GFP_KERNEL);
	if (!adapter->uc_list)
		return -ENOMEM;

	if (adapter->cfg_num_rx_irqs == 1)
		cap_flags &= ~(BE_IF_FLAGS_DEFQ_RSS | BE_IF_FLAGS_RSS);

	en_flags &= cap_flags;
	/* will enable all the needed filter flags in be_open() */
	return be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
				  &adapter->if_handle, 0);
}
4702 
/* Destroy and re-create the iface and all queues, re-programming MSI-x
 * when no vectors are shared with RoCE. If the netdev is running it is
 * closed first and re-opened at the end.
 * Returns 0 or the first failing step's status.
 */
int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev)) {
		/* be_tx_timeout() must not run concurrently with this
		 * function, synchronize with an already-running dev_watchdog
		 */
		netif_tx_lock_bh(netdev);
		/* device cannot transmit now, avoid dev_watchdog timeouts */
		netif_carrier_off(netdev);
		netif_tx_unlock_bh(netdev);

		be_close(netdev);
	}

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);
	status = be_cmd_if_destroy(adapter, adapter->if_handle,  0);
	if (status)
		return status;

	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_if_create(adapter);
	if (status)
		return status;

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	/* The IF was destroyed and re-created. We need to clear
	 * all promiscuous flags valid for the destroyed IF.
	 * Without this promisc mode is not restored during
	 * be_open() because the driver thinks that it is
	 * already enabled in HW.
	 */
	adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}
4762 
/* Extract the leading major number from a dotted FW version string
 * (e.g. "4.6.62.0" -> 4). Returns 0 when no integer can be parsed.
 */
static inline int fw_major_num(const char *fw_ver)
{
	int major = 0;

	return (sscanf(fw_ver, "%d.", &major) == 1) ? major : 0;
}
4773 
4774 /* If it is error recovery, FLR the PF
4775  * Else if any VFs are already enabled don't FLR the PF
4776  */
4777 static bool be_reset_required(struct be_adapter *adapter)
4778 {
4779 	if (be_error_recovering(adapter))
4780 		return true;
4781 	else
4782 		return pci_num_vf(adapter->pdev) == 0;
4783 }
4784 
/* Wait for the FW to be ready and perform the required initialization:
 * clear stale error state, FLR the function when required, tell FW that
 * cmds may be fired and enable interrupts for co-located ULPs.
 * Returns 0 on success or the failing step's status.
 */
static int be_func_init(struct be_adapter *adapter)
{
	int status;

	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	/* FW is now ready; clear errors to allow cmds/doorbell */
	be_clear_error(adapter, BE_CLEAR_ALL);

	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			return status;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Tell FW we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	return 0;
}
4816 
/* Bring up the NIC function: FW init, resource discovery, MSI-x, iface
 * and queue creation, MAC/flow-control/link configuration and optional
 * SR-IOV enablement. On failure everything set up so far is torn down
 * via be_clear(). Returns 0 or the failing step's status.
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_func_init(adapter);
	if (status)
		return status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	/* invoke this cmd first to get pf_num and vf_num which are needed
	 * for issuing profile related cmds
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, NULL);
		if (status)
			return status;
	}

	status = be_get_config(adapter);
	if (status)
		goto err;

	if (!BE2_chip(adapter) && be_physfn(adapter))
		be_alloc_sriov_res(adapter);

	status = be_get_resources(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	/* will enable all the needed filter flags in be_open() */
	status = be_if_create(adapter);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter);
	dev_info(dev, "FW version is %s\n", adapter->fw_ver);

	/* BE2 FW older than 4.0 is known to have IRQ problems */
	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old(%s), IRQs may not work",
			adapter->fw_ver);
		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
	}

	/* If the requested flow-control setting is rejected, fall back to
	 * whatever FW reports as the current setting.
	 */
	status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
					 adapter->rx_fc);
	if (status)
		be_cmd_get_flow_control(adapter, &adapter->tx_fc,
					&adapter->rx_fc);

	dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
		 adapter->tx_fc, adapter->rx_fc);

	if (be_physfn(adapter))
		be_cmd_set_logical_link_config(adapter,
					       IFLA_VF_LINK_STATE_AUTO, 0);

	/* BE3 EVB echoes broadcast/multicast packets back to PF's vport
	 * confusing a linux bridge or OVS that it might be connected to.
	 * Set the EVB to PASSTHRU mode which effectively disables the EVB
	 * when SRIOV is not enabled.
	 */
	if (BE3_chip(adapter))
		be_cmd_set_hsw_config(adapter, 0, 0, adapter->if_handle,
				      PORT_FWD_TYPE_PASSTHRU, 0);

	if (adapter->num_vfs)
		be_vf_setup(adapter);

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	if (be_physfn(adapter) && !lancer_chip(adapter))
		be_cmd_set_features(adapter);

	be_schedule_worker(adapter);
	adapter->flags |= BE_FLAGS_SETUP_DONE;
	return 0;
err:
	be_clear(adapter);
	return status;
}
4921 
4922 #ifdef CONFIG_NET_POLL_CONTROLLER
/* ndo_poll_controller handler: with interrupts unavailable, re-arm each
 * EQ and schedule its NAPI context so pending completions get serviced.
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
		napi_schedule(&eqo->napi);
	}
}
4934 #endif
4935 
/* Flash the firmware image named @fw_file onto the adapter.
 * Refused with -ENETDOWN while the interface is down. On a successful
 * download the cached FW version string is refreshed.
 * Returns 0 on success or a negative errno/cmd status.
 */
int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -ENETDOWN;
	}

	/* NOTE: the early goto relies on request_firmware() leaving fw
	 * NULL on failure (release_firmware(NULL) is a no-op).
	 */
	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	/* Lancer has its own image layout and download path */
	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

	if (!status)
		be_cmd_get_fw_ver(adapter);

fw_exit:
	release_firmware(fw);
	return status;
}
4965 
/* ndo_bridge_setlink handler: program the embedded switch forwarding
 * mode (VEB or VEPA) from the IFLA_BRIDGE_MODE attribute in @nlh.
 * Supported only when SR-IOV is enabled; BE3 supports VEB only.
 * Returns 0 on success or a negative errno.
 */
static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
				 u16 flags, struct netlink_ext_ack *extack)
{
	struct be_adapter *adapter = netdev_priv(dev);
	struct nlattr *attr, *br_spec;
	int rem;
	int status = 0;
	u16 mode = 0;

	if (!sriov_enabled(adapter))
		return -EOPNOTSUPP;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested(attr, br_spec, rem) {
		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		/* Guard against a truncated attribute payload */
		if (nla_len(attr) < sizeof(mode))
			return -EINVAL;

		mode = nla_get_u16(attr);
		if (BE3_chip(adapter) && mode == BRIDGE_MODE_VEPA)
			return -EOPNOTSUPP;

		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
			return -EINVAL;

		status = be_cmd_set_hsw_config(adapter, 0, 0,
					       adapter->if_handle,
					       mode == BRIDGE_MODE_VEPA ?
					       PORT_FWD_TYPE_VEPA :
					       PORT_FWD_TYPE_VEB, 0);
		if (status)
			goto err;

		dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
			 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

		return status;
	}
	/* Reached on cmd failure, or by loop fall-through when no
	 * IFLA_BRIDGE_MODE attribute was present (status is then 0).
	 */
err:
	dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
		mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

	return status;
}
5015 
/* ndo_bridge_getlink handler: report the embedded switch (VEB/VEPA)
 * mode. BEx/Lancer support VEB only, and only in SR-IOV-capable
 * profiles; on other chips the mode is queried from FW. Nothing is
 * reported in PASSTHRU mode, where the EVB is effectively disabled.
 */
static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				 struct net_device *dev, u32 filter_mask,
				 int nlflags)
{
	struct be_adapter *adapter = netdev_priv(dev);
	int status = 0;
	u8 hsw_mode;

	/* BE and Lancer chips support VEB mode only */
	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		/* VEB is disabled in non-SR-IOV profiles on BE3/Lancer */
		if (!pci_sriov_get_totalvfs(adapter->pdev))
			return 0;
		hsw_mode = PORT_FWD_TYPE_VEB;
	} else {
		status = be_cmd_get_hsw_config(adapter, NULL, 0,
					       adapter->if_handle, &hsw_mode,
					       NULL);
		if (status)
			return 0;

		if (hsw_mode == PORT_FWD_TYPE_PASSTHRU)
			return 0;
	}

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
				       hsw_mode == PORT_FWD_TYPE_VEPA ?
				       BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
				       0, 0, nlflags, filter_mask, NULL);
}
5046 
5047 static struct be_cmd_work *be_alloc_work(struct be_adapter *adapter,
5048 					 void (*func)(struct work_struct *))
5049 {
5050 	struct be_cmd_work *work;
5051 
5052 	work = kzalloc(sizeof(*work), GFP_ATOMIC);
5053 	if (!work) {
5054 		dev_err(&adapter->pdev->dev,
5055 			"be_work memory allocation failed\n");
5056 		return NULL;
5057 	}
5058 
5059 	INIT_WORK(&work->work, func);
5060 	work->adapter = adapter;
5061 	return work;
5062 }
5063 
/* ndo_features_check handler: per-packet restriction of offload features
 * based on chip limitations (TSO quirks) and tunnel type (only VxLAN
 * offloads are supported when VxLAN offloads are configured).
 */
static netdev_features_t be_features_check(struct sk_buff *skb,
					   struct net_device *dev,
					   netdev_features_t features)
{
	struct be_adapter *adapter = netdev_priv(dev);
	u8 l4_hdr = 0;

	if (skb_is_gso(skb)) {
		/* IPv6 TSO requests with extension hdrs are a problem
		 * to Lancer and BE3 HW. Disable TSO6 feature.
		 */
		if (!skyhawk_chip(adapter) && is_ipv6_ext_hdr(skb))
			features &= ~NETIF_F_TSO6;

		/* Lancer cannot handle the packet with MSS less than 256.
		 * Also it can't handle a TSO packet with a single segment
		 * Disable the GSO support in such cases
		 */
		if (lancer_chip(adapter) &&
		    (skb_shinfo(skb)->gso_size < 256 ||
		     skb_shinfo(skb)->gso_segs == 1))
			features &= ~NETIF_F_GSO_MASK;
	}

	/* The code below restricts offload features for some tunneled and
	 * Q-in-Q packets.
	 * Offload features for normal (non tunnel) packets are unchanged.
	 */
	features = vlan_features_check(skb, features);
	if (!skb->encapsulation ||
	    !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
		return features;

	/* It's an encapsulated packet and VxLAN offloads are enabled. We
	 * should disable tunnel offload features if it's not a VxLAN packet,
	 * as tunnel offloads have been enabled only for VxLAN. This is done to
	 * allow other tunneled traffic like GRE work fine while VxLAN
	 * offloads are configured in Skyhawk-R.
	 */
	/* Determine the outer L4 protocol (handles VLAN-tagged frames) */
	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		l4_hdr = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_hdr = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return features;
	}

	/* Keep tunnel offloads only for a well-formed VxLAN packet destined
	 * to the configured VxLAN port; otherwise strip csum/GSO offloads.
	 */
	if (l4_hdr != IPPROTO_UDP ||
	    skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
	    skb->inner_protocol != htons(ETH_P_TEB) ||
	    skb_inner_mac_header(skb) - skb_transport_header(skb) !=
		sizeof(struct udphdr) + sizeof(struct vxlanhdr) ||
	    !adapter->vxlan_port ||
	    udp_hdr(skb)->dest != adapter->vxlan_port)
		return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);

	return features;
}
5125 
5126 static int be_get_phys_port_id(struct net_device *dev,
5127 			       struct netdev_phys_item_id *ppid)
5128 {
5129 	int i, id_len = CNTL_SERIAL_NUM_WORDS * CNTL_SERIAL_NUM_WORD_SZ + 1;
5130 	struct be_adapter *adapter = netdev_priv(dev);
5131 	u8 *id;
5132 
5133 	if (MAX_PHYS_ITEM_ID_LEN < id_len)
5134 		return -ENOSPC;
5135 
5136 	ppid->id[0] = adapter->hba_port_num + 1;
5137 	id = &ppid->id[1];
5138 	for (i = CNTL_SERIAL_NUM_WORDS - 1; i >= 0;
5139 	     i--, id += CNTL_SERIAL_NUM_WORD_SZ)
5140 		memcpy(id, &adapter->serial_num[i], CNTL_SERIAL_NUM_WORD_SZ);
5141 
5142 	ppid->id_len = id_len;
5143 
5144 	return 0;
5145 }
5146 
5147 static void be_set_rx_mode(struct net_device *dev)
5148 {
5149 	struct be_adapter *adapter = netdev_priv(dev);
5150 	struct be_cmd_work *work;
5151 
5152 	work = be_alloc_work(adapter, be_work_set_rx_mode);
5153 	if (work)
5154 		queue_work(be_wq, &work->work);
5155 }
5156 
/* net_device operations implemented by this driver */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
	.ndo_set_vf_link_state  = be_set_vf_link_state,
	.ndo_set_vf_spoofchk    = be_set_vf_spoofchk,
	.ndo_tx_timeout		= be_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
	.ndo_bridge_setlink	= be_ndo_bridge_setlink,
	.ndo_bridge_getlink	= be_ndo_bridge_getlink,
	.ndo_features_check	= be_features_check,
	.ndo_get_phys_port_id   = be_get_phys_port_id,
};
5182 
/* Initialize netdev feature flags, ops, MTU limits and tunnel support
 * based on the capabilities of this adapter.  Called once before
 * register_netdev().
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_GSO_UDP_TUNNEL |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	/* RX hashing is only advertised when the interface supports RSS */
	if ((be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER |
		NETIF_F_HIGHDMA;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_tso_max_size(netdev, BE_MAX_GSO_SIZE - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	netdev->ethtool_ops = &be_ethtool_ops;

	/* VxLAN offload is available only on Skyhawk non-multichannel
	 * configurations (not Lancer/BEx, not multi-channel)
	 */
	if (!lancer_chip(adapter) && !BEx_chip(adapter) && !be_is_mc(adapter))
		netdev->udp_tunnel_nic_info = &be_udp_tunnels;

	/* MTU range: 256 - 9000 */
	netdev->min_mtu = BE_MIN_MTU;
	netdev->max_mtu = BE_MAX_MTU;
}
5218 
/* Quiesce the interface (detach + close under rtnl) and tear down the
 * adapter state.  Counterpart of be_resume(); used by suspend, shutdown
 * and error recovery paths.
 */
static void be_cleanup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	rtnl_lock();
	netif_device_detach(netdev);
	if (netif_running(netdev))
		be_close(netdev);
	rtnl_unlock();

	be_clear(adapter);
}
5231 
/* Re-initialize the adapter after be_cleanup(): run be_setup(), re-open
 * the interface if it was running, then re-attach the netdev.
 * Returns 0 on success or a negative error code.
 */
static int be_resume(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_setup(adapter);
	if (status)
		return status;

	rtnl_lock();
	if (netif_running(netdev))
		status = be_open(netdev);
	rtnl_unlock();

	if (status)
		return status;

	netif_device_attach(netdev);

	return 0;
}
5253 
5254 static void be_soft_reset(struct be_adapter *adapter)
5255 {
5256 	u32 val;
5257 
5258 	dev_info(&adapter->pdev->dev, "Initiating chip soft reset\n");
5259 	val = ioread32(adapter->pcicfg + SLIPORT_SOFTRESET_OFFSET);
5260 	val |= SLIPORT_SOFTRESET_SR_MASK;
5261 	iowrite32(val, adapter->pcicfg + SLIPORT_SOFTRESET_OFFSET);
5262 }
5263 
/* Decide whether the current TPE (HW) error meets the recovery criteria:
 *  - the POST stage reports a recoverable error with a non-zero code,
 *  - enough time has elapsed since driver load and since the last recovery,
 *  - the error code differs from the previous one (no consecutive repeats).
 * On success, records the recovery time and error code and returns true.
 */
static bool be_err_is_recoverable(struct be_adapter *adapter)
{
	struct be_error_recovery *err_rec = &adapter->error_recovery;
	unsigned long initial_idle_time =
		msecs_to_jiffies(ERR_RECOVERY_IDLE_TIME);
	unsigned long recovery_interval =
		msecs_to_jiffies(ERR_RECOVERY_INTERVAL);
	u16 ue_err_code;
	u32 val;

	val = be_POST_stage_get(adapter);
	if ((val & POST_STAGE_RECOVERABLE_ERR) != POST_STAGE_RECOVERABLE_ERR)
		return false;
	ue_err_code = val & POST_ERR_RECOVERY_CODE_MASK;
	if (ue_err_code == 0)
		return false;

	dev_err(&adapter->pdev->dev, "Recoverable HW error code: 0x%x\n",
		ue_err_code);

	/* Too soon after driver load: don't attempt recovery */
	if (time_before_eq(jiffies - err_rec->probe_time, initial_idle_time)) {
		dev_err(&adapter->pdev->dev,
			"Cannot recover within %lu sec from driver load\n",
			jiffies_to_msecs(initial_idle_time) / MSEC_PER_SEC);
		return false;
	}

	/* Too soon after the previous recovery attempt */
	if (err_rec->last_recovery_time && time_before_eq(
		jiffies - err_rec->last_recovery_time, recovery_interval)) {
		dev_err(&adapter->pdev->dev,
			"Cannot recover within %lu sec from last recovery\n",
			jiffies_to_msecs(recovery_interval) / MSEC_PER_SEC);
		return false;
	}

	/* Same error twice in a row indicates an unrecoverable condition */
	if (ue_err_code == err_rec->last_err_code) {
		dev_err(&adapter->pdev->dev,
			"Cannot recover from a consecutive TPE error\n");
		return false;
	}

	err_rec->last_recovery_time = jiffies;
	err_rec->last_err_code = ue_err_code;
	return true;
}
5309 
/* One step of the TPE (Transient Parity Error) recovery state machine
 * for BEx/Skyhawk.  Each invocation advances recovery_state and sets
 * resched_delay for the next step.  Return values:
 *  -EAGAIN : in progress, reschedule after err_rec->resched_delay
 *  0       : recovery sequence complete, proceed with re-init
 *  other   : unrecoverable, abort
 */
static int be_tpe_recover(struct be_adapter *adapter)
{
	struct be_error_recovery *err_rec = &adapter->error_recovery;
	int status = -EAGAIN;
	u32 val;

	switch (err_rec->recovery_state) {
	case ERR_RECOVERY_ST_NONE:
		err_rec->recovery_state = ERR_RECOVERY_ST_DETECT;
		err_rec->resched_delay = ERR_RECOVERY_UE_DETECT_DURATION;
		break;

	case ERR_RECOVERY_ST_DETECT:
		val = be_POST_stage_get(adapter);
		if ((val & POST_STAGE_RECOVERABLE_ERR) !=
		    POST_STAGE_RECOVERABLE_ERR) {
			dev_err(&adapter->pdev->dev,
				"Unrecoverable HW error detected: 0x%x\n", val);
			status = -EINVAL;
			err_rec->resched_delay = 0;
			break;
		}

		dev_err(&adapter->pdev->dev, "Recoverable HW error detected\n");

		/* Only PF0 initiates Chip Soft Reset. But PF0 must wait UE2SR
		 * milliseconds before it checks for final error status in
		 * SLIPORT_SEMAPHORE to determine if recovery criteria is met.
		 * If it does, then PF0 initiates a Soft Reset.
		 */
		if (adapter->pf_num == 0) {
			err_rec->recovery_state = ERR_RECOVERY_ST_RESET;
			err_rec->resched_delay = err_rec->ue_to_reset_time -
					ERR_RECOVERY_UE_DETECT_DURATION;
			break;
		}

		/* Non-zero PFs skip the reset step and just wait for poll */
		err_rec->recovery_state = ERR_RECOVERY_ST_PRE_POLL;
		err_rec->resched_delay = err_rec->ue_to_poll_time -
					ERR_RECOVERY_UE_DETECT_DURATION;
		break;

	case ERR_RECOVERY_ST_RESET:
		if (!be_err_is_recoverable(adapter)) {
			dev_err(&adapter->pdev->dev,
				"Failed to meet recovery criteria\n");
			status = -EIO;
			err_rec->resched_delay = 0;
			break;
		}
		be_soft_reset(adapter);
		err_rec->recovery_state = ERR_RECOVERY_ST_PRE_POLL;
		err_rec->resched_delay = err_rec->ue_to_poll_time -
					err_rec->ue_to_reset_time;
		break;

	case ERR_RECOVERY_ST_PRE_POLL:
		err_rec->recovery_state = ERR_RECOVERY_ST_REINIT;
		err_rec->resched_delay = 0;
		status = 0;			/* done */
		break;

	default:
		status = -EINVAL;
		err_rec->resched_delay = 0;
		break;
	}

	return status;
}
5380 
/* Attempt full adapter recovery after a HW error: for non-Lancer chips,
 * step the TPE state machine first; then wait for FW readiness, tear the
 * adapter down and bring it back up.  Returns 0 on success, -EAGAIN while
 * recovery is still in progress, or a negative error on failure.
 */
static int be_err_recover(struct be_adapter *adapter)
{
	int status;

	if (!lancer_chip(adapter)) {
		/* Recovery must be supported by FW and enabled by the user */
		if (!adapter->error_recovery.recovery_supported ||
		    adapter->priv_flags & BE_DISABLE_TPE_RECOVERY)
			return -EIO;
		status = be_tpe_recover(adapter);
		if (status)
			goto err;
	}

	/* Wait for adapter to reach quiescent state before
	 * destroying queues
	 */
	status = be_fw_wait_ready(adapter);
	if (status)
		goto err;

	adapter->flags |= BE_FLAGS_TRY_RECOVERY;

	be_cleanup(adapter);

	status = be_resume(adapter);
	if (status)
		goto err;

	adapter->flags &= ~BE_FLAGS_TRY_RECOVERY;

err:
	return status;
}
5414 
5415 static void be_err_detection_task(struct work_struct *work)
5416 {
5417 	struct be_error_recovery *err_rec =
5418 			container_of(work, struct be_error_recovery,
5419 				     err_detection_work.work);
5420 	struct be_adapter *adapter =
5421 			container_of(err_rec, struct be_adapter,
5422 				     error_recovery);
5423 	u32 resched_delay = ERR_RECOVERY_DETECTION_DELAY;
5424 	struct device *dev = &adapter->pdev->dev;
5425 	int recovery_status;
5426 
5427 	be_detect_error(adapter);
5428 	if (!be_check_error(adapter, BE_ERROR_HW))
5429 		goto reschedule_task;
5430 
5431 	recovery_status = be_err_recover(adapter);
5432 	if (!recovery_status) {
5433 		err_rec->recovery_retries = 0;
5434 		err_rec->recovery_state = ERR_RECOVERY_ST_NONE;
5435 		dev_info(dev, "Adapter recovery successful\n");
5436 		goto reschedule_task;
5437 	} else if (!lancer_chip(adapter) && err_rec->resched_delay) {
5438 		/* BEx/SH recovery state machine */
5439 		if (adapter->pf_num == 0 &&
5440 		    err_rec->recovery_state > ERR_RECOVERY_ST_DETECT)
5441 			dev_err(&adapter->pdev->dev,
5442 				"Adapter recovery in progress\n");
5443 		resched_delay = err_rec->resched_delay;
5444 		goto reschedule_task;
5445 	} else if (lancer_chip(adapter) && be_virtfn(adapter)) {
5446 		/* For VFs, check if PF have allocated resources
5447 		 * every second.
5448 		 */
5449 		dev_err(dev, "Re-trying adapter recovery\n");
5450 		goto reschedule_task;
5451 	} else if (lancer_chip(adapter) && err_rec->recovery_retries++ <
5452 		   ERR_RECOVERY_MAX_RETRY_COUNT) {
5453 		/* In case of another error during recovery, it takes 30 sec
5454 		 * for adapter to come out of error. Retry error recovery after
5455 		 * this time interval.
5456 		 */
5457 		dev_err(&adapter->pdev->dev, "Re-trying adapter recovery\n");
5458 		resched_delay = ERR_RECOVERY_RETRY_DELAY;
5459 		goto reschedule_task;
5460 	} else {
5461 		dev_err(dev, "Adapter recovery failed\n");
5462 		dev_err(dev, "Please reboot server to recover\n");
5463 	}
5464 
5465 	return;
5466 
5467 reschedule_task:
5468 	be_schedule_err_detection(adapter, resched_delay);
5469 }
5470 
5471 static void be_log_sfp_info(struct be_adapter *adapter)
5472 {
5473 	int status;
5474 
5475 	status = be_cmd_query_sfp_info(adapter);
5476 	if (!status) {
5477 		dev_err(&adapter->pdev->dev,
5478 			"Port %c: %s Vendor: %s part no: %s",
5479 			adapter->port_name,
5480 			be_misconfig_evt_port_state[adapter->phy_state],
5481 			adapter->phy.vendor_name,
5482 			adapter->phy.vendor_pn);
5483 	}
5484 	adapter->flags &= ~BE_FLAGS_PHY_MISCONFIGURED;
5485 }
5486 
/* Periodic (1s) housekeeping work: die-temperature query, MCC reaping when
 * the interface is down, stats refresh, RX-queue replenish, EQ-delay update
 * and SFP logging.  Re-queues itself on be_wq at the end of every run.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* Query die temperature periodically (PF only);
	 * be_get_temp_freq controls the period in worker ticks
	 */
	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions
	 */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* Issue a new stats request only if the previous one completed */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	}

	/* EQ-delay update for Skyhawk is done while notifying EQ */
	if (!skyhawk_chip(adapter))
		be_eqd_update(adapter, false);

	if (adapter->flags & BE_FLAGS_PHY_MISCONFIGURED)
		be_log_sfp_info(adapter);

reschedule:
	adapter->work_counter++;
	queue_delayed_work(be_wq, &adapter->work, msecs_to_jiffies(1000));
}
5535 
5536 static void be_unmap_pci_bars(struct be_adapter *adapter)
5537 {
5538 	if (adapter->csr)
5539 		pci_iounmap(adapter->pdev, adapter->csr);
5540 	if (adapter->db)
5541 		pci_iounmap(adapter->pdev, adapter->db);
5542 	if (adapter->pcicfg && adapter->pcicfg_mapped)
5543 		pci_iounmap(adapter->pdev, adapter->pcicfg);
5544 }
5545 
/* Return the BAR index holding the doorbell region: BAR 0 on Lancer and
 * on VFs, BAR 4 otherwise.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || be_virtfn(adapter)) ? 0 : 4;
}
5553 
5554 static int be_roce_map_pci_bars(struct be_adapter *adapter)
5555 {
5556 	if (skyhawk_chip(adapter)) {
5557 		adapter->roce_db.size = 4096;
5558 		adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
5559 							      db_bar(adapter));
5560 		adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
5561 							       db_bar(adapter));
5562 	}
5563 	return 0;
5564 }
5565 
/* Map the PCI BARs needed by the driver (CSR, doorbell, PCICFG, RoCE
 * doorbell), after reading the SLI interface register to determine chip
 * family and PF/VF mode.  On failure all partial mappings are undone.
 * Returns 0 on success or -ENOMEM.
 */
static int be_map_pci_bars(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u8 __iomem *addr;
	u32 sli_intf;

	/* SLI_INTF tells us the chip family and whether we are a VF;
	 * this must be known before choosing which BARs to map.
	 */
	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	/* CSR BAR (2) exists only on BEx PFs */
	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(pdev, 2, 0);
		if (!adapter->csr)
			return -ENOMEM;
	}

	addr = pci_iomap(pdev, db_bar(adapter), 0);
	if (!addr)
		goto pci_map_err;
	adapter->db = addr;

	if (skyhawk_chip(adapter) || BEx_chip(adapter)) {
		if (be_physfn(adapter)) {
			/* PCICFG is the 2nd BAR in BE2 */
			addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0);
			if (!addr)
				goto pci_map_err;
			adapter->pcicfg = addr;
			adapter->pcicfg_mapped = true;
		} else {
			/* VFs access PCICFG through an offset in the
			 * doorbell mapping; nothing extra to iounmap later
			 */
			adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
			adapter->pcicfg_mapped = false;
		}
	}

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	dev_err(&pdev->dev, "Error in mapping PCI BARs\n");
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
5610 
5611 static void be_drv_cleanup(struct be_adapter *adapter)
5612 {
5613 	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
5614 	struct device *dev = &adapter->pdev->dev;
5615 
5616 	if (mem->va)
5617 		dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5618 
5619 	mem = &adapter->rx_filter;
5620 	if (mem->va)
5621 		dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5622 
5623 	mem = &adapter->stats_cmd;
5624 	if (mem->va)
5625 		dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5626 }
5627 
/* Allocate and initialize various fields in be_adapter struct:
 * DMA-coherent command buffers (mailbox, rx-filter, stats), locks,
 * delayed work items and default flow-control settings.
 * Returns 0 on success or -ENOMEM; partial allocations are freed on
 * failure via the goto cleanup ladder.
 */
static int be_drv_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	struct be_dma_mem *stats_cmd = &adapter->stats_cmd;
	struct device *dev = &adapter->pdev->dev;
	int status = 0;

	/* Over-allocate by 16 bytes so the mailbox can be 16-byte aligned */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(dev, mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va)
		return -ENOMEM;

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(dev, rx_filter->size,
					   &rx_filter->dma, GFP_KERNEL);
	if (!rx_filter->va) {
		status = -ENOMEM;
		goto free_mbox;
	}

	/* Stats request size depends on the chip's stats command version */
	if (lancer_chip(adapter))
		stats_cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else if (BE3_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	else
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
	stats_cmd->va = dma_alloc_coherent(dev, stats_cmd->size,
					   &stats_cmd->dma, GFP_KERNEL);
	if (!stats_cmd->va) {
		status = -ENOMEM;
		goto free_rx_filter;
	}

	mutex_init(&adapter->mbox_lock);
	mutex_init(&adapter->mcc_lock);
	mutex_init(&adapter->rx_filter_lock);
	spin_lock_init(&adapter->mcc_cq_lock);
	init_completion(&adapter->et_cmd_compl);

	/* Saved config space is restored on EEH slot reset */
	pci_save_state(adapter->pdev);

	INIT_DELAYED_WORK(&adapter->work, be_worker);

	adapter->error_recovery.recovery_state = ERR_RECOVERY_ST_NONE;
	adapter->error_recovery.resched_delay = 0;
	INIT_DELAYED_WORK(&adapter->error_recovery.err_detection_work,
			  be_err_detection_task);

	adapter->rx_fc = true;
	adapter->tx_fc = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	return 0;

free_rx_filter:
	dma_free_coherent(dev, rx_filter->size, rx_filter->va, rx_filter->dma);
free_mbox:
	dma_free_coherent(dev, mbox_mem_alloc->size, mbox_mem_alloc->va,
			  mbox_mem_alloc->dma);
	return status;
}
5702 
/* PCI remove callback: tear down the device in reverse order of probe.
 * The function reset is skipped while VFs are still assigned to guests.
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	be_cancel_err_detection(adapter);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	if (!pci_vfs_assigned(adapter->pdev))
		be_cmd_reset_function(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_unmap_pci_bars(adapter);
	be_drv_cleanup(adapter);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
5733 
5734 static ssize_t be_hwmon_show_temp(struct device *dev,
5735 				  struct device_attribute *dev_attr,
5736 				  char *buf)
5737 {
5738 	struct be_adapter *adapter = dev_get_drvdata(dev);
5739 
5740 	/* Unit: millidegree Celsius */
5741 	if (adapter->hwmon_info.be_on_die_temp == BE_INVALID_DIE_TEMP)
5742 		return -EIO;
5743 	else
5744 		return sprintf(buf, "%u\n",
5745 			       adapter->hwmon_info.be_on_die_temp * 1000);
5746 }
5747 
/* hwmon sysfs attributes: a single read-only temp1_input sensor */
static SENSOR_DEVICE_ATTR(temp1_input, 0444,
			  be_hwmon_show_temp, NULL, 1);

static struct attribute *be_hwmon_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	NULL
};

ATTRIBUTE_GROUPS(be_hwmon);
5757 
5758 static char *mc_name(struct be_adapter *adapter)
5759 {
5760 	char *str = "";	/* default */
5761 
5762 	switch (adapter->mc_type) {
5763 	case UMC:
5764 		str = "UMC";
5765 		break;
5766 	case FLEX10:
5767 		str = "FLEX10";
5768 		break;
5769 	case vNIC1:
5770 		str = "vNIC-1";
5771 		break;
5772 	case nPAR:
5773 		str = "nPAR";
5774 		break;
5775 	case UFP:
5776 		str = "UFP";
5777 		break;
5778 	case vNIC2:
5779 		str = "vNIC-2";
5780 		break;
5781 	default:
5782 		str = "";
5783 	}
5784 
5785 	return str;
5786 }
5787 
/* Return "PF" or "VF" depending on the function type */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";

	return "VF";
}
5792 
5793 static inline char *nic_name(struct pci_dev *pdev)
5794 {
5795 	switch (pdev->device) {
5796 	case OC_DEVICE_ID1:
5797 		return OC_NAME;
5798 	case OC_DEVICE_ID2:
5799 		return OC_NAME_BE;
5800 	case OC_DEVICE_ID3:
5801 	case OC_DEVICE_ID4:
5802 		return OC_NAME_LANCER;
5803 	case BE_DEVICE_ID2:
5804 		return BE3_NAME;
5805 	case OC_DEVICE_ID5:
5806 	case OC_DEVICE_ID6:
5807 		return OC_NAME_SH;
5808 	default:
5809 		return BE_NAME;
5810 	}
5811 }
5812 
/* PCI probe callback: enable the device, map BARs, allocate driver state,
 * bring the adapter up, register the netdev and start error detection.
 * All failure paths unwind through the goto ladder in reverse order.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	struct be_adapter *adapter;
	struct net_device *netdev;
	int status = 0;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (!netdev) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (status) {
		dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
		goto free_netdev;
	}

	status = be_map_pci_bars(adapter);
	if (status)
		goto free_netdev;

	status = be_drv_init(adapter);
	if (status)
		goto unmap_bars;

	status = be_setup(adapter);
	if (status)
		goto drv_cleanup;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	/* probe_time is used by be_err_is_recoverable() to enforce an
	 * initial idle period before attempting recovery
	 */
	be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
	adapter->error_recovery.probe_time = jiffies;

	/* On Die temperature not supported for VF. */
	if (be_physfn(adapter) && IS_ENABLED(CONFIG_BE2NET_HWMON)) {
		adapter->hwmon_info.hwmon_dev =
			devm_hwmon_device_register_with_groups(&pdev->dev,
							       DRV_NAME,
							       adapter,
							       be_hwmon_groups);
		adapter->hwmon_info.be_on_die_temp = BE_INVALID_DIE_TEMP;
	}

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), adapter->port_name);

	return 0;

unsetup:
	be_clear(adapter);
drv_cleanup:
	be_drv_cleanup(adapter);
unmap_bars:
	be_unmap_pci_bars(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
5898 
/* PM suspend callback: disable interrupts, stop error detection and
 * quiesce/tear down the adapter.  Always succeeds.
 */
static int __maybe_unused be_suspend(struct device *dev_d)
{
	struct be_adapter *adapter = dev_get_drvdata(dev_d);

	be_intr_set(adapter, false);
	be_cancel_err_detection(adapter);

	be_cleanup(adapter);

	return 0;
}
5910 
5911 static int __maybe_unused be_pci_resume(struct device *dev_d)
5912 {
5913 	struct be_adapter *adapter = dev_get_drvdata(dev_d);
5914 	int status = 0;
5915 
5916 	status = be_resume(adapter);
5917 	if (status)
5918 		return status;
5919 
5920 	be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
5921 
5922 	return 0;
5923 }
5924 
5925 /*
5926  * An FLR will stop BE from DMAing any data.
5927  */
5928 static void be_shutdown(struct pci_dev *pdev)
5929 {
5930 	struct be_adapter *adapter = pci_get_drvdata(pdev);
5931 
5932 	if (!adapter)
5933 		return;
5934 
5935 	be_roce_dev_shutdown(adapter);
5936 	cancel_delayed_work_sync(&adapter->work);
5937 	be_cancel_err_detection(adapter);
5938 
5939 	netif_device_detach(adapter->netdev);
5940 
5941 	be_cmd_reset_function(adapter);
5942 
5943 	pci_disable_device(pdev);
5944 }
5945 
/* EEH (PCI error recovery) error_detected callback: mark the EEH error,
 * quiesce the adapter and tell the PCI core whether a slot reset should
 * be attempted (NEED_RESET) or the device is gone (DISCONNECT).
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	be_roce_dev_remove(adapter);

	/* Only quiesce once per error episode */
	if (!be_check_error(adapter, BE_ERROR_EEH)) {
		be_set_error(adapter, BE_ERROR_EEH);

		be_cancel_err_detection(adapter);

		be_cleanup(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
5979 
/* EEH slot_reset callback: re-enable the device, restore config space
 * (saved in be_drv_init via pci_save_state) and wait for FW readiness.
 * Returns RECOVERED on success so the core proceeds to the resume stage.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	be_clear_error(adapter, BE_CLEAR_ALL);
	return PCI_ERS_RESULT_RECOVERED;
}
6004 
6005 static void be_eeh_resume(struct pci_dev *pdev)
6006 {
6007 	int status = 0;
6008 	struct be_adapter *adapter = pci_get_drvdata(pdev);
6009 
6010 	dev_info(&adapter->pdev->dev, "EEH resume\n");
6011 
6012 	pci_save_state(pdev);
6013 
6014 	status = be_resume(adapter);
6015 	if (status)
6016 		goto err;
6017 
6018 	be_roce_dev_add(adapter);
6019 
6020 	be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
6021 	return;
6022 err:
6023 	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
6024 }
6025 
/* sriov_configure callback: enable @num_vfs VFs (or disable all when 0),
 * redistributing PF-pool resources on Skyhawk and re-sizing the PF's own
 * queues to match.  Returns the number of VFs enabled, 0, or a negative
 * error code.
 */
static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct be_resources vft_res = {0};
	int status;

	if (!num_vfs)
		be_vf_clear(adapter);

	adapter->num_vfs = num_vfs;

	/* Refuse to disable VFs while a guest still owns one */
	if (adapter->num_vfs == 0 && pci_vfs_assigned(pdev)) {
		dev_warn(&pdev->dev,
			 "Cannot disable VFs while they are assigned\n");
		return -EBUSY;
	}

	/* When the HW is in SRIOV capable configuration, the PF-pool resources
	 * are equally distributed across the max-number of VFs. The user may
	 * request only a subset of the max-vfs to be enabled.
	 * Based on num_vfs, redistribute the resources across num_vfs so that
	 * each VF will have access to more number of resources.
	 * This facility is not available in BE3 FW.
	 * Also, this is done by FW in Lancer chip.
	 */
	if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) {
		be_calculate_vf_res(adapter, adapter->num_vfs,
				    &vft_res);
		status = be_cmd_set_sriov_config(adapter, adapter->pool_res,
						 adapter->num_vfs, &vft_res);
		if (status)
			dev_err(&pdev->dev,
				"Failed to optimize SR-IOV resources\n");
	}

	status = be_get_resources(adapter);
	if (status)
		return be_cmd_status(status);

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_update_queues(adapter);
	rtnl_unlock();
	if (status)
		return be_cmd_status(status);

	if (adapter->num_vfs)
		status = be_vf_setup(adapter);

	if (!status)
		return adapter->num_vfs;

	return 0;
}
6080 
/* PCI error (EEH) recovery callbacks */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

/* Suspend/resume power-management hooks */
static SIMPLE_DEV_PM_OPS(be_pci_pm_ops, be_suspend, be_pci_resume);

/* PCI driver registration for all supported BE2/BE3/Lancer/Skyhawk ids */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.driver.pm = &be_pci_pm_ops,
	.shutdown = be_shutdown,
	.sriov_configure = be_pci_sriov_configure,
	.err_handler = &be_eeh_handlers
};
6099 
6100 static int __init be_init_module(void)
6101 {
6102 	int status;
6103 
6104 	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
6105 	    rx_frag_size != 2048) {
6106 		printk(KERN_WARNING DRV_NAME
6107 			" : Module param rx_frag_size must be 2048/4096/8192."
6108 			" Using 2048\n");
6109 		rx_frag_size = 2048;
6110 	}
6111 
6112 	if (num_vfs > 0) {
6113 		pr_info(DRV_NAME " : Module param num_vfs is obsolete.");
6114 		pr_info(DRV_NAME " : Use sysfs method to enable VFs\n");
6115 	}
6116 
6117 	be_wq = create_singlethread_workqueue("be_wq");
6118 	if (!be_wq) {
6119 		pr_warn(DRV_NAME "workqueue creation failed\n");
6120 		return -1;
6121 	}
6122 
6123 	be_err_recovery_workq =
6124 		create_singlethread_workqueue("be_err_recover");
6125 	if (!be_err_recovery_workq)
6126 		pr_warn(DRV_NAME "Could not create error recovery workqueue\n");
6127 
6128 	status = pci_register_driver(&be_driver);
6129 	if (status) {
6130 		destroy_workqueue(be_wq);
6131 		be_destroy_err_recovery_workq();
6132 	}
6133 	return status;
6134 }
6135 module_init(be_init_module);
6136 
/* Module exit point: unregister the PCI driver, then destroy the shared
 * workqueues (be_wq may be NULL only if init failed, but guard anyway).
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);

	be_destroy_err_recovery_workq();

	if (be_wq)
		destroy_workqueue(be_wq);
}
module_exit(be_exit_module);
6147