1 /*
2  * Copyright (C) 2005 - 2011 Emulex
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version 2
7  * as published by the Free Software Foundation.  The full GNU General
8  * Public License is included in this distribution in the file called COPYING.
9  *
10  * Contact Information:
11  * linux-drivers@emulex.com
12  *
13  * Emulex
14  * 3333 Susan Street
15  * Costa Mesa, CA 92626
16  */
17 
18 #include <linux/prefetch.h>
19 #include <linux/module.h>
20 #include "be.h"
21 #include "be_cmds.h"
22 #include <asm/div64.h>
23 
24 MODULE_VERSION(DRV_VER);
26 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
27 MODULE_AUTHOR("ServerEngines Corporation");
28 MODULE_LICENSE("GPL");
29 
30 static unsigned int num_vfs;
31 module_param(num_vfs, uint, S_IRUGO);
32 MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
33 
34 static ushort rx_frag_size = 2048;
35 module_param(rx_frag_size, ushort, S_IRUGO);
36 MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
37 
38 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
39 	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
40 	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
41 	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
42 	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
43 	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
44 	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
45 	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
46 	{ 0 }
47 };
48 MODULE_DEVICE_TABLE(pci, be_dev_ids);
49 /* UE Status Low CSR */
50 static const char * const ue_status_low_desc[] = {
51 	"CEV",
52 	"CTX",
53 	"DBUF",
54 	"ERX",
55 	"Host",
56 	"MPU",
57 	"NDMA",
58 	"PTC ",
59 	"RDMA ",
60 	"RXF ",
61 	"RXIPS ",
62 	"RXULP0 ",
63 	"RXULP1 ",
64 	"RXULP2 ",
65 	"TIM ",
66 	"TPOST ",
67 	"TPRE ",
68 	"TXIPS ",
69 	"TXULP0 ",
70 	"TXULP1 ",
71 	"UC ",
72 	"WDMA ",
73 	"TXULP2 ",
74 	"HOST1 ",
75 	"P0_OB_LINK ",
76 	"P1_OB_LINK ",
77 	"HOST_GPIO ",
78 	"MBOX ",
79 	"AXGMAC0",
80 	"AXGMAC1",
81 	"JTAG",
82 	"MPU_INTPEND"
83 };
84 /* UE Status High CSR */
85 static const char * const ue_status_hi_desc[] = {
86 	"LPCMEMHOST",
87 	"MGMT_MAC",
88 	"PCS0ONLINE",
89 	"MPU_IRAM",
90 	"PCS1ONLINE",
91 	"PCTL0",
92 	"PCTL1",
93 	"PMEM",
94 	"RR",
95 	"TXPB",
96 	"RXPP",
97 	"XAUI",
98 	"TXP",
99 	"ARM",
100 	"IPC",
101 	"HOST2",
102 	"HOST3",
103 	"HOST4",
104 	"HOST5",
105 	"HOST6",
106 	"HOST7",
107 	"HOST8",
108 	"HOST9",
109 	"NETC",
110 	"Unknown",
111 	"Unknown",
112 	"Unknown",
113 	"Unknown",
114 	"Unknown",
115 	"Unknown",
116 	"Unknown",
117 	"Unknown"
118 };
119 
120 /* Is BE in a multi-channel mode */
121 static inline bool be_is_mc(struct be_adapter *adapter) {
122 	return (adapter->function_mode & FLEX10_MODE ||
123 		adapter->function_mode & VNIC_MODE ||
124 		adapter->function_mode & UMC_ENABLED);
125 }
126 
127 static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
128 {
129 	struct be_dma_mem *mem = &q->dma_mem;
130 	if (mem->va) {
131 		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
132 				  mem->dma);
133 		mem->va = NULL;
134 	}
135 }
136 
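/* Allocate and zero a DMA-coherent ring of 'len' entries of 'entry_size'
 * bytes each for the given queue.
 */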
137 static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
138 		u16 len, u16 entry_size)
139 {
140 	struct be_dma_mem *mem = &q->dma_mem;
141 
142 	memset(q, 0, sizeof(*q));
143 	q->len = len;
144 	q->entry_size = entry_size;
145 	mem->size = len * entry_size;
146 	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
147 				     GFP_KERNEL);
148 	if (!mem->va)
149 		return -ENOMEM;
150 	memset(mem->va, 0, mem->size);
151 	return 0;
152 }
153 
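/* Enable/disable host interrupt delivery by toggling the hostintr bit in
 * the membar interrupt-control register in PCI config space; does nothing
 * if the bit is already in the requested state or after an EEH error.
 */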
154 static void be_intr_set(struct be_adapter *adapter, bool enable)
155 {
156 	u32 reg, enabled;
157 
158 	if (adapter->eeh_error)
159 		return;
160 
161 	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
162 				&reg);
163 	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
164 
165 	if (!enabled && enable)
166 		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
167 	else if (enabled && !enable)
168 		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
169 	else
170 		return;
171 
172 	pci_write_config_dword(adapter->pdev,
173 			PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
174 }
175 
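/* Ring the RX queue doorbell: tell the HW how many buffers were just
 * posted to the given RQ. The wmb() ensures the buffer writes are visible
 * before the doorbell write.
 */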
176 static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
177 {
178 	u32 val = 0;
179 	val |= qid & DB_RQ_RING_ID_MASK;
180 	val |= posted << DB_RQ_NUM_POSTED_SHIFT;
181 
182 	wmb();
183 	iowrite32(val, adapter->db + DB_RQ_OFFSET);
184 }
185 
186 static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
187 {
188 	u32 val = 0;
189 	val |= qid & DB_TXULP_RING_ID_MASK;
190 	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
191 
192 	wmb();
193 	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
194 }
195 
196 static void be_eq_notify(struct be_adapter *adapter, u16 qid,
197 		bool arm, bool clear_int, u16 num_popped)
198 {
199 	u32 val = 0;
200 	val |= qid & DB_EQ_RING_ID_MASK;
201 	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
202 			DB_EQ_RING_ID_EXT_MASK_SHIFT);
203 
204 	if (adapter->eeh_error)
205 		return;
206 
207 	if (arm)
208 		val |= 1 << DB_EQ_REARM_SHIFT;
209 	if (clear_int)
210 		val |= 1 << DB_EQ_CLR_SHIFT;
211 	val |= 1 << DB_EQ_EVNT_SHIFT;
212 	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
213 	iowrite32(val, adapter->db + DB_EQ_OFFSET);
214 }
215 
216 void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
217 {
218 	u32 val = 0;
219 	val |= qid & DB_CQ_RING_ID_MASK;
220 	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
221 			DB_CQ_RING_ID_EXT_MASK_SHIFT);
222 
223 	if (adapter->eeh_error)
224 		return;
225 
226 	if (arm)
227 		val |= 1 << DB_CQ_REARM_SHIFT;
228 	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
229 	iowrite32(val, adapter->db + DB_CQ_OFFSET);
230 }
231 
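/* Set a new primary MAC address: query the currently programmed MAC and,
 * only if the new address differs, add it as a pmac and delete the old
 * pmac entry before updating netdev->dev_addr.
 */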
232 static int be_mac_addr_set(struct net_device *netdev, void *p)
233 {
234 	struct be_adapter *adapter = netdev_priv(netdev);
235 	struct sockaddr *addr = p;
236 	int status = 0;
237 	u8 current_mac[ETH_ALEN];
238 	u32 pmac_id = adapter->pmac_id[0];
239 
240 	if (!is_valid_ether_addr(addr->sa_data))
241 		return -EADDRNOTAVAIL;
242 
243 	status = be_cmd_mac_addr_query(adapter, current_mac,
244 				MAC_ADDRESS_TYPE_NETWORK, false,
245 				adapter->if_handle, 0);
246 	if (status)
247 		goto err;
248 
249 	if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
250 		status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
251 				adapter->if_handle, &adapter->pmac_id[0], 0);
252 		if (status)
253 			goto err;
254 
255 		be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
256 	}
257 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
258 	return 0;
259 err:
260 	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
261 	return status;
262 }
263 
264 static void populate_be2_stats(struct be_adapter *adapter)
265 {
266 	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
267 	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
268 	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
269 	struct be_port_rxf_stats_v0 *port_stats =
270 					&rxf_stats->port[adapter->port_num];
271 	struct be_drv_stats *drvs = &adapter->drv_stats;
272 
273 	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
274 	drvs->rx_pause_frames = port_stats->rx_pause_frames;
275 	drvs->rx_crc_errors = port_stats->rx_crc_errors;
276 	drvs->rx_control_frames = port_stats->rx_control_frames;
277 	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
278 	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
279 	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
280 	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
281 	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
282 	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
283 	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
284 	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
285 	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
286 	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
287 	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
288 	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
289 	drvs->rx_dropped_header_too_small =
290 		port_stats->rx_dropped_header_too_small;
291 	drvs->rx_address_mismatch_drops =
292 					port_stats->rx_address_mismatch_drops +
293 					port_stats->rx_vlan_mismatch_drops;
294 	drvs->rx_alignment_symbol_errors =
295 		port_stats->rx_alignment_symbol_errors;
296 
297 	drvs->tx_pauseframes = port_stats->tx_pauseframes;
298 	drvs->tx_controlframes = port_stats->tx_controlframes;
299 
300 	if (adapter->port_num)
301 		drvs->jabber_events = rxf_stats->port1_jabber_events;
302 	else
303 		drvs->jabber_events = rxf_stats->port0_jabber_events;
304 	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
305 	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
306 	drvs->forwarded_packets = rxf_stats->forwarded_packets;
307 	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
308 	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
309 	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
310 	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
311 }
312 
313 static void populate_be3_stats(struct be_adapter *adapter)
314 {
315 	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
316 	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
317 	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
318 	struct be_port_rxf_stats_v1 *port_stats =
319 					&rxf_stats->port[adapter->port_num];
320 	struct be_drv_stats *drvs = &adapter->drv_stats;
321 
322 	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
323 	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
324 	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
325 	drvs->rx_pause_frames = port_stats->rx_pause_frames;
326 	drvs->rx_crc_errors = port_stats->rx_crc_errors;
327 	drvs->rx_control_frames = port_stats->rx_control_frames;
328 	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
329 	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
330 	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
331 	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
332 	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
333 	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
334 	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
335 	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
336 	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
337 	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
338 	drvs->rx_dropped_header_too_small =
339 		port_stats->rx_dropped_header_too_small;
340 	drvs->rx_input_fifo_overflow_drop =
341 		port_stats->rx_input_fifo_overflow_drop;
342 	drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
343 	drvs->rx_alignment_symbol_errors =
344 		port_stats->rx_alignment_symbol_errors;
345 	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
346 	drvs->tx_pauseframes = port_stats->tx_pauseframes;
347 	drvs->tx_controlframes = port_stats->tx_controlframes;
348 	drvs->jabber_events = port_stats->jabber_events;
349 	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
350 	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
351 	drvs->forwarded_packets = rxf_stats->forwarded_packets;
352 	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
353 	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
354 	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
355 	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
356 }
357 
358 static void populate_lancer_stats(struct be_adapter *adapter)
359 {
361 	struct be_drv_stats *drvs = &adapter->drv_stats;
362 	struct lancer_pport_stats *pport_stats =
363 					pport_stats_from_cmd(adapter);
364 
365 	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
366 	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
367 	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
368 	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
369 	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
370 	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
371 	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
372 	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
373 	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
374 	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
375 	drvs->rx_dropped_tcp_length =
376 				pport_stats->rx_dropped_invalid_tcp_length;
377 	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
378 	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
379 	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
380 	drvs->rx_dropped_header_too_small =
381 				pport_stats->rx_dropped_header_too_small;
382 	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
383 	drvs->rx_address_mismatch_drops =
384 					pport_stats->rx_address_mismatch_drops +
385 					pport_stats->rx_vlan_mismatch_drops;
386 	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
387 	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
388 	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
389 	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
390 	drvs->jabber_events = pport_stats->rx_jabbers;
391 	drvs->forwarded_packets = pport_stats->num_forwards_lo;
392 	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
393 	drvs->rx_drops_too_many_frags =
394 				pport_stats->rx_drops_too_many_frags_lo;
395 }
396 
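/* Fold a 16-bit HW counter (which wraps at 65535) into a 32-bit
 * accumulator; a wrap is detected when the new sample is smaller than
 * the low 16 bits of the accumulator.
 */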
397 static void accumulate_16bit_val(u32 *acc, u16 val)
398 {
399 #define lo(x)			(x & 0xFFFF)
400 #define hi(x)			(x & 0xFFFF0000)
401 	bool wrapped = val < lo(*acc);
402 	u32 newacc = hi(*acc) + val;
403 
404 	if (wrapped)
405 		newacc += 65536;
406 	ACCESS_ONCE(*acc) = newacc;
407 }
408 
409 void be_parse_stats(struct be_adapter *adapter)
410 {
411 	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
412 	struct be_rx_obj *rxo;
413 	int i;
414 
415 	if (adapter->generation == BE_GEN3) {
416 		if (lancer_chip(adapter))
417 			populate_lancer_stats(adapter);
418 		else
419 			populate_be3_stats(adapter);
420 	} else {
421 		populate_be2_stats(adapter);
422 	}
423 
424 	if (lancer_chip(adapter))
425 		goto done;
426 
427 	/* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
428 	for_all_rx_queues(adapter, rxo, i) {
429 		/* this erx HW counter can wrap around after 65535;
430 		 * the driver accumulates it into a 32-bit value
431 		 */
432 		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
433 				(u16)erx->rx_drops_no_fragments[rxo->q.id]);
434 	}
435 done:
436 	return;
437 }
438 
439 static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
440 					struct rtnl_link_stats64 *stats)
441 {
442 	struct be_adapter *adapter = netdev_priv(netdev);
443 	struct be_drv_stats *drvs = &adapter->drv_stats;
444 	struct be_rx_obj *rxo;
445 	struct be_tx_obj *txo;
446 	u64 pkts, bytes;
447 	unsigned int start;
448 	int i;
449 
450 	for_all_rx_queues(adapter, rxo, i) {
451 		const struct be_rx_stats *rx_stats = rx_stats(rxo);
452 		do {
453 			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
454 			pkts = rx_stats(rxo)->rx_pkts;
455 			bytes = rx_stats(rxo)->rx_bytes;
456 		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
457 		stats->rx_packets += pkts;
458 		stats->rx_bytes += bytes;
459 		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
460 		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
461 					rx_stats(rxo)->rx_drops_no_frags;
462 	}
463 
464 	for_all_tx_queues(adapter, txo, i) {
465 		const struct be_tx_stats *tx_stats = tx_stats(txo);
466 		do {
467 			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
468 			pkts = tx_stats(txo)->tx_pkts;
469 			bytes = tx_stats(txo)->tx_bytes;
470 		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
471 		stats->tx_packets += pkts;
472 		stats->tx_bytes += bytes;
473 	}
474 
475 	/* bad pkts received */
476 	stats->rx_errors = drvs->rx_crc_errors +
477 		drvs->rx_alignment_symbol_errors +
478 		drvs->rx_in_range_errors +
479 		drvs->rx_out_range_errors +
480 		drvs->rx_frame_too_long +
481 		drvs->rx_dropped_too_small +
482 		drvs->rx_dropped_too_short +
483 		drvs->rx_dropped_header_too_small +
484 		drvs->rx_dropped_tcp_length +
485 		drvs->rx_dropped_runt;
486 
487 	/* detailed rx errors */
488 	stats->rx_length_errors = drvs->rx_in_range_errors +
489 		drvs->rx_out_range_errors +
490 		drvs->rx_frame_too_long;
491 
492 	stats->rx_crc_errors = drvs->rx_crc_errors;
493 
494 	/* frame alignment errors */
495 	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
496 
497 	/* receiver fifo overrun */
498 	/* drops_no_pbuf is not per i/f; it's per BE card */
499 	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
500 				drvs->rx_input_fifo_overflow_drop +
501 				drvs->rx_drops_no_pbuf;
502 	return stats;
503 }
504 
505 void be_link_status_update(struct be_adapter *adapter, u8 link_status)
506 {
507 	struct net_device *netdev = adapter->netdev;
508 
509 	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
510 		netif_carrier_off(netdev);
511 		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
512 	}
513 
514 	if ((link_status & LINK_STATUS_MASK) == LINK_UP)
515 		netif_carrier_on(netdev);
516 	else
517 		netif_carrier_off(netdev);
518 }
519 
520 static void be_tx_stats_update(struct be_tx_obj *txo,
521 			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
522 {
523 	struct be_tx_stats *stats = tx_stats(txo);
524 
525 	u64_stats_update_begin(&stats->sync);
526 	stats->tx_reqs++;
527 	stats->tx_wrbs += wrb_cnt;
528 	stats->tx_bytes += copied;
529 	stats->tx_pkts += (gso_segs ? gso_segs : 1);
530 	if (stopped)
531 		stats->tx_stops++;
532 	u64_stats_update_end(&stats->sync);
533 }
534 
535 /* Determine number of WRB entries needed to xmit data in an skb */
536 static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
537 								bool *dummy)
538 {
539 	int cnt = (skb->len > skb->data_len);
540 
541 	cnt += skb_shinfo(skb)->nr_frags;
542 
543 	/* to account for hdr wrb */
544 	cnt++;
545 	if (lancer_chip(adapter) || !(cnt & 1)) {
546 		*dummy = false;
547 	} else {
548 		/* add a dummy to make it an even num */
549 		cnt++;
550 		*dummy = true;
551 	}
552 	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
553 	return cnt;
554 }
555 
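/* Fill a TX WRB with a fragment's 64-bit DMA address (split into hi/lo
 * words) and its length.
 */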
556 static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
557 {
558 	wrb->frag_pa_hi = upper_32_bits(addr);
559 	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
560 	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
561 	wrb->rsvd0 = 0;
562 }
563 
564 static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
565 					struct sk_buff *skb)
566 {
567 	u8 vlan_prio;
568 	u16 vlan_tag;
569 
570 	vlan_tag = vlan_tx_tag_get(skb);
571 	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
572 	/* If vlan priority provided by OS is NOT in available bmap */
573 	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
574 		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
575 				adapter->recommended_prio;
576 
577 	return vlan_tag;
578 }
579 
580 static int be_vlan_tag_chk(struct be_adapter *adapter, struct sk_buff *skb)
581 {
582 	return vlan_tx_tag_present(skb) || adapter->pvid;
583 }
584 
585 static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
586 		struct sk_buff *skb, u32 wrb_cnt, u32 len)
587 {
588 	u16 vlan_tag;
589 
590 	memset(hdr, 0, sizeof(*hdr));
591 
592 	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
593 
594 	if (skb_is_gso(skb)) {
595 		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
596 		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
597 			hdr, skb_shinfo(skb)->gso_size);
598 		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
599 			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
600 		if (lancer_chip(adapter) && adapter->sli_family  ==
601 							LANCER_A0_SLI_FAMILY) {
602 			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
603 			if (is_tcp_pkt(skb))
604 				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
605 								tcpcs, hdr, 1);
606 			else if (is_udp_pkt(skb))
607 				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
608 								udpcs, hdr, 1);
609 		}
610 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
611 		if (is_tcp_pkt(skb))
612 			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
613 		else if (is_udp_pkt(skb))
614 			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
615 	}
616 
617 	if (vlan_tx_tag_present(skb)) {
618 		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
619 		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
620 		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
621 	}
622 
623 	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
624 	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
625 	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
626 	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
627 }
628 
629 static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
630 		bool unmap_single)
631 {
632 	dma_addr_t dma;
633 
634 	be_dws_le_to_cpu(wrb, sizeof(*wrb));
635 
636 	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
637 	if (wrb->frag_len) {
638 		if (unmap_single)
639 			dma_unmap_single(dev, dma, wrb->frag_len,
640 					 DMA_TO_DEVICE);
641 		else
642 			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
643 	}
644 }
645 
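/* DMA-map the skb head and frags and fill one WRB per fragment (plus the
 * header WRB and an optional dummy WRB). Returns the number of data bytes
 * mapped, or 0 after unwinding the mappings on a DMA-mapping error.
 */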
646 static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
647 		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
648 {
649 	dma_addr_t busaddr;
650 	int i, copied = 0;
651 	struct device *dev = &adapter->pdev->dev;
652 	struct sk_buff *first_skb = skb;
653 	struct be_eth_wrb *wrb;
654 	struct be_eth_hdr_wrb *hdr;
655 	bool map_single = false;
656 	u16 map_head;
657 
658 	hdr = queue_head_node(txq);
659 	queue_head_inc(txq);
660 	map_head = txq->head;
661 
662 	if (skb->len > skb->data_len) {
663 		int len = skb_headlen(skb);
664 		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
665 		if (dma_mapping_error(dev, busaddr))
666 			goto dma_err;
667 		map_single = true;
668 		wrb = queue_head_node(txq);
669 		wrb_fill(wrb, busaddr, len);
670 		be_dws_cpu_to_le(wrb, sizeof(*wrb));
671 		queue_head_inc(txq);
672 		copied += len;
673 	}
674 
675 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
676 		const struct skb_frag_struct *frag =
677 			&skb_shinfo(skb)->frags[i];
678 		busaddr = skb_frag_dma_map(dev, frag, 0,
679 					   skb_frag_size(frag), DMA_TO_DEVICE);
680 		if (dma_mapping_error(dev, busaddr))
681 			goto dma_err;
682 		wrb = queue_head_node(txq);
683 		wrb_fill(wrb, busaddr, skb_frag_size(frag));
684 		be_dws_cpu_to_le(wrb, sizeof(*wrb));
685 		queue_head_inc(txq);
686 		copied += skb_frag_size(frag);
687 	}
688 
689 	if (dummy_wrb) {
690 		wrb = queue_head_node(txq);
691 		wrb_fill(wrb, 0, 0);
692 		be_dws_cpu_to_le(wrb, sizeof(*wrb));
693 		queue_head_inc(txq);
694 	}
695 
696 	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
697 	be_dws_cpu_to_le(hdr, sizeof(*hdr));
698 
699 	return copied;
700 dma_err:
701 	txq->head = map_head;
702 	while (copied) {
703 		wrb = queue_head_node(txq);
704 		unmap_tx_frag(dev, wrb, map_single);
705 		map_single = false;
706 		copied -= wrb->frag_len;
707 		queue_head_inc(txq);
708 	}
709 	return 0;
710 }
711 
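/* Insert the VLAN tag into the packet data itself (software tagging) and
 * clear the HW-accel tag in the skb.
 */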
712 static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
713 					     struct sk_buff *skb)
714 {
715 	u16 vlan_tag = 0;
716 
717 	skb = skb_share_check(skb, GFP_ATOMIC);
718 	if (unlikely(!skb))
719 		return skb;
720 
721 	if (vlan_tx_tag_present(skb)) {
722 		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
723 		skb = __vlan_put_tag(skb, vlan_tag);
		/* __vlan_put_tag() frees the skb and returns NULL on failure */
		if (unlikely(!skb))
			return skb;
724 		skb->vlan_tci = 0;
725 	}
726 
727 	return skb;
728 }
729 
730 static netdev_tx_t be_xmit(struct sk_buff *skb,
731 			struct net_device *netdev)
732 {
733 	struct be_adapter *adapter = netdev_priv(netdev);
734 	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
735 	struct be_queue_info *txq = &txo->q;
736 	struct iphdr *ip = NULL;
737 	u32 wrb_cnt = 0, copied = 0;
738 	u32 start = txq->head, eth_hdr_len;
739 	bool dummy_wrb, stopped = false;
740 
741 	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
742 		VLAN_ETH_HLEN : ETH_HLEN;
743 
744 	/* HW has a bug whereby it treats padding bytes as valid data
745 	 * and modifies the IPv4 hdr's 'tot_len' field; trim off the padding
746 	 */
747 	if (skb->len <= 60 && be_vlan_tag_chk(adapter, skb) &&
748 			is_ipv4_pkt(skb)) {
749 		ip = (struct iphdr *)ip_hdr(skb);
750 		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
751 	}
752 
753 	/* HW has a bug wherein it will calculate checksum for VLAN
754 	 * pkts even though checksum offload is disabled.
755 	 * Manually insert the VLAN tag in the pkt to work around it.
756 	 */
757 	if (skb->ip_summed != CHECKSUM_PARTIAL &&
758 			be_vlan_tag_chk(adapter, skb)) {
759 		skb = be_insert_vlan_in_pkt(adapter, skb);
760 		if (unlikely(!skb))
761 			goto tx_drop;
762 	}
763 
764 	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
765 
766 	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
767 	if (copied) {
768 		int gso_segs = skb_shinfo(skb)->gso_segs;
769 
770 		/* record the sent skb in the sent_skb table */
771 		BUG_ON(txo->sent_skb_list[start]);
772 		txo->sent_skb_list[start] = skb;
773 
774 		/* Ensure txq has space for the next skb; else stop the queue
775 		 * *BEFORE* ringing the tx doorbell, so that we serialize the
776 		 * tx compls of the current transmit which'll wake up the queue
777 		 */
778 		atomic_add(wrb_cnt, &txq->used);
779 		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
780 								txq->len) {
781 			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
782 			stopped = true;
783 		}
784 
785 		be_txq_notify(adapter, txq->id, wrb_cnt);
786 
787 		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
788 	} else {
789 		txq->head = start;
790 		dev_kfree_skb_any(skb);
791 	}
792 tx_drop:
793 	return NETDEV_TX_OK;
794 }
795 
796 static int be_change_mtu(struct net_device *netdev, int new_mtu)
797 {
798 	struct be_adapter *adapter = netdev_priv(netdev);
799 	if (new_mtu < BE_MIN_MTU ||
800 			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
801 					(ETH_HLEN + ETH_FCS_LEN))) {
802 		dev_info(&adapter->pdev->dev,
803 			"MTU must be between %d and %d bytes\n",
804 			BE_MIN_MTU,
805 			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
806 		return -EINVAL;
807 	}
808 	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
809 			netdev->mtu, new_mtu);
810 	netdev->mtu = new_mtu;
811 	return 0;
812 }
813 
814 /*
815  * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
816  * If the user configures more, place BE in vlan promiscuous mode.
817  */
818 static int be_vid_config(struct be_adapter *adapter)
819 {
820 	u16 vids[BE_NUM_VLANS_SUPPORTED];
821 	u16 num = 0, i;
822 	int status = 0;
823 
824 	/* No need to further configure vids if in promiscuous mode */
825 	if (adapter->promiscuous)
826 		return 0;
827 
828 	if (adapter->vlans_added > adapter->max_vlans)
829 		goto set_vlan_promisc;
830 
831 	/* Construct VLAN Table to give to HW */
832 	for (i = 0; i < VLAN_N_VID; i++)
833 		if (adapter->vlan_tag[i])
834 			vids[num++] = cpu_to_le16(i);
835 
836 	status = be_cmd_vlan_config(adapter, adapter->if_handle,
837 				    vids, num, 1, 0);
838 
839 	/* Set to VLAN promisc mode as setting VLAN filter failed */
840 	if (status) {
841 		dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
842 		dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
843 		goto set_vlan_promisc;
844 	}
845 
846 	return status;
847 
848 set_vlan_promisc:
849 	status = be_cmd_vlan_config(adapter, adapter->if_handle,
850 				    NULL, 0, 1, 1);
851 	return status;
852 }
853 
854 static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
855 {
856 	struct be_adapter *adapter = netdev_priv(netdev);
857 	int status = 0;
858 
859 	if (!be_physfn(adapter)) {
860 		status = -EINVAL;
861 		goto ret;
862 	}
863 
864 	adapter->vlan_tag[vid] = 1;
865 	if (adapter->vlans_added <= (adapter->max_vlans + 1))
866 		status = be_vid_config(adapter);
867 
868 	if (!status)
869 		adapter->vlans_added++;
870 	else
871 		adapter->vlan_tag[vid] = 0;
872 ret:
873 	return status;
874 }
875 
876 static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
877 {
878 	struct be_adapter *adapter = netdev_priv(netdev);
879 	int status = 0;
880 
881 	if (!be_physfn(adapter)) {
882 		status = -EINVAL;
883 		goto ret;
884 	}
885 
886 	adapter->vlan_tag[vid] = 0;
887 	if (adapter->vlans_added <= adapter->max_vlans)
888 		status = be_vid_config(adapter);
889 
890 	if (!status)
891 		adapter->vlans_added--;
892 	else
893 		adapter->vlan_tag[vid] = 1;
894 ret:
895 	return status;
896 }
897 
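/* Program the RX filter to match the netdev state: promiscuous,
 * multicast-promiscuous, or the configured unicast/multicast address
 * lists, falling back to promiscuous modes when HW filters run out.
 */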
898 static void be_set_rx_mode(struct net_device *netdev)
899 {
900 	struct be_adapter *adapter = netdev_priv(netdev);
901 	int status;
902 
903 	if (netdev->flags & IFF_PROMISC) {
904 		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
905 		adapter->promiscuous = true;
906 		goto done;
907 	}
908 
909 	/* BE was previously in promiscuous mode; disable it */
910 	if (adapter->promiscuous) {
911 		adapter->promiscuous = false;
912 		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
913 
914 		if (adapter->vlans_added)
915 			be_vid_config(adapter);
916 	}
917 
918 	/* Enable multicast promisc if num configured exceeds what we support */
919 	if (netdev->flags & IFF_ALLMULTI ||
920 			netdev_mc_count(netdev) > BE_MAX_MC) {
921 		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
922 		goto done;
923 	}
924 
925 	if (netdev_uc_count(netdev) != adapter->uc_macs) {
926 		struct netdev_hw_addr *ha;
927 		int i = 1; /* First slot is claimed by the Primary MAC */
928 
929 		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
930 			be_cmd_pmac_del(adapter, adapter->if_handle,
931 					adapter->pmac_id[i], 0);
932 		}
933 
934 		if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
935 			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
936 			adapter->promiscuous = true;
937 			goto done;
938 		}
939 
940 		netdev_for_each_uc_addr(ha, adapter->netdev) {
941 			adapter->uc_macs++; /* First slot is for Primary MAC */
942 			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
943 					adapter->if_handle,
944 					&adapter->pmac_id[adapter->uc_macs], 0);
945 		}
946 	}
947 
948 	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
949 
950 	/* Set to MCAST promisc mode if setting MULTICAST address fails */
951 	if (status) {
952 		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
953 		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
954 		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
955 	}
956 done:
957 	return;
958 }
959 
960 static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
961 {
962 	struct be_adapter *adapter = netdev_priv(netdev);
963 	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
964 	int status;
965 
966 	if (!sriov_enabled(adapter))
967 		return -EPERM;
968 
969 	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
970 		return -EINVAL;
971 
972 	if (lancer_chip(adapter)) {
973 		status = be_cmd_set_mac_list(adapter,  mac, 1, vf + 1);
974 	} else {
975 		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
976 					 vf_cfg->pmac_id, vf + 1);
977 
978 		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
979 					 &vf_cfg->pmac_id, vf + 1);
980 	}
981 
982 	if (status)
983 		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
984 				mac, vf);
985 	else
986 		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
987 
988 	return status;
989 }
990 
991 static int be_get_vf_config(struct net_device *netdev, int vf,
992 			struct ifla_vf_info *vi)
993 {
994 	struct be_adapter *adapter = netdev_priv(netdev);
995 	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
996 
997 	if (!sriov_enabled(adapter))
998 		return -EPERM;
999 
1000 	if (vf >= adapter->num_vfs)
1001 		return -EINVAL;
1002 
1003 	vi->vf = vf;
1004 	vi->tx_rate = vf_cfg->tx_rate;
1005 	vi->vlan = vf_cfg->vlan_tag;
1006 	vi->qos = 0;
1007 	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
1008 
1009 	return 0;
1010 }
1011 
1012 static int be_set_vf_vlan(struct net_device *netdev,
1013 			int vf, u16 vlan, u8 qos)
1014 {
1015 	struct be_adapter *adapter = netdev_priv(netdev);
1016 	int status = 0;
1017 
1018 	if (!sriov_enabled(adapter))
1019 		return -EPERM;
1020 
1021 	if (vf >= adapter->num_vfs || vlan > 4095)
1022 		return -EINVAL;
1023 
1024 	if (vlan) {
1025 		if (adapter->vf_cfg[vf].vlan_tag != vlan) {
1026 			/* If this is a new value, program it; else skip */
1027 			adapter->vf_cfg[vf].vlan_tag = vlan;
1028 
1029 			status = be_cmd_set_hsw_config(adapter, vlan,
1030 				vf + 1, adapter->vf_cfg[vf].if_handle);
1031 		}
1032 	} else {
1033 		/* Reset Transparent Vlan Tagging. */
1034 		adapter->vf_cfg[vf].vlan_tag = 0;
1035 		vlan = adapter->vf_cfg[vf].def_vid;
1036 		status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1037 			adapter->vf_cfg[vf].if_handle);
1038 	}
1039 
1041 	if (status)
1042 		dev_info(&adapter->pdev->dev,
1043 				"VLAN %d config on VF %d failed\n", vlan, vf);
1044 	return status;
1045 }
1046 
1047 static int be_set_vf_tx_rate(struct net_device *netdev,
1048 			int vf, int rate)
1049 {
1050 	struct be_adapter *adapter = netdev_priv(netdev);
1051 	int status = 0;
1052 
1053 	if (!sriov_enabled(adapter))
1054 		return -EPERM;
1055 
1056 	if (vf >= adapter->num_vfs)
1057 		return -EINVAL;
1058 
1059 	if (rate < 100 || rate > 10000) {
1060 		dev_err(&adapter->pdev->dev,
1061 			"tx rate must be between 100 and 10000 Mbps\n");
1062 		return -EINVAL;
1063 	}
1064 
1065 	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
1066 
1067 	if (status)
1068 		dev_err(&adapter->pdev->dev,
1069 				"tx rate %d on VF %d failed\n", rate, vf);
1070 	else
1071 		adapter->vf_cfg[vf].tx_rate = rate;
1072 	return status;
1073 }
1074 
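/* Count this adapter's VFs by walking the PCI devices that match the VF
 * offset/stride read from the PF's SR-IOV capability. Returns either the
 * total number of VFs found or only those assigned to a guest, depending
 * on vf_state.
 */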
1075 static int be_find_vfs(struct be_adapter *adapter, int vf_state)
1076 {
1077 	struct pci_dev *dev, *pdev = adapter->pdev;
1078 	int vfs = 0, assigned_vfs = 0, pos, vf_fn;
1079 	u16 offset, stride;
1080 
1081 	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
1082 	if (!pos)
1083 		return 0;
1084 	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
1085 	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);
1086 
1087 	dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
1088 	while (dev) {
1089 		vf_fn = (pdev->devfn + offset + stride * vfs) & 0xFFFF;
1090 		if (dev->is_virtfn && dev->devfn == vf_fn &&
1091 			dev->bus->number == pdev->bus->number) {
1092 			vfs++;
1093 			if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
1094 				assigned_vfs++;
1095 		}
1096 		dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
1097 	}
1098 	return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
1099 }
1100 
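/* Adaptive interrupt coalescing: once a second, compute the RX packet
 * rate of this EQ's RX queue and derive a new EQ delay, clamped between
 * min_eqd and max_eqd; the delay is reprogrammed only when it changes.
 */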
1101 static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
1102 {
1103 	struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
1104 	ulong now = jiffies;
1105 	ulong delta = now - stats->rx_jiffies;
1106 	u64 pkts;
1107 	unsigned int start, eqd;
1108 
1109 	if (!eqo->enable_aic) {
1110 		eqd = eqo->eqd;
1111 		goto modify_eqd;
1112 	}
1113 
1114 	if (eqo->idx >= adapter->num_rx_qs)
1115 		return;
1116 
1117 	stats = rx_stats(&adapter->rx_obj[eqo->idx]);
1118 
1119 	/* Wrapped around */
1120 	if (time_before(now, stats->rx_jiffies)) {
1121 		stats->rx_jiffies = now;
1122 		return;
1123 	}
1124 
1125 	/* Update once a second */
1126 	if (delta < HZ)
1127 		return;
1128 
1129 	do {
1130 		start = u64_stats_fetch_begin_bh(&stats->sync);
1131 		pkts = stats->rx_pkts;
1132 	} while (u64_stats_fetch_retry_bh(&stats->sync, start));
1133 
1134 	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
1135 	stats->rx_pkts_prev = pkts;
1136 	stats->rx_jiffies = now;
1137 	eqd = (stats->rx_pps / 110000) << 3;
1138 	eqd = min(eqd, eqo->max_eqd);
1139 	eqd = max(eqd, eqo->min_eqd);
1140 	if (eqd < 10)
1141 		eqd = 0;
1142 
1143 modify_eqd:
1144 	if (eqd != eqo->cur_eqd) {
1145 		be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
1146 		eqo->cur_eqd = eqd;
1147 	}
1148 }
1149 
1150 static void be_rx_stats_update(struct be_rx_obj *rxo,
1151 		struct be_rx_compl_info *rxcp)
1152 {
1153 	struct be_rx_stats *stats = rx_stats(rxo);
1154 
1155 	u64_stats_update_begin(&stats->sync);
1156 	stats->rx_compl++;
1157 	stats->rx_bytes += rxcp->pkt_size;
1158 	stats->rx_pkts++;
1159 	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
1160 		stats->rx_mcast_pkts++;
1161 	if (rxcp->err)
1162 		stats->rx_compl_err++;
1163 	u64_stats_update_end(&stats->sync);
1164 }
1165 
1166 static inline bool csum_passed(struct be_rx_compl_info *rxcp)
1167 {
1168 	/* L4 checksum is not reliable for non-TCP/UDP packets.
1169 	 * Also ignore ipcksm for ipv6 pkts */
1170 	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1171 				(rxcp->ip_csum || rxcp->ipv6);
1172 }
1173 
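/* Return the page_info for the given RX frag index; unmap the backing
 * page if this frag is its last user, and decrement the count of posted
 * RX buffers.
 */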
1174 static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
1175 						u16 frag_idx)
1176 {
1177 	struct be_adapter *adapter = rxo->adapter;
1178 	struct be_rx_page_info *rx_page_info;
1179 	struct be_queue_info *rxq = &rxo->q;
1180 
1181 	rx_page_info = &rxo->page_info_tbl[frag_idx];
1182 	BUG_ON(!rx_page_info->page);
1183 
1184 	if (rx_page_info->last_page_user) {
1185 		dma_unmap_page(&adapter->pdev->dev,
1186 			       dma_unmap_addr(rx_page_info, bus),
1187 			       adapter->big_page_size, DMA_FROM_DEVICE);
1188 		rx_page_info->last_page_user = false;
1189 	}
1190 
1191 	atomic_dec(&rxq->used);
1192 	return rx_page_info;
1193 }
1194 
1195 /* Throw away the data in the Rx completion */
1196 static void be_rx_compl_discard(struct be_rx_obj *rxo,
1197 				struct be_rx_compl_info *rxcp)
1198 {
1199 	struct be_queue_info *rxq = &rxo->q;
1200 	struct be_rx_page_info *page_info;
1201 	u16 i, num_rcvd = rxcp->num_rcvd;
1202 
1203 	for (i = 0; i < num_rcvd; i++) {
1204 		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1205 		put_page(page_info->page);
1206 		memset(page_info, 0, sizeof(*page_info));
1207 		index_inc(&rxcp->rxq_idx, rxq->len);
1208 	}
1209 }
1210 
1211 /*
1212  * skb_fill_rx_data forms a complete skb for an ether frame
1213  * indicated by rxcp.
1214  */
1215 static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1216 			     struct be_rx_compl_info *rxcp)
1217 {
1218 	struct be_queue_info *rxq = &rxo->q;
1219 	struct be_rx_page_info *page_info;
1220 	u16 i, j;
1221 	u16 hdr_len, curr_frag_len, remaining;
1222 	u8 *start;
1223 
1224 	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1225 	start = page_address(page_info->page) + page_info->page_offset;
1226 	prefetch(start);
1227 
1228 	/* Copy data in the first descriptor of this completion */
1229 	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
1230 
1231 	skb->len = curr_frag_len;
1232 	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1233 		memcpy(skb->data, start, curr_frag_len);
1234 		/* Complete packet has now been moved to data */
1235 		put_page(page_info->page);
1236 		skb->data_len = 0;
1237 		skb->tail += curr_frag_len;
1238 	} else {
1239 		hdr_len = ETH_HLEN;
1240 		memcpy(skb->data, start, hdr_len);
1241 		skb_shinfo(skb)->nr_frags = 1;
1242 		skb_frag_set_page(skb, 0, page_info->page);
1243 		skb_shinfo(skb)->frags[0].page_offset =
1244 					page_info->page_offset + hdr_len;
1245 		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
1246 		skb->data_len = curr_frag_len - hdr_len;
1247 		skb->truesize += rx_frag_size;
1248 		skb->tail += hdr_len;
1249 	}
1250 	page_info->page = NULL;
1251 
1252 	if (rxcp->pkt_size <= rx_frag_size) {
1253 		BUG_ON(rxcp->num_rcvd != 1);
1254 		return;
1255 	}
1256 
1257 	/* More frags present for this completion */
1258 	index_inc(&rxcp->rxq_idx, rxq->len);
1259 	remaining = rxcp->pkt_size - curr_frag_len;
1260 	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1261 		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1262 		curr_frag_len = min(remaining, rx_frag_size);
1263 
1264 		/* Coalesce all frags from the same physical page in one slot */
1265 		if (page_info->page_offset == 0) {
1266 			/* Fresh page */
1267 			j++;
1268 			skb_frag_set_page(skb, j, page_info->page);
1269 			skb_shinfo(skb)->frags[j].page_offset =
1270 							page_info->page_offset;
1271 			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1272 			skb_shinfo(skb)->nr_frags++;
1273 		} else {
1274 			put_page(page_info->page);
1275 		}
1276 
1277 		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1278 		skb->len += curr_frag_len;
1279 		skb->data_len += curr_frag_len;
1280 		skb->truesize += rx_frag_size;
1281 		remaining -= curr_frag_len;
1282 		index_inc(&rxcp->rxq_idx, rxq->len);
1283 		page_info->page = NULL;
1284 	}
1285 	BUG_ON(j > MAX_SKB_FRAGS);
1286 }
1287 
1288 /* Process the RX completion indicated by rxcp when GRO is disabled */
1289 static void be_rx_compl_process(struct be_rx_obj *rxo,
1290 				struct be_rx_compl_info *rxcp)
1291 {
1292 	struct be_adapter *adapter = rxo->adapter;
1293 	struct net_device *netdev = adapter->netdev;
1294 	struct sk_buff *skb;
1295 
1296 	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
1297 	if (unlikely(!skb)) {
1298 		rx_stats(rxo)->rx_drops_no_skbs++;
1299 		be_rx_compl_discard(rxo, rxcp);
1300 		return;
1301 	}
1302 
1303 	skb_fill_rx_data(rxo, skb, rxcp);
1304 
1305 	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1306 		skb->ip_summed = CHECKSUM_UNNECESSARY;
1307 	else
1308 		skb_checksum_none_assert(skb);
1309 
1310 	skb->protocol = eth_type_trans(skb, netdev);
1311 	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1312 	if (netdev->features & NETIF_F_RXHASH)
1313 		skb->rxhash = rxcp->rss_hash;
1314 
1316 	if (rxcp->vlanf)
1317 		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1318 
1319 	netif_receive_skb(skb);
1320 }
1321 
1322 /* Process the RX completion indicated by rxcp when GRO is enabled */
1323 void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
1324 			     struct be_rx_compl_info *rxcp)
1325 {
1326 	struct be_adapter *adapter = rxo->adapter;
1327 	struct be_rx_page_info *page_info;
1328 	struct sk_buff *skb = NULL;
1329 	struct be_queue_info *rxq = &rxo->q;
1330 	u16 remaining, curr_frag_len;
1331 	u16 i, j;
1332 
1333 	skb = napi_get_frags(napi);
1334 	if (!skb) {
1335 		be_rx_compl_discard(rxo, rxcp);
1336 		return;
1337 	}
1338 
1339 	remaining = rxcp->pkt_size;
1340 	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1341 		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1342 
1343 		curr_frag_len = min(remaining, rx_frag_size);
1344 
1345 		/* Coalesce all frags from the same physical page in one slot */
1346 		if (i == 0 || page_info->page_offset == 0) {
1347 			/* First frag or Fresh page */
1348 			j++;
1349 			skb_frag_set_page(skb, j, page_info->page);
1350 			skb_shinfo(skb)->frags[j].page_offset =
1351 							page_info->page_offset;
1352 			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1353 		} else {
1354 			put_page(page_info->page);
1355 		}
1356 		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1357 		skb->truesize += rx_frag_size;
1358 		remaining -= curr_frag_len;
1359 		index_inc(&rxcp->rxq_idx, rxq->len);
1360 		memset(page_info, 0, sizeof(*page_info));
1361 	}
1362 	BUG_ON(j > MAX_SKB_FRAGS);
1363 
1364 	skb_shinfo(skb)->nr_frags = j + 1;
1365 	skb->len = rxcp->pkt_size;
1366 	skb->data_len = rxcp->pkt_size;
1367 	skb->ip_summed = CHECKSUM_UNNECESSARY;
1368 	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1369 	if (adapter->netdev->features & NETIF_F_RXHASH)
1370 		skb->rxhash = rxcp->rss_hash;
1371 
1372 	if (rxcp->vlanf)
1373 		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1374 
1375 	napi_gro_frags(napi);
1376 }
1377 
1378 static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1379 				 struct be_rx_compl_info *rxcp)
1380 {
1381 	rxcp->pkt_size =
1382 		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1383 	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1384 	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1385 	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1386 	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1387 	rxcp->ip_csum =
1388 		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1389 	rxcp->l4_csum =
1390 		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1391 	rxcp->ipv6 =
1392 		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1393 	rxcp->rxq_idx =
1394 		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1395 	rxcp->num_rcvd =
1396 		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1397 	rxcp->pkt_type =
1398 		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1399 	rxcp->rss_hash =
1400 		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1401 	if (rxcp->vlanf) {
1402 		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1403 					  compl);
1404 		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1405 					       compl);
1406 	}
1407 	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1408 }
1409 
1410 static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1411 				 struct be_rx_compl_info *rxcp)
1412 {
1413 	rxcp->pkt_size =
1414 		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1415 	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1416 	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1417 	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1418 	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1419 	rxcp->ip_csum =
1420 		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1421 	rxcp->l4_csum =
1422 		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1423 	rxcp->ipv6 =
1424 		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1425 	rxcp->rxq_idx =
1426 		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1427 	rxcp->num_rcvd =
1428 		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1429 	rxcp->pkt_type =
1430 		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1431 	rxcp->rss_hash =
1432 		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1433 	if (rxcp->vlanf) {
1434 		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1435 					  compl);
1436 		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1437 					       compl);
1438 	}
1439 	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1440 }
1441 
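/* Fetch the next valid RX completion from the CQ, parse it into rxo->rxcp
 * (using the v0 or v1 layout as appropriate) and invalidate the CQ entry.
 * Returns NULL when no completion is pending.
 */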
1442 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1443 {
1444 	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1445 	struct be_rx_compl_info *rxcp = &rxo->rxcp;
1446 	struct be_adapter *adapter = rxo->adapter;
1447 
1448 	/* For checking the valid bit it is Ok to use either definition as the
1449 	 * valid bit is at the same position in both v0 and v1 Rx compl */
1450 	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1451 		return NULL;
1452 
1453 	rmb();
1454 	be_dws_le_to_cpu(compl, sizeof(*compl));
1455 
1456 	if (adapter->be3_native)
1457 		be_parse_rx_compl_v1(compl, rxcp);
1458 	else
1459 		be_parse_rx_compl_v0(compl, rxcp);
1460 
1461 	if (rxcp->vlanf) {
1462 		/* vlanf could be wrongly set in some cards;
1463 		 * ignore it if vtm is not set */
1464 		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
1465 			rxcp->vlanf = 0;
1466 
1467 		if (!lancer_chip(adapter))
1468 			rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1469 
1470 		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1471 		    !adapter->vlan_tag[rxcp->vlan_tag])
1472 			rxcp->vlanf = 0;
1473 	}
1474 
1475 	/* As the compl has been parsed, reset it; we won't touch it again */
1476 	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1477 
1478 	queue_tail_inc(&rxo->cq);
1479 	return rxcp;
1480 }
1481 
1482 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1483 {
1484 	u32 order = get_order(size);
1485 
1486 	if (order > 0)
1487 		gfp |= __GFP_COMP;
1488 	return  alloc_pages(gfp, order);
1489 }
1490 
1491 /*
1492  * Allocate a page, split it into fragments of size rx_frag_size and post
1493  * them as receive buffers to BE
1494  */
1495 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1496 {
1497 	struct be_adapter *adapter = rxo->adapter;
1498 	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1499 	struct be_queue_info *rxq = &rxo->q;
1500 	struct page *pagep = NULL;
1501 	struct be_eth_rx_d *rxd;
1502 	u64 page_dmaaddr = 0, frag_dmaaddr;
1503 	u32 posted, page_offset = 0;
1504 
1505 	page_info = &rxo->page_info_tbl[rxq->head];
1506 	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1507 		if (!pagep) {
1508 			pagep = be_alloc_pages(adapter->big_page_size, gfp);
1509 			if (unlikely(!pagep)) {
1510 				rx_stats(rxo)->rx_post_fail++;
1511 				break;
1512 			}
1513 			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1514 						    0, adapter->big_page_size,
1515 						    DMA_FROM_DEVICE);
1516 			page_info->page_offset = 0;
1517 		} else {
1518 			get_page(pagep);
1519 			page_info->page_offset = page_offset + rx_frag_size;
1520 		}
1521 		page_offset = page_info->page_offset;
1522 		page_info->page = pagep;
1523 		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1524 		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1525 
1526 		rxd = queue_head_node(rxq);
1527 		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1528 		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1529 
1530 		/* Any space left in the current big page for another frag? */
1531 		if ((page_offset + rx_frag_size + rx_frag_size) >
1532 					adapter->big_page_size) {
1533 			pagep = NULL;
1534 			page_info->last_page_user = true;
1535 		}
1536 
1537 		prev_page_info = page_info;
1538 		queue_head_inc(rxq);
1539 		page_info = &rxo->page_info_tbl[rxq->head];
1540 	}
1541 	if (pagep)
1542 		prev_page_info->last_page_user = true;
1543 
1544 	if (posted) {
1545 		atomic_add(posted, &rxq->used);
1546 		be_rxq_notify(adapter, rxq->id, posted);
1547 	} else if (atomic_read(&rxq->used) == 0) {
1548 		/* Let be_worker replenish when memory is available */
1549 		rxo->rx_post_starved = true;
1550 	}
1551 }
1552 
1553 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1554 {
1555 	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1556 
1557 	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1558 		return NULL;
1559 
1560 	rmb();
1561 	be_dws_le_to_cpu(txcp, sizeof(*txcp));
1562 
1563 	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1564 
1565 	queue_tail_inc(tx_cq);
1566 	return txcp;
1567 }
1568 
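/* Unmap all WRBs of the transmitted skb indicated by the TX completion
 * and free the skb. Returns the number of WRBs reclaimed, including the
 * header WRB and any dummy WRB.
 */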
1569 static u16 be_tx_compl_process(struct be_adapter *adapter,
1570 		struct be_tx_obj *txo, u16 last_index)
1571 {
1572 	struct be_queue_info *txq = &txo->q;
1573 	struct be_eth_wrb *wrb;
1574 	struct sk_buff **sent_skbs = txo->sent_skb_list;
1575 	struct sk_buff *sent_skb;
1576 	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1577 	bool unmap_skb_hdr = true;
1578 
1579 	sent_skb = sent_skbs[txq->tail];
1580 	BUG_ON(!sent_skb);
1581 	sent_skbs[txq->tail] = NULL;
1582 
1583 	/* skip header wrb */
1584 	queue_tail_inc(txq);
1585 
1586 	do {
1587 		cur_index = txq->tail;
1588 		wrb = queue_tail_node(txq);
1589 		unmap_tx_frag(&adapter->pdev->dev, wrb,
1590 			      (unmap_skb_hdr && skb_headlen(sent_skb)));
1591 		unmap_skb_hdr = false;
1592 
1593 		num_wrbs++;
1594 		queue_tail_inc(txq);
1595 	} while (cur_index != last_index);
1596 
1597 	kfree_skb(sent_skb);
1598 	return num_wrbs;
1599 }
1600 
1601 /* Return the number of events in the event queue */
1602 static inline int events_get(struct be_eq_obj *eqo)
1603 {
1604 	struct be_eq_entry *eqe;
1605 	int num = 0;
1606 
1607 	do {
1608 		eqe = queue_tail_node(&eqo->q);
1609 		if (eqe->evt == 0)
1610 			break;
1611 
1612 		rmb();
1613 		eqe->evt = 0;
1614 		num++;
1615 		queue_tail_inc(&eqo->q);
1616 	} while (true);
1617 
1618 	return num;
1619 }
1620 
1621 static int event_handle(struct be_eq_obj *eqo)
1622 {
1623 	bool rearm = false;
1624 	int num = events_get(eqo);
1625 
1626 	/* Deal with any spurious interrupts that come without events */
1627 	if (!num)
1628 		rearm = true;
1629 
1630 	if (num || msix_enabled(eqo->adapter))
1631 		be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);
1632 
1633 	if (num)
1634 		napi_schedule(&eqo->napi);
1635 
1636 	return num;
1637 }
1638 
1639 /* Leaves the EQ in a disarmed state */
1640 static void be_eq_clean(struct be_eq_obj *eqo)
1641 {
1642 	int num = events_get(eqo);
1643 
1644 	be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1645 }
1646 
1647 static void be_rx_cq_clean(struct be_rx_obj *rxo)
1648 {
1649 	struct be_rx_page_info *page_info;
1650 	struct be_queue_info *rxq = &rxo->q;
1651 	struct be_queue_info *rx_cq = &rxo->cq;
1652 	struct be_rx_compl_info *rxcp;
1653 	u16 tail;
1654 
1655 	/* First, clean up pending rx completions */
1656 	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1657 		be_rx_compl_discard(rxo, rxcp);
1658 		be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
1659 	}
1660 
1661 	/* Then free posted rx buffers that were not used */
1662 	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1663 	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1664 		page_info = get_rx_page_info(rxo, tail);
1665 		put_page(page_info->page);
1666 		memset(page_info, 0, sizeof(*page_info));
1667 	}
1668 	BUG_ON(atomic_read(&rxq->used));
1669 	rxq->tail = rxq->head = 0;
1670 }
1671 
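/* Drain TX completions for up to 200ms; any WRBs whose completions never
 * arrive are then reclaimed forcibly and their skbs freed.
 */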
1672 static void be_tx_compl_clean(struct be_adapter *adapter)
1673 {
1674 	struct be_tx_obj *txo;
1675 	struct be_queue_info *txq;
1676 	struct be_eth_tx_compl *txcp;
1677 	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1678 	struct sk_buff *sent_skb;
1679 	bool dummy_wrb;
1680 	int i, pending_txqs;
1681 
1682 	/* Wait for a max of 200ms for all the tx-completions to arrive. */
1683 	do {
1684 		pending_txqs = adapter->num_tx_qs;
1685 
1686 		for_all_tx_queues(adapter, txo, i) {
1687 			txq = &txo->q;
1688 			while ((txcp = be_tx_compl_get(&txo->cq))) {
1689 				end_idx =
1690 					AMAP_GET_BITS(struct amap_eth_tx_compl,
1691 						      wrb_index, txcp);
1692 				num_wrbs += be_tx_compl_process(adapter, txo,
1693 								end_idx);
1694 				cmpl++;
1695 			}
1696 			if (cmpl) {
1697 				be_cq_notify(adapter, txo->cq.id, false, cmpl);
1698 				atomic_sub(num_wrbs, &txq->used);
1699 				cmpl = 0;
1700 				num_wrbs = 0;
1701 			}
1702 			if (atomic_read(&txq->used) == 0)
1703 				pending_txqs--;
1704 		}
1705 
1706 		if (pending_txqs == 0 || ++timeo > 200)
1707 			break;
1708 
1709 		mdelay(1);
1710 	} while (true);
1711 
1712 	for_all_tx_queues(adapter, txo, i) {
1713 		txq = &txo->q;
1714 		if (atomic_read(&txq->used))
1715 			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1716 				atomic_read(&txq->used));
1717 
1718 		/* free posted tx for which compls will never arrive */
1719 		while (atomic_read(&txq->used)) {
1720 			sent_skb = txo->sent_skb_list[txq->tail];
1721 			end_idx = txq->tail;
1722 			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1723 						   &dummy_wrb);
1724 			index_adv(&end_idx, num_wrbs - 1, txq->len);
1725 			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1726 			atomic_sub(num_wrbs, &txq->used);
1727 		}
1728 	}
1729 }
1730 
1731 static void be_evt_queues_destroy(struct be_adapter *adapter)
1732 {
1733 	struct be_eq_obj *eqo;
1734 	int i;
1735 
1736 	for_all_evt_queues(adapter, eqo, i) {
1737 		if (eqo->q.created) {
1738 			be_eq_clean(eqo);
1739 			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1740 		}
1741 		be_queue_free(adapter, &eqo->q);
1742 	}
1743 }
1744 
1745 static int be_evt_queues_create(struct be_adapter *adapter)
1746 {
1747 	struct be_queue_info *eq;
1748 	struct be_eq_obj *eqo;
1749 	int i, rc;
1750 
1751 	adapter->num_evt_qs = num_irqs(adapter);
1752 
1753 	for_all_evt_queues(adapter, eqo, i) {
1754 		eqo->adapter = adapter;
1755 		eqo->tx_budget = BE_TX_BUDGET;
1756 		eqo->idx = i;
1757 		eqo->max_eqd = BE_MAX_EQD;
1758 		eqo->enable_aic = true;
1759 
1760 		eq = &eqo->q;
1761 		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1762 					sizeof(struct be_eq_entry));
1763 		if (rc)
1764 			return rc;
1765 
1766 		rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1767 		if (rc)
1768 			return rc;
1769 	}
1770 	return 0;
1771 }
1772 
1773 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1774 {
1775 	struct be_queue_info *q;
1776 
1777 	q = &adapter->mcc_obj.q;
1778 	if (q->created)
1779 		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1780 	be_queue_free(adapter, q);
1781 
1782 	q = &adapter->mcc_obj.cq;
1783 	if (q->created)
1784 		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1785 	be_queue_free(adapter, q);
1786 }
1787 
1788 /* Must be called only after TX qs are created as MCC shares TX EQ */
1789 static int be_mcc_queues_create(struct be_adapter *adapter)
1790 {
1791 	struct be_queue_info *q, *cq;
1792 
1793 	cq = &adapter->mcc_obj.cq;
1794 	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1795 			sizeof(struct be_mcc_compl)))
1796 		goto err;
1797 
1798 	/* Use the default EQ for MCC completions */
1799 	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
1800 		goto mcc_cq_free;
1801 
1802 	q = &adapter->mcc_obj.q;
1803 	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1804 		goto mcc_cq_destroy;
1805 
1806 	if (be_cmd_mccq_create(adapter, q, cq))
1807 		goto mcc_q_free;
1808 
1809 	return 0;
1810 
1811 mcc_q_free:
1812 	be_queue_free(adapter, q);
1813 mcc_cq_destroy:
1814 	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1815 mcc_cq_free:
1816 	be_queue_free(adapter, cq);
1817 err:
1818 	return -1;
1819 }
1820 
1821 static void be_tx_queues_destroy(struct be_adapter *adapter)
1822 {
1823 	struct be_queue_info *q;
1824 	struct be_tx_obj *txo;
1825 	u8 i;
1826 
1827 	for_all_tx_queues(adapter, txo, i) {
1828 		q = &txo->q;
1829 		if (q->created)
1830 			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1831 		be_queue_free(adapter, q);
1832 
1833 		q = &txo->cq;
1834 		if (q->created)
1835 			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1836 		be_queue_free(adapter, q);
1837 	}
1838 }
1839 
1840 static int be_num_txqs_want(struct be_adapter *adapter)
1841 {
1842 	if (sriov_want(adapter) || be_is_mc(adapter) ||
1843 	    lancer_chip(adapter) || !be_physfn(adapter) ||
1844 	    adapter->generation == BE_GEN2)
1845 		return 1;
1846 	else
1847 		return MAX_TX_QS;
1848 }
1849 
1850 static int be_tx_cqs_create(struct be_adapter *adapter)
1851 {
1852 	struct be_queue_info *cq, *eq;
1853 	int status;
1854 	struct be_tx_obj *txo;
1855 	u8 i;
1856 
1857 	adapter->num_tx_qs = be_num_txqs_want(adapter);
1858 	if (adapter->num_tx_qs != MAX_TX_QS) {
1859 		rtnl_lock();
1860 		netif_set_real_num_tx_queues(adapter->netdev,
1861 			adapter->num_tx_qs);
1862 		rtnl_unlock();
1863 	}
1864 
1865 	for_all_tx_queues(adapter, txo, i) {
1866 		cq = &txo->cq;
1867 		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
1868 					sizeof(struct be_eth_tx_compl));
1869 		if (status)
1870 			return status;
1871 
1872 		/* If num_evt_qs is less than num_tx_qs, then more than
1873 		 * one txq shares an eq
1874 		 */
1875 		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1876 		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
1877 		if (status)
1878 			return status;
1879 	}
1880 	return 0;
1881 }
1882 
1883 static int be_tx_qs_create(struct be_adapter *adapter)
1884 {
1885 	struct be_tx_obj *txo;
1886 	int i, status;
1887 
1888 	for_all_tx_queues(adapter, txo, i) {
1889 		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1890 					sizeof(struct be_eth_wrb));
1891 		if (status)
1892 			return status;
1893 
1894 		status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
1895 		if (status)
1896 			return status;
1897 	}
1898 
1899 	return 0;
1900 }
1901 
1902 static void be_rx_cqs_destroy(struct be_adapter *adapter)
1903 {
1904 	struct be_queue_info *q;
1905 	struct be_rx_obj *rxo;
1906 	int i;
1907 
1908 	for_all_rx_queues(adapter, rxo, i) {
1909 		q = &rxo->cq;
1910 		if (q->created)
1911 			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1912 		be_queue_free(adapter, q);
1913 	}
1914 }
1915 
1916 static int be_rx_cqs_create(struct be_adapter *adapter)
1917 {
1918 	struct be_queue_info *eq, *cq;
1919 	struct be_rx_obj *rxo;
1920 	int rc, i;
1921 
1922 	/* We'll create as many RSS rings as there are irqs.
1923 	 * But when there's only one irq there's no use creating RSS rings
1924 	 */
1925 	adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
1926 				num_irqs(adapter) + 1 : 1;
1927 	if (adapter->num_rx_qs != MAX_RX_QS) {
1928 		rtnl_lock();
1929 		netif_set_real_num_rx_queues(adapter->netdev,
1930 					     adapter->num_rx_qs);
1931 		rtnl_unlock();
1932 	}
1933 
1934 	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1935 	for_all_rx_queues(adapter, rxo, i) {
1936 		rxo->adapter = adapter;
1937 		cq = &rxo->cq;
1938 		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1939 				sizeof(struct be_eth_rx_compl));
1940 		if (rc)
1941 			return rc;
1942 
1943 		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1944 		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
1945 		if (rc)
1946 			return rc;
1947 	}
1948 
1949 	if (adapter->num_rx_qs != MAX_RX_QS)
1950 		dev_info(&adapter->pdev->dev,
1951 			"Created only %d receive queues\n", adapter->num_rx_qs);
1952 
1953 	return 0;
1954 }
1955 
1956 static irqreturn_t be_intx(int irq, void *dev)
1957 {
1958 	struct be_adapter *adapter = dev;
1959 	int num_evts;
1960 
1961 	/* With INTx only one EQ is used */
1962 	num_evts = event_handle(&adapter->eq_obj[0]);
1963 	if (num_evts)
1964 		return IRQ_HANDLED;
1965 	else
1966 		return IRQ_NONE;
1967 }
1968 
1969 static irqreturn_t be_msix(int irq, void *dev)
1970 {
1971 	struct be_eq_obj *eqo = dev;
1972 
1973 	event_handle(eqo);
1974 	return IRQ_HANDLED;
1975 }
1976 
1977 static inline bool do_gro(struct be_rx_compl_info *rxcp)
1978 {
1979 	return rxcp->tcpf && !rxcp->err;
1980 }
1981 
1982 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
1983 			int budget)
1984 {
1985 	struct be_adapter *adapter = rxo->adapter;
1986 	struct be_queue_info *rx_cq = &rxo->cq;
1987 	struct be_rx_compl_info *rxcp;
1988 	u32 work_done;
1989 
1990 	for (work_done = 0; work_done < budget; work_done++) {
1991 		rxcp = be_rx_compl_get(rxo);
1992 		if (!rxcp)
1993 			break;
1994 
1995 		/* Is it a flush compl that has no data? */
1996 		if (unlikely(rxcp->num_rcvd == 0))
1997 			goto loop_continue;
1998 
1999 		/* Discard compls with partial DMA (seen on Lancer B0) */
2000 		if (unlikely(!rxcp->pkt_size)) {
2001 			be_rx_compl_discard(rxo, rxcp);
2002 			goto loop_continue;
2003 		}
2004 
2005 		/* On BE drop pkts that arrive due to imperfect filtering in
2006 		 * promiscuous mode on some SKUs
2007 		 */
2008 		if (unlikely(rxcp->port != adapter->port_num &&
2009 				!lancer_chip(adapter))) {
2010 			be_rx_compl_discard(rxo, rxcp);
2011 			goto loop_continue;
2012 		}
2013 
2014 		if (do_gro(rxcp))
2015 			be_rx_compl_process_gro(rxo, napi, rxcp);
2016 		else
2017 			be_rx_compl_process(rxo, rxcp);
2018 loop_continue:
2019 		be_rx_stats_update(rxo, rxcp);
2020 	}
2021 
2022 	if (work_done) {
2023 		be_cq_notify(adapter, rx_cq->id, true, work_done);
2024 
2025 		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
2026 			be_post_rx_frags(rxo, GFP_ATOMIC);
2027 	}
2028 
2029 	return work_done;
2030 }
2031 
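/* Reap up to 'budget' TX completions from this TXQ's CQ, free the
 * corresponding wrbs and wake the netdev subqueue if it was stopped.
 * Returns true when the CQ was drained within the budget.
 */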
2032 static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2033 			  int budget, int idx)
2034 {
2035 	struct be_eth_tx_compl *txcp;
2036 	int num_wrbs = 0, work_done;
2037 
2038 	for (work_done = 0; work_done < budget; work_done++) {
2039 		txcp = be_tx_compl_get(&txo->cq);
2040 		if (!txcp)
2041 			break;
2042 		num_wrbs += be_tx_compl_process(adapter, txo,
2043 				AMAP_GET_BITS(struct amap_eth_tx_compl,
2044 					wrb_index, txcp));
2045 	}
2046 
2047 	if (work_done) {
2048 		be_cq_notify(adapter, txo->cq.id, true, work_done);
2049 		atomic_sub(num_wrbs, &txo->q.used);
2050 
2051 		/* As Tx wrbs have been freed up, wake up netdev queue
2052 		 * if it was stopped due to lack of tx wrbs.  */
2053 		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2054 			atomic_read(&txo->q.used) < txo->q.len / 2) {
2055 			netif_wake_subqueue(adapter->netdev, idx);
2056 		}
2057 
2058 		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2059 		tx_stats(txo)->tx_compl += work_done;
2060 		u64_stats_update_end(&tx_stats(txo)->sync_compl);
2061 	}
2062 	return (work_done < budget); /* Done */
2063 }
2064 
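/* NAPI poll handler: services the TX and RX queues mapped to this EQ and,
 * for the EQ that owns the MCC queue, also reaps MCC completions.
 */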
2065 int be_poll(struct napi_struct *napi, int budget)
2066 {
2067 	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2068 	struct be_adapter *adapter = eqo->adapter;
2069 	int max_work = 0, work, i;
2070 	bool tx_done;
2071 
2072 	/* Process all TXQs serviced by this EQ */
2073 	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2074 		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2075 					eqo->tx_budget, i);
2076 		if (!tx_done)
2077 			max_work = budget;
2078 	}
2079 
2080 	/* This loop will iterate twice for EQ0 in which
2081 	 * completions of the last RXQ (default one) are also processed
2082 	 * For other EQs the loop iterates only once
2083 	 */
2084 	for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
2085 		work = be_process_rx(&adapter->rx_obj[i], napi, budget);
2086 		max_work = max(work, max_work);
2087 	}
2088 
2089 	if (is_mcc_eqo(eqo))
2090 		be_process_mcc(adapter);
2091 
2092 	if (max_work < budget) {
2093 		napi_complete(napi);
2094 		be_eq_notify(adapter, eqo->q.id, true, false, 0);
2095 	} else {
2096 		/* As we'll continue in polling mode, count and clear events */
2097 		be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo));
2098 	}
2099 	return max_work;
2100 }
2101 
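/* Check for unrecoverable errors: SLIPORT status on Lancer, or unmasked UE
 * status bits (read from PCI config space) on BE2/BE3. Sets hw_error and
 * logs the failing blocks when an error is found.
 */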
2102 void be_detect_error(struct be_adapter *adapter)
2103 {
2104 	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2105 	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2106 	u32 i;
2107 
2108 	if (be_crit_error(adapter))
2109 		return;
2110 
2111 	if (lancer_chip(adapter)) {
2112 		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2113 		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2114 			sliport_err1 = ioread32(adapter->db +
2115 					SLIPORT_ERROR1_OFFSET);
2116 			sliport_err2 = ioread32(adapter->db +
2117 					SLIPORT_ERROR2_OFFSET);
2118 		}
2119 	} else {
2120 		pci_read_config_dword(adapter->pdev,
2121 				PCICFG_UE_STATUS_LOW, &ue_lo);
2122 		pci_read_config_dword(adapter->pdev,
2123 				PCICFG_UE_STATUS_HIGH, &ue_hi);
2124 		pci_read_config_dword(adapter->pdev,
2125 				PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2126 		pci_read_config_dword(adapter->pdev,
2127 				PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2128 
2129 		ue_lo = (ue_lo & ~ue_lo_mask);
2130 		ue_hi = (ue_hi & ~ue_hi_mask);
2131 	}
2132 
2133 	if (ue_lo || ue_hi ||
2134 		sliport_status & SLIPORT_STATUS_ERR_MASK) {
2135 		adapter->hw_error = true;
2136 		dev_err(&adapter->pdev->dev,
2137 			"Error detected in the card\n");
2138 	}
2139 
2140 	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2141 		dev_err(&adapter->pdev->dev,
2142 			"ERR: sliport status 0x%x\n", sliport_status);
2143 		dev_err(&adapter->pdev->dev,
2144 			"ERR: sliport error1 0x%x\n", sliport_err1);
2145 		dev_err(&adapter->pdev->dev,
2146 			"ERR: sliport error2 0x%x\n", sliport_err2);
2147 	}
2148 
2149 	if (ue_lo) {
2150 		for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2151 			if (ue_lo & 1)
2152 				dev_err(&adapter->pdev->dev,
2153 				"UE: %s bit set\n", ue_status_low_desc[i]);
2154 		}
2155 	}
2156 
2157 	if (ue_hi) {
2158 		for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2159 			if (ue_hi & 1)
2160 				dev_err(&adapter->pdev->dev,
2161 				"UE: %s bit set\n", ue_status_hi_desc[i]);
2162 		}
2163 	}
2165 }
2166 
2167 static void be_msix_disable(struct be_adapter *adapter)
2168 {
2169 	if (msix_enabled(adapter)) {
2170 		pci_disable_msix(adapter->pdev);
2171 		adapter->num_msix_vec = 0;
2172 	}
2173 }
2174 
2175 static uint be_num_rss_want(struct be_adapter *adapter)
2176 {
2177 	u32 num = 0;
2178 	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2179 	     !sriov_want(adapter) && be_physfn(adapter) &&
2180 	     !be_is_mc(adapter)) {
2181 		num = (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2182 		num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2183 	}
2184 	return num;
2185 }
2186 
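/* Request one MSI-X vector per desired RSS queue (plus RoCE vectors when
 * supported). If fewer vectors are available, retry with the count the
 * platform offers; the vectors granted are then split between NIC and RoCE.
 */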
2187 static void be_msix_enable(struct be_adapter *adapter)
2188 {
2189 #define BE_MIN_MSIX_VECTORS		1
2190 	int i, status, num_vec, num_roce_vec = 0;
2191 
2192 	/* If RSS queues are not used, need a vec for default RX Q */
2193 	num_vec = min(be_num_rss_want(adapter), num_online_cpus());
2194 	if (be_roce_supported(adapter)) {
2195 		num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
2196 					(num_online_cpus() + 1));
2197 		num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
2198 		num_vec += num_roce_vec;
2199 		num_vec = min(num_vec, MAX_MSIX_VECTORS);
2200 	}
2201 	num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
2202 
2203 	for (i = 0; i < num_vec; i++)
2204 		adapter->msix_entries[i].entry = i;
2205 
2206 	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2207 	if (status == 0) {
2208 		goto done;
2209 	} else if (status >= BE_MIN_MSIX_VECTORS) {
2210 		num_vec = status;
2211 		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
2212 				num_vec) == 0)
2213 			goto done;
2214 	}
2215 	return;
2216 done:
2217 	if (be_roce_supported(adapter)) {
2218 		if (num_vec > num_roce_vec) {
2219 			adapter->num_msix_vec = num_vec - num_roce_vec;
2220 			adapter->num_msix_roce_vec =
2221 				num_vec - adapter->num_msix_vec;
2222 		} else {
2223 			adapter->num_msix_vec = num_vec;
2224 			adapter->num_msix_roce_vec = 0;
2225 		}
2226 	} else {
2227 		adapter->num_msix_vec = num_vec;
2228 	}
2229 }
2230 
2231 static inline int be_msix_vec_get(struct be_adapter *adapter,
2232 				struct be_eq_obj *eqo)
2233 {
2234 	return adapter->msix_entries[eqo->idx].vector;
2235 }
2236 
2237 static int be_msix_register(struct be_adapter *adapter)
2238 {
2239 	struct net_device *netdev = adapter->netdev;
2240 	struct be_eq_obj *eqo;
2241 	int status, i, vec;
2242 
2243 	for_all_evt_queues(adapter, eqo, i) {
2244 		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2245 		vec = be_msix_vec_get(adapter, eqo);
2246 		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
2247 		if (status)
2248 			goto err_msix;
2249 	}
2250 
2251 	return 0;
2252 err_msix:
2253 	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2254 		free_irq(be_msix_vec_get(adapter, eqo), eqo);
2255 	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2256 		status);
2257 	be_msix_disable(adapter);
2258 	return status;
2259 }
2260 
2261 static int be_irq_register(struct be_adapter *adapter)
2262 {
2263 	struct net_device *netdev = adapter->netdev;
2264 	int status;
2265 
2266 	if (msix_enabled(adapter)) {
2267 		status = be_msix_register(adapter);
2268 		if (status == 0)
2269 			goto done;
2270 		/* INTx is not supported for VF */
2271 		if (!be_physfn(adapter))
2272 			return status;
2273 	}
2274 
2275 	/* INTx */
2276 	netdev->irq = adapter->pdev->irq;
2277 	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2278 			adapter);
2279 	if (status) {
2280 		dev_err(&adapter->pdev->dev,
2281 			"INTx request IRQ failed - err %d\n", status);
2282 		return status;
2283 	}
2284 done:
2285 	adapter->isr_registered = true;
2286 	return 0;
2287 }
2288 
2289 static void be_irq_unregister(struct be_adapter *adapter)
2290 {
2291 	struct net_device *netdev = adapter->netdev;
2292 	struct be_eq_obj *eqo;
2293 	int i;
2294 
2295 	if (!adapter->isr_registered)
2296 		return;
2297 
2298 	/* INTx */
2299 	if (!msix_enabled(adapter)) {
2300 		free_irq(netdev->irq, adapter);
2301 		goto done;
2302 	}
2303 
2304 	/* MSIx */
2305 	for_all_evt_queues(adapter, eqo, i)
2306 		free_irq(be_msix_vec_get(adapter, eqo), eqo);
2307 
2308 done:
2309 	adapter->isr_registered = false;
2310 }
2311 
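/* Destroy each RXQ; after a 1ms grace period for in-flight DMA and the
 * flush completion, drain its CQ before freeing the queue memory.
 */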
2312 static void be_rx_qs_destroy(struct be_adapter *adapter)
2313 {
2314 	struct be_queue_info *q;
2315 	struct be_rx_obj *rxo;
2316 	int i;
2317 
2318 	for_all_rx_queues(adapter, rxo, i) {
2319 		q = &rxo->q;
2320 		if (q->created) {
2321 			be_cmd_rxq_destroy(adapter, q);
2322 			/* After the rxq is invalidated, wait for a grace time
2323 			 * of 1ms for all dma to end and the flush compl to
2324 			 * arrive
2325 			 */
2326 			mdelay(1);
2327 			be_rx_cq_clean(rxo);
2328 		}
2329 		be_queue_free(adapter, q);
2330 	}
2331 }
2332 
2333 static int be_close(struct net_device *netdev)
2334 {
2335 	struct be_adapter *adapter = netdev_priv(netdev);
2336 	struct be_eq_obj *eqo;
2337 	int i;
2338 
2339 	be_roce_dev_close(adapter);
2340 
2341 	be_async_mcc_disable(adapter);
2342 
2343 	if (!lancer_chip(adapter))
2344 		be_intr_set(adapter, false);
2345 
2346 	for_all_evt_queues(adapter, eqo, i) {
2347 		napi_disable(&eqo->napi);
2348 		if (msix_enabled(adapter))
2349 			synchronize_irq(be_msix_vec_get(adapter, eqo));
2350 		else
2351 			synchronize_irq(netdev->irq);
2352 		be_eq_clean(eqo);
2353 	}
2354 
2355 	be_irq_unregister(adapter);
2356 
2357 	/* Wait for all pending tx completions to arrive so that
2358 	 * all tx skbs are freed.
2359 	 */
2360 	be_tx_compl_clean(adapter);
2361 
2362 	be_rx_qs_destroy(adapter);
2363 	return 0;
2364 }
2365 
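/* Allocate the RXQ rings, create the default RXQ first (as the FW expects),
 * then the RSS rings; program the RSS indirection table and post the
 * initial receive buffers.
 */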
2366 static int be_rx_qs_create(struct be_adapter *adapter)
2367 {
2368 	struct be_rx_obj *rxo;
2369 	int rc, i, j;
2370 	u8 rsstable[128];
2371 
2372 	for_all_rx_queues(adapter, rxo, i) {
2373 		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2374 				    sizeof(struct be_eth_rx_d));
2375 		if (rc)
2376 			return rc;
2377 	}
2378 
2379 	/* The FW would like the default RXQ to be created first */
2380 	rxo = default_rxo(adapter);
2381 	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2382 			       adapter->if_handle, false, &rxo->rss_id);
2383 	if (rc)
2384 		return rc;
2385 
2386 	for_all_rss_queues(adapter, rxo, i) {
2387 		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2388 				       rx_frag_size, adapter->if_handle,
2389 				       true, &rxo->rss_id);
2390 		if (rc)
2391 			return rc;
2392 	}
2393 
2394 	if (be_multi_rxq(adapter)) {
2395 		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2396 			for_all_rss_queues(adapter, rxo, i) {
2397 				if ((j + i) >= 128)
2398 					break;
2399 				rsstable[j + i] = rxo->rss_id;
2400 			}
2401 		}
2402 		rc = be_cmd_rss_config(adapter, rsstable, 128);
2403 		if (rc)
2404 			return rc;
2405 	}
2406 
2407 	/* First time posting */
2408 	for_all_rx_queues(adapter, rxo, i)
2409 		be_post_rx_frags(rxo, GFP_KERNEL);
2410 	return 0;
2411 }
2412 
2413 static int be_open(struct net_device *netdev)
2414 {
2415 	struct be_adapter *adapter = netdev_priv(netdev);
2416 	struct be_eq_obj *eqo;
2417 	struct be_rx_obj *rxo;
2418 	struct be_tx_obj *txo;
2419 	u8 link_status;
2420 	int status, i;
2421 
2422 	status = be_rx_qs_create(adapter);
2423 	if (status)
2424 		goto err;
2425 
2426 	be_irq_register(adapter);
2427 
2428 	if (!lancer_chip(adapter))
2429 		be_intr_set(adapter, true);
2430 
2431 	for_all_rx_queues(adapter, rxo, i)
2432 		be_cq_notify(adapter, rxo->cq.id, true, 0);
2433 
2434 	for_all_tx_queues(adapter, txo, i)
2435 		be_cq_notify(adapter, txo->cq.id, true, 0);
2436 
2437 	be_async_mcc_enable(adapter);
2438 
2439 	for_all_evt_queues(adapter, eqo, i) {
2440 		napi_enable(&eqo->napi);
2441 		be_eq_notify(adapter, eqo->q.id, true, false, 0);
2442 	}
2443 
2444 	status = be_cmd_link_status_query(adapter, NULL, NULL,
2445 					  &link_status, 0);
2446 	if (!status)
2447 		be_link_status_update(adapter, link_status);
2448 
2449 	be_roce_dev_open(adapter);
2450 	return 0;
2451 err:
2452 	be_close(adapter->netdev);
2453 	return -EIO;
2454 }
2455 
2456 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2457 {
2458 	struct be_dma_mem cmd;
2459 	int status = 0;
2460 	u8 mac[ETH_ALEN];
2461 
2462 	memset(mac, 0, ETH_ALEN);
2463 
2464 	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2465 	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2466 				    GFP_KERNEL);
2467 	if (cmd.va == NULL)
2468 		return -1;
2469 	memset(cmd.va, 0, cmd.size);
2470 
2471 	if (enable) {
2472 		status = pci_write_config_dword(adapter->pdev,
2473 			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2474 		if (status) {
2475 			dev_err(&adapter->pdev->dev,
2476 				"Could not enable Wake-on-lan\n");
2477 			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2478 					  cmd.dma);
2479 			return status;
2480 		}
2481 		status = be_cmd_enable_magic_wol(adapter,
2482 				adapter->netdev->dev_addr, &cmd);
2483 		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2484 		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2485 	} else {
2486 		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2487 		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2488 		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2489 	}
2490 
2491 	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2492 	return status;
2493 }
2494 
2495 /*
2496  * Generate a seed MAC address from the PF MAC Address using jhash.
2497  * MAC addresses for VFs are assigned incrementally, starting from the seed.
2498  * These addresses are programmed in the ASIC by the PF and the VF driver
2499  * queries for the MAC address during its probe.
2500  */
2501 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2502 {
2503 	u32 vf;
2504 	int status = 0;
2505 	u8 mac[ETH_ALEN];
2506 	struct be_vf_cfg *vf_cfg;
2507 
2508 	be_vf_eth_addr_generate(adapter, mac);
2509 
2510 	for_all_vfs(adapter, vf_cfg, vf) {
2511 		if (lancer_chip(adapter)) {
2512 			status = be_cmd_set_mac_list(adapter,  mac, 1, vf + 1);
2513 		} else {
2514 			status = be_cmd_pmac_add(adapter, mac,
2515 						 vf_cfg->if_handle,
2516 						 &vf_cfg->pmac_id, vf + 1);
2517 		}
2518 
2519 		if (status)
2520 			dev_err(&adapter->pdev->dev,
2521 			"Mac address assignment failed for VF %d\n", vf);
2522 		else
2523 			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2524 
2525 		mac[5] += 1;
2526 	}
2527 	return status;
2528 }
2529 
2530 static void be_vf_clear(struct be_adapter *adapter)
2531 {
2532 	struct be_vf_cfg *vf_cfg;
2533 	u32 vf;
2534 
2535 	if (be_find_vfs(adapter, ASSIGNED)) {
2536 		dev_warn(&adapter->pdev->dev, "VFs are assigned to VMs\n");
2537 		goto done;
2538 	}
2539 
2540 	for_all_vfs(adapter, vf_cfg, vf) {
2541 		if (lancer_chip(adapter))
2542 			be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2543 		else
2544 			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2545 					vf_cfg->pmac_id, vf + 1);
2546 
2547 		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2548 	}
2549 	pci_disable_sriov(adapter->pdev);
2550 done:
2551 	kfree(adapter->vf_cfg);
2552 	adapter->num_vfs = 0;
2553 }
2554 
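/* Teardown counterpart of be_setup(): stop the worker, clear VFs, delete
 * the extra unicast MACs and the interface, destroy all queues and
 * disable MSI-X.
 */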
2555 static int be_clear(struct be_adapter *adapter)
2556 {
2557 	int i = 1;
2558 
2559 	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2560 		cancel_delayed_work_sync(&adapter->work);
2561 		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2562 	}
2563 
2564 	if (sriov_enabled(adapter))
2565 		be_vf_clear(adapter);
2566 
2567 	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
2568 		be_cmd_pmac_del(adapter, adapter->if_handle,
2569 			adapter->pmac_id[i], 0);
2570 
2571 	be_cmd_if_destroy(adapter, adapter->if_handle,  0);
2572 
2573 	be_mcc_queues_destroy(adapter);
2574 	be_rx_cqs_destroy(adapter);
2575 	be_tx_queues_destroy(adapter);
2576 	be_evt_queues_destroy(adapter);
2577 
2578 	be_msix_disable(adapter);
2579 	return 0;
2580 }
2581 
2582 static int be_vf_setup_init(struct be_adapter *adapter)
2583 {
2584 	struct be_vf_cfg *vf_cfg;
2585 	int vf;
2586 
2587 	adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2588 				  GFP_KERNEL);
2589 	if (!adapter->vf_cfg)
2590 		return -ENOMEM;
2591 
2592 	for_all_vfs(adapter, vf_cfg, vf) {
2593 		vf_cfg->if_handle = -1;
2594 		vf_cfg->pmac_id = -1;
2595 	}
2596 	return 0;
2597 }
2598 
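/* Enable SR-IOV (capped at the device limit), create an interface per VF,
 * assign VF MAC addresses (unless VFs were already enabled) and record each
 * VF's link speed and default VLAN.
 */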
2599 static int be_vf_setup(struct be_adapter *adapter)
2600 {
2601 	struct be_vf_cfg *vf_cfg;
2602 	struct device *dev = &adapter->pdev->dev;
2603 	u32 cap_flags, en_flags, vf;
2604 	u16 def_vlan, lnk_speed;
2605 	int status, enabled_vfs;
2606 
2607 	enabled_vfs = be_find_vfs(adapter, ENABLED);
2608 	if (enabled_vfs) {
2609 		dev_warn(dev, "%d VFs are already enabled\n", enabled_vfs);
2610 		dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2611 		return 0;
2612 	}
2613 
2614 	if (num_vfs > adapter->dev_num_vfs) {
2615 		dev_warn(dev, "Device supports %d VFs and not %d\n",
2616 			 adapter->dev_num_vfs, num_vfs);
2617 		num_vfs = adapter->dev_num_vfs;
2618 	}
2619 
2620 	status = pci_enable_sriov(adapter->pdev, num_vfs);
2621 	if (!status) {
2622 		adapter->num_vfs = num_vfs;
2623 	} else {
2624 		/* Platform doesn't support SRIOV though device supports it */
2625 		dev_warn(dev, "SRIOV enable failed\n");
2626 		return 0;
2627 	}
2628 
2629 	status = be_vf_setup_init(adapter);
2630 	if (status)
2631 		goto err;
2632 
2633 	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2634 				BE_IF_FLAGS_MULTICAST;
2635 	for_all_vfs(adapter, vf_cfg, vf) {
2636 		status = be_cmd_if_create(adapter, cap_flags, en_flags,
2637 					  &vf_cfg->if_handle, vf + 1);
2638 		if (status)
2639 			goto err;
2640 	}
2641 
2642 	if (!enabled_vfs) {
2643 		status = be_vf_eth_addr_config(adapter);
2644 		if (status)
2645 			goto err;
2646 	}
2647 
2648 	for_all_vfs(adapter, vf_cfg, vf) {
2649 		status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
2650 						  NULL, vf + 1);
2651 		if (status)
2652 			goto err;
2653 		vf_cfg->tx_rate = lnk_speed * 10;
2654 
2655 		status = be_cmd_get_hsw_config(adapter, &def_vlan,
2656 				vf + 1, vf_cfg->if_handle);
2657 		if (status)
2658 			goto err;
2659 		vf_cfg->def_vid = def_vlan;
2660 	}
2661 	return 0;
2662 err:
2663 	return status;
2664 }
2665 
2666 static void be_setup_init(struct be_adapter *adapter)
2667 {
2668 	adapter->vlan_prio_bmap = 0xff;
2669 	adapter->phy.link_speed = -1;
2670 	adapter->if_handle = -1;
2671 	adapter->be3_native = false;
2672 	adapter->promiscuous = false;
2673 	adapter->eq_next_idx = 0;
2674 	adapter->phy.forced_port_speed = -1;
2675 }
2676 
2677 static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
2678 			   bool *active_mac, u32 *pmac_id)
2679 {
2680 	int status = 0;
2681 
2682 	if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
2683 		memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
2684 		if (!lancer_chip(adapter) && !be_physfn(adapter))
2685 			*active_mac = true;
2686 		else
2687 			*active_mac = false;
2688 
2689 		return status;
2690 	}
2691 
2692 	if (lancer_chip(adapter)) {
2693 		status = be_cmd_get_mac_from_list(adapter, mac,
2694 						  active_mac, pmac_id, 0);
2695 		if (*active_mac) {
2696 			status = be_cmd_mac_addr_query(adapter, mac,
2697 						       MAC_ADDRESS_TYPE_NETWORK,
2698 						       false, if_handle,
2699 						       *pmac_id);
2700 		}
2701 	} else if (be_physfn(adapter)) {
2702 		/* For BE3, for PF get permanent MAC */
2703 		status = be_cmd_mac_addr_query(adapter, mac,
2704 					       MAC_ADDRESS_TYPE_NETWORK, true,
2705 					       0, 0);
2706 		*active_mac = false;
2707 	} else {
2708 		/* For BE3, for VF get soft MAC assigned by PF */
2709 		status = be_cmd_mac_addr_query(adapter, mac,
2710 					       MAC_ADDRESS_TYPE_NETWORK, false,
2711 					       if_handle, 0);
2712 		*active_mac = true;
2713 	}
2714 	return status;
2715 }
2716 
2717 /* Routine to query per function resource limits */
2718 static int be_get_config(struct be_adapter *adapter)
2719 {
2720 	int pos;
2721 	u16 dev_num_vfs;
2722 
2723 	pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
2724 	if (pos) {
2725 		pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
2726 				     &dev_num_vfs);
2727 		adapter->dev_num_vfs = dev_num_vfs;
2728 	}
2729 	return 0;
2730 }
2731 
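/* Bring-up path: enable MSI-X, create EQs, TX/RX CQs and MCC queues, create
 * the interface and program its MAC, then set up TX queues, VLANs, RX mode,
 * flow control and (optionally) SR-IOV before scheduling the worker.
 */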
2732 static int be_setup(struct be_adapter *adapter)
2733 {
2734 	struct device *dev = &adapter->pdev->dev;
2735 	u32 cap_flags, en_flags;
2736 	u32 tx_fc, rx_fc;
2737 	int status;
2738 	u8 mac[ETH_ALEN];
2739 	bool active_mac;
2740 
2741 	be_setup_init(adapter);
2742 
2743 	be_get_config(adapter);
2744 
2745 	be_cmd_req_native_mode(adapter);
2746 
2747 	be_msix_enable(adapter);
2748 
2749 	status = be_evt_queues_create(adapter);
2750 	if (status)
2751 		goto err;
2752 
2753 	status = be_tx_cqs_create(adapter);
2754 	if (status)
2755 		goto err;
2756 
2757 	status = be_rx_cqs_create(adapter);
2758 	if (status)
2759 		goto err;
2760 
2761 	status = be_mcc_queues_create(adapter);
2762 	if (status)
2763 		goto err;
2764 
2765 	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2766 			BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2767 	cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
2768 			BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;
2769 
2770 	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2771 		cap_flags |= BE_IF_FLAGS_RSS;
2772 		en_flags |= BE_IF_FLAGS_RSS;
2773 	}
2774 
2775 	if (lancer_chip(adapter) && !be_physfn(adapter)) {
2776 		en_flags = BE_IF_FLAGS_UNTAGGED |
2777 			    BE_IF_FLAGS_BROADCAST |
2778 			    BE_IF_FLAGS_MULTICAST;
2779 		cap_flags = en_flags;
2780 	}
2781 
2782 	status = be_cmd_if_create(adapter, cap_flags, en_flags,
2783 				  &adapter->if_handle, 0);
2784 	if (status != 0)
2785 		goto err;
2786 
2787 	memset(mac, 0, ETH_ALEN);
2788 	active_mac = false;
2789 	status = be_get_mac_addr(adapter, mac, adapter->if_handle,
2790 				 &active_mac, &adapter->pmac_id[0]);
2791 	if (status != 0)
2792 		goto err;
2793 
2794 	if (!active_mac) {
2795 		status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
2796 					 &adapter->pmac_id[0], 0);
2797 		if (status != 0)
2798 			goto err;
2799 	}
2800 
2801 	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
2802 		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2803 		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2804 	}
2805 
2806 	status = be_tx_qs_create(adapter);
2807 	if (status)
2808 		goto err;
2809 
2810 	be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
2811 
2812 	if (adapter->vlans_added)
2813 		be_vid_config(adapter);
2814 
2815 	be_set_rx_mode(adapter->netdev);
2816 
2817 	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
2818 
2819 	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
2820 		be_cmd_set_flow_control(adapter, adapter->tx_fc,
2821 					adapter->rx_fc);
2822 
2823 	if (be_physfn(adapter) && num_vfs) {
2824 		if (adapter->dev_num_vfs)
2825 			be_vf_setup(adapter);
2826 		else
2827 			dev_warn(dev, "device doesn't support SRIOV\n");
2828 	}
2829 
2830 	be_cmd_get_phy_info(adapter);
2831 	if (be_pause_supported(adapter))
2832 		adapter->phy.fc_autoneg = 1;
2833 
2834 	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2835 	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
2836 	return 0;
2837 err:
2838 	be_clear(adapter);
2839 	return status;
2840 }
2841 
2842 #ifdef CONFIG_NET_POLL_CONTROLLER
2843 static void be_netpoll(struct net_device *netdev)
2844 {
2845 	struct be_adapter *adapter = netdev_priv(netdev);
2846 	struct be_eq_obj *eqo;
2847 	int i;
2848 
2849 	for_all_evt_queues(adapter, eqo, i)
2850 		event_handle(eqo);
2853 }
2854 #endif
2855 
2856 #define FW_FILE_HDR_SIGN 	"ServerEngines Corp. "
2857 char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
2858 
2859 static bool be_flash_redboot(struct be_adapter *adapter,
2860 			const u8 *p, u32 img_start, int image_size,
2861 			int hdr_size)
2862 {
2863 	u32 crc_offset;
2864 	u8 flashed_crc[4];
2865 	int status;
2866 
2867 	crc_offset = hdr_size + img_start + image_size - 4;
2868 
2869 	p += crc_offset;
2870 
2871 	status = be_cmd_get_flash_crc(adapter, flashed_crc,
2872 			(image_size - 4));
2873 	if (status) {
2874 		dev_err(&adapter->pdev->dev,
2875 		"could not get crc from flash, not flashing redboot\n");
2876 		return false;
2877 	}
2878 
2879 	/* update redboot only if crc does not match */
2880 	if (!memcmp(flashed_crc, p, 4))
2881 		return false;
2882 	else
2883 		return true;
2884 }
2885 
2886 static bool phy_flashing_required(struct be_adapter *adapter)
2887 {
2888 	return (adapter->phy.phy_type == TN_8022 &&
2889 		adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
2890 }
2891 
2892 static bool is_comp_in_ufi(struct be_adapter *adapter,
2893 			   struct flash_section_info *fsec, int type)
2894 {
2895 	int i = 0, img_type = 0;
2896 	struct flash_section_info_g2 *fsec_g2 = NULL;
2897 
2898 	if (adapter->generation != BE_GEN3)
2899 		fsec_g2 = (struct flash_section_info_g2 *)fsec;
2900 
2901 	for (i = 0; i < MAX_FLASH_COMP; i++) {
2902 		if (fsec_g2)
2903 			img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
2904 		else
2905 			img_type = le32_to_cpu(fsec->fsec_entry[i].type);
2906 
2907 		if (img_type == type)
2908 			return true;
2909 	}
2910 	return false;
2911 }
2913 
2914 struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
2915 					 int header_size,
2916 					 const struct firmware *fw)
2917 {
2918 	struct flash_section_info *fsec = NULL;
2919 	const u8 *p = fw->data;
2920 
2921 	p += header_size;
2922 	while (p < (fw->data + fw->size)) {
2923 		fsec = (struct flash_section_info *)p;
2924 		if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
2925 			return fsec;
2926 		p += 32;
2927 	}
2928 	return NULL;
2929 }
2930 
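/* Flash every component from the generation-specific table that is present
 * in the UFI image, writing each one in 32KB chunks; redboot is written
 * only when its CRC differs from what is already in flash.
 */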
2931 static int be_flash_data(struct be_adapter *adapter,
2932 			 const struct firmware *fw,
2933 			 struct be_dma_mem *flash_cmd,
2934 			 int num_of_images)
2935 {
2937 	int status = 0, i, filehdr_size = 0;
2938 	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
2939 	u32 total_bytes = 0, flash_op;
2940 	int num_bytes;
2941 	const u8 *p = fw->data;
2942 	struct be_cmd_write_flashrom *req = flash_cmd->va;
2943 	const struct flash_comp *pflashcomp;
2944 	int num_comp, hdr_size;
2945 	struct flash_section_info *fsec = NULL;
2946 
2947 	struct flash_comp gen3_flash_types[] = {
2948 		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
2949 			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
2950 		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
2951 			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
2952 		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
2953 			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
2954 		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
2955 			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
2956 		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
2957 			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
2958 		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
2959 			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
2960 		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
2961 			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
2962 		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
2963 			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
2964 		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
2965 			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
2966 		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
2967 			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
2968 	};
2969 
2970 	struct flash_comp gen2_flash_types[] = {
2971 		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
2972 			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
2973 		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
2974 			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
2975 		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
2976 			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
2977 		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
2978 			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
2979 		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
2980 			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
2981 		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
2982 			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
2983 		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
2984 			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
2985 		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
2986 			 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
2987 	};
2988 
2989 	if (adapter->generation == BE_GEN3) {
2990 		pflashcomp = gen3_flash_types;
2991 		filehdr_size = sizeof(struct flash_file_hdr_g3);
2992 		num_comp = ARRAY_SIZE(gen3_flash_types);
2993 	} else {
2994 		pflashcomp = gen2_flash_types;
2995 		filehdr_size = sizeof(struct flash_file_hdr_g2);
2996 		num_comp = ARRAY_SIZE(gen2_flash_types);
2997 	}
2998 	/* Get flash section info */
2999 	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3000 	if (!fsec) {
3001 		dev_err(&adapter->pdev->dev,
3002 			"Invalid Cookie. UFI corrupted ?\n");
3003 		return -1;
3004 	}
3005 	for (i = 0; i < num_comp; i++) {
3006 		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
3007 			continue;
3008 
3009 		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3010 		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3011 			continue;
3012 
3013 		if (pflashcomp[i].optype == OPTYPE_PHY_FW) {
3014 			if (!phy_flashing_required(adapter))
3015 				continue;
3016 		}
3017 
3018 		hdr_size = filehdr_size +
3019 			   (num_of_images * sizeof(struct image_hdr));
3020 
3021 		if ((pflashcomp[i].optype == OPTYPE_REDBOOT) &&
3022 		    (!be_flash_redboot(adapter, fw->data, pflashcomp[i].offset,
3023 				       pflashcomp[i].size, hdr_size)))
3024 			continue;
3025 
3026 		/* Flash the component */
3027 		p = fw->data;
3028 		p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
3029 		if (p + pflashcomp[i].size > fw->data + fw->size)
3030 			return -1;
3031 		total_bytes = pflashcomp[i].size;
3032 		while (total_bytes) {
3033 			if (total_bytes > 32*1024)
3034 				num_bytes = 32*1024;
3035 			else
3036 				num_bytes = total_bytes;
3037 			total_bytes -= num_bytes;
3038 			if (!total_bytes) {
3039 				if (pflashcomp[i].optype == OPTYPE_PHY_FW)
3040 					flash_op = FLASHROM_OPER_PHY_FLASH;
3041 				else
3042 					flash_op = FLASHROM_OPER_FLASH;
3043 			} else {
3044 				if (pflashcomp[i].optype == OPTYPE_PHY_FW)
3045 					flash_op = FLASHROM_OPER_PHY_SAVE;
3046 				else
3047 					flash_op = FLASHROM_OPER_SAVE;
3048 			}
3049 			memcpy(req->params.data_buf, p, num_bytes);
3050 			p += num_bytes;
3051 			status = be_cmd_write_flashrom(adapter, flash_cmd,
3052 				pflashcomp[i].optype, flash_op, num_bytes);
3053 			if (status) {
3054 				if ((status == ILLEGAL_IOCTL_REQ) &&
3055 					(pflashcomp[i].optype ==
3056 						OPTYPE_PHY_FW))
3057 					break;
3058 				dev_err(&adapter->pdev->dev,
3059 					"cmd to write to flash rom failed.\n");
3060 				return -1;
3061 			}
3062 		}
3063 	}
3064 	return 0;
3065 }
3066 
3067 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
3068 {
3069 	if (fhdr == NULL)
3070 		return 0;
3071 	if (fhdr->build[0] == '3')
3072 		return BE_GEN3;
3073 	else if (fhdr->build[0] == '2')
3074 		return BE_GEN2;
3075 	else
3076 		return 0;
3077 }
3078 
3079 static int lancer_wait_idle(struct be_adapter *adapter)
3080 {
3081 #define SLIPORT_IDLE_TIMEOUT 30
3082 	u32 reg_val;
3083 	int status = 0, i;
3084 
3085 	for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3086 		reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3087 		if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3088 			break;
3089 
3090 		ssleep(1);
3091 	}
3092 
3093 	if (i == SLIPORT_IDLE_TIMEOUT)
3094 		status = -1;
3095 
3096 	return status;
3097 }
3098 
3099 static int lancer_fw_reset(struct be_adapter *adapter)
3100 {
3101 	int status = 0;
3102 
3103 	status = lancer_wait_idle(adapter);
3104 	if (status)
3105 		return status;
3106 
3107 	iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
3108 		  PHYSDEV_CONTROL_OFFSET);
3109 
3110 	return status;
3111 }
3112 
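/* Download a FW image to a Lancer adapter in 32KB chunks using the
 * write-object command, commit it, and issue a FW reset if the new image
 * requires one to become active.
 */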
3113 static int lancer_fw_download(struct be_adapter *adapter,
3114 				const struct firmware *fw)
3115 {
3116 #define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
3117 #define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
3118 	struct be_dma_mem flash_cmd;
3119 	const u8 *data_ptr = NULL;
3120 	u8 *dest_image_ptr = NULL;
3121 	size_t image_size = 0;
3122 	u32 chunk_size = 0;
3123 	u32 data_written = 0;
3124 	u32 offset = 0;
3125 	int status = 0;
3126 	u8 add_status = 0;
3127 	u8 change_status;
3128 
3129 	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3130 		dev_err(&adapter->pdev->dev,
3131 			"FW Image not properly aligned. "
3132 			"Length must be 4 byte aligned.\n");
3133 		status = -EINVAL;
3134 		goto lancer_fw_exit;
3135 	}
3136 
3137 	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3138 				+ LANCER_FW_DOWNLOAD_CHUNK;
3139 	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3140 						&flash_cmd.dma, GFP_KERNEL);
3141 	if (!flash_cmd.va) {
3142 		status = -ENOMEM;
3143 		dev_err(&adapter->pdev->dev,
3144 			"Memory allocation failure while flashing\n");
3145 		goto lancer_fw_exit;
3146 	}
3147 
3148 	dest_image_ptr = flash_cmd.va +
3149 				sizeof(struct lancer_cmd_req_write_object);
3150 	image_size = fw->size;
3151 	data_ptr = fw->data;
3152 
3153 	while (image_size) {
3154 		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3155 
3156 		/* Copy the image chunk content. */
3157 		memcpy(dest_image_ptr, data_ptr, chunk_size);
3158 
3159 		status = lancer_cmd_write_object(adapter, &flash_cmd,
3160 						 chunk_size, offset,
3161 						 LANCER_FW_DOWNLOAD_LOCATION,
3162 						 &data_written, &change_status,
3163 						 &add_status);
3164 		if (status)
3165 			break;
3166 
3167 		offset += data_written;
3168 		data_ptr += data_written;
3169 		image_size -= data_written;
3170 	}
3171 
3172 	if (!status) {
3173 		/* Commit the FW written */
3174 		status = lancer_cmd_write_object(adapter, &flash_cmd,
3175 						 0, offset,
3176 						 LANCER_FW_DOWNLOAD_LOCATION,
3177 						 &data_written, &change_status,
3178 						 &add_status);
3179 	}
3180 
3181 	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3182 				flash_cmd.dma);
3183 	if (status) {
3184 		dev_err(&adapter->pdev->dev,
3185 			"Firmware load error. "
3186 			"Status code: 0x%x Additional Status: 0x%x\n",
3187 			status, add_status);
3188 		goto lancer_fw_exit;
3189 	}
3190 
3191 	if (change_status == LANCER_FW_RESET_NEEDED) {
3192 		status = lancer_fw_reset(adapter);
3193 		if (status) {
3194 			dev_err(&adapter->pdev->dev,
3195 				"Adapter busy for FW reset.\n"
3196 				"New FW will not be active.\n");
3197 			goto lancer_fw_exit;
3198 		}
3199 	} else if (change_status != LANCER_NO_RESET_NEEDED) {
3200 		dev_err(&adapter->pdev->dev,
3201 			"System reboot required for new FW"
3202 			" to be active\n");
3203 	}
3204 
3205 	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3206 lancer_fw_exit:
3207 	return status;
3208 }
3209 
3210 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
3211 {
3212 	struct flash_file_hdr_g2 *fhdr;
3213 	struct flash_file_hdr_g3 *fhdr3;
3214 	struct image_hdr *img_hdr_ptr = NULL;
3215 	struct be_dma_mem flash_cmd;
3216 	const u8 *p;
3217 	int status = 0, i = 0, num_imgs = 0;
3218 
3219 	p = fw->data;
3220 	fhdr = (struct flash_file_hdr_g2 *) p;
3221 
3222 	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
3223 	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3224 					  &flash_cmd.dma, GFP_KERNEL);
3225 	if (!flash_cmd.va) {
3226 		status = -ENOMEM;
3227 		dev_err(&adapter->pdev->dev,
3228 			"Memory allocation failure while flashing\n");
3229 		goto be_fw_exit;
3230 	}
3231 
3232 	if ((adapter->generation == BE_GEN3) &&
3233 			(get_ufigen_type(fhdr) == BE_GEN3)) {
3234 		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
3235 		num_imgs = le32_to_cpu(fhdr3->num_imgs);
3236 		for (i = 0; i < num_imgs; i++) {
3237 			img_hdr_ptr = (struct image_hdr *) (fw->data +
3238 					(sizeof(struct flash_file_hdr_g3) +
3239 					 i * sizeof(struct image_hdr)));
3240 			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
3241 				status = be_flash_data(adapter, fw, &flash_cmd,
3242 							num_imgs);
3243 		}
3244 	} else if ((adapter->generation == BE_GEN2) &&
3245 			(get_ufigen_type(fhdr) == BE_GEN2)) {
3246 		status = be_flash_data(adapter, fw, &flash_cmd, 0);
3247 	} else {
3248 		dev_err(&adapter->pdev->dev,
3249 			"UFI and Interface are not compatible for flashing\n");
3250 		status = -1;
3251 	}
3252 
3253 	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3254 			  flash_cmd.dma);
3255 	if (status) {
3256 		dev_err(&adapter->pdev->dev, "Firmware load error\n");
3257 		goto be_fw_exit;
3258 	}
3259 
3260 	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3261 
3262 be_fw_exit:
3263 	return status;
3264 }
3265 
3266 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3267 {
3268 	const struct firmware *fw;
3269 	int status;
3270 
3271 	if (!netif_running(adapter->netdev)) {
3272 		dev_err(&adapter->pdev->dev,
3273 			"Firmware load not allowed (interface is down)\n");
3274 		return -1;
3275 	}
3276 
3277 	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3278 	if (status)
3279 		goto fw_exit;
3280 
3281 	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3282 
3283 	if (lancer_chip(adapter))
3284 		status = lancer_fw_download(adapter, fw);
3285 	else
3286 		status = be_fw_download(adapter, fw);
3287 
3288 fw_exit:
3289 	release_firmware(fw);
3290 	return status;
3291 }
3292 
3293 static const struct net_device_ops be_netdev_ops = {
3294 	.ndo_open		= be_open,
3295 	.ndo_stop		= be_close,
3296 	.ndo_start_xmit		= be_xmit,
3297 	.ndo_set_rx_mode	= be_set_rx_mode,
3298 	.ndo_set_mac_address	= be_mac_addr_set,
3299 	.ndo_change_mtu		= be_change_mtu,
3300 	.ndo_get_stats64	= be_get_stats64,
3301 	.ndo_validate_addr	= eth_validate_addr,
3302 	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
3303 	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
3304 	.ndo_set_vf_mac		= be_set_vf_mac,
3305 	.ndo_set_vf_vlan	= be_set_vf_vlan,
3306 	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
3307 	.ndo_get_vf_config	= be_get_vf_config,
3308 #ifdef CONFIG_NET_POLL_CONTROLLER
3309 	.ndo_poll_controller	= be_netpoll,
3310 #endif
3311 };
3312 
3313 static void be_netdev_init(struct net_device *netdev)
3314 {
3315 	struct be_adapter *adapter = netdev_priv(netdev);
3316 	struct be_eq_obj *eqo;
3317 	int i;
3318 
3319 	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3320 		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3321 		NETIF_F_HW_VLAN_TX;
3322 	if (be_multi_rxq(adapter))
3323 		netdev->hw_features |= NETIF_F_RXHASH;
3324 
3325 	netdev->features |= netdev->hw_features |
3326 		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
3327 
3328 	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3329 		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3330 
3331 	netdev->priv_flags |= IFF_UNICAST_FLT;
3332 
3333 	netdev->flags |= IFF_MULTICAST;
3334 
3335 	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
3336 
3337 	netdev->netdev_ops = &be_netdev_ops;
3338 
3339 	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3340 
3341 	for_all_evt_queues(adapter, eqo, i)
3342 		netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
3343 }
3344 
3345 static void be_unmap_pci_bars(struct be_adapter *adapter)
3346 {
3347 	if (adapter->csr)
3348 		iounmap(adapter->csr);
3349 	if (adapter->db)
3350 		iounmap(adapter->db);
3351 	if (adapter->roce_db.base)
3352 		pci_iounmap(adapter->pdev, adapter->roce_db.base);
3353 }
3354 
3355 static int lancer_roce_map_pci_bars(struct be_adapter *adapter)
3356 {
3357 	struct pci_dev *pdev = adapter->pdev;
3358 	u8 __iomem *addr;
3359 
3360 	addr = pci_iomap(pdev, 2, 0);
3361 	if (addr == NULL)
3362 		return -ENOMEM;
3363 
3364 	adapter->roce_db.base = addr;
3365 	adapter->roce_db.io_addr = pci_resource_start(pdev, 2);
3366 	adapter->roce_db.size = 8192;
3367 	adapter->roce_db.total_size = pci_resource_len(pdev, 2);
3368 	return 0;
3369 }
3370 
3371 static int be_map_pci_bars(struct be_adapter *adapter)
3372 {
3373 	u8 __iomem *addr;
3374 	int db_reg;
3375 
3376 	if (lancer_chip(adapter)) {
3377 		if (be_type_2_3(adapter)) {
3378 			addr = ioremap_nocache(
3379 					pci_resource_start(adapter->pdev, 0),
3380 					pci_resource_len(adapter->pdev, 0));
3381 			if (addr == NULL)
3382 				return -ENOMEM;
3383 			adapter->db = addr;
3384 		}
3385 		if (adapter->if_type == SLI_INTF_TYPE_3) {
3386 			if (lancer_roce_map_pci_bars(adapter))
3387 				goto pci_map_err;
3388 		}
3389 		return 0;
3390 	}
3391 
3392 	if (be_physfn(adapter)) {
3393 		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3394 				pci_resource_len(adapter->pdev, 2));
3395 		if (addr == NULL)
3396 			return -ENOMEM;
3397 		adapter->csr = addr;
3398 	}
3399 
3400 	if (adapter->generation == BE_GEN2) {
3401 		db_reg = 4;
3402 	} else {
3403 		if (be_physfn(adapter))
3404 			db_reg = 4;
3405 		else
3406 			db_reg = 0;
3407 	}
3408 	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3409 				pci_resource_len(adapter->pdev, db_reg));
3410 	if (addr == NULL)
3411 		goto pci_map_err;
3412 	adapter->db = addr;
3413 	if (adapter->sli_family == SKYHAWK_SLI_FAMILY) {
3414 		adapter->roce_db.size = 4096;
3415 		adapter->roce_db.io_addr =
3416 				pci_resource_start(adapter->pdev, db_reg);
3417 		adapter->roce_db.total_size =
3418 				pci_resource_len(adapter->pdev, db_reg);
3419 	}
3420 	return 0;
3421 pci_map_err:
3422 	be_unmap_pci_bars(adapter);
3423 	return -ENOMEM;
3424 }
3425 
3426 static void be_ctrl_cleanup(struct be_adapter *adapter)
3427 {
3428 	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3429 
3430 	be_unmap_pci_bars(adapter);
3431 
3432 	if (mem->va)
3433 		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3434 				  mem->dma);
3435 
3436 	mem = &adapter->rx_filter;
3437 	if (mem->va)
3438 		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3439 				  mem->dma);
3440 }
3441 
3442 static int be_ctrl_init(struct be_adapter *adapter)
3443 {
3444 	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3445 	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3446 	struct be_dma_mem *rx_filter = &adapter->rx_filter;
3447 	int status;
3448 
3449 	status = be_map_pci_bars(adapter);
3450 	if (status)
3451 		goto done;
3452 
3453 	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3454 	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3455 						mbox_mem_alloc->size,
3456 						&mbox_mem_alloc->dma,
3457 						GFP_KERNEL);
3458 	if (!mbox_mem_alloc->va) {
3459 		status = -ENOMEM;
3460 		goto unmap_pci_bars;
3461 	}
3462 	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3463 	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3464 	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3465 	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
3466 
3467 	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3468 	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3469 					&rx_filter->dma, GFP_KERNEL);
3470 	if (rx_filter->va == NULL) {
3471 		status = -ENOMEM;
3472 		goto free_mbox;
3473 	}
3474 	memset(rx_filter->va, 0, rx_filter->size);
3475 
3476 	mutex_init(&adapter->mbox_lock);
3477 	spin_lock_init(&adapter->mcc_lock);
3478 	spin_lock_init(&adapter->mcc_cq_lock);
3479 
3480 	init_completion(&adapter->flash_compl);
3481 	pci_save_state(adapter->pdev);
3482 	return 0;
3483 
3484 free_mbox:
3485 	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3486 			  mbox_mem_alloc->va, mbox_mem_alloc->dma);
3487 
3488 unmap_pci_bars:
3489 	be_unmap_pci_bars(adapter);
3490 
3491 done:
3492 	return status;
3493 }
3494 
3495 static void be_stats_cleanup(struct be_adapter *adapter)
3496 {
3497 	struct be_dma_mem *cmd = &adapter->stats_cmd;
3498 
3499 	if (cmd->va)
3500 		dma_free_coherent(&adapter->pdev->dev, cmd->size,
3501 				  cmd->va, cmd->dma);
3502 }
3503 
3504 static int be_stats_init(struct be_adapter *adapter)
3505 {
3506 	struct be_dma_mem *cmd = &adapter->stats_cmd;
3507 
3508 	if (adapter->generation == BE_GEN2) {
3509 		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3510 	} else {
3511 		if (lancer_chip(adapter))
3512 			cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3513 		else
3514 			cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3515 	}
3516 	cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3517 				     GFP_KERNEL);
3518 	if (cmd->va == NULL)
3519 		return -1;
3520 	memset(cmd->va, 0, cmd->size);
3521 	return 0;
3522 }
3523 
3524 static void __devexit be_remove(struct pci_dev *pdev)
3525 {
3526 	struct be_adapter *adapter = pci_get_drvdata(pdev);
3527 
3528 	if (!adapter)
3529 		return;
3530 
3531 	be_roce_dev_remove(adapter);
3532 
3533 	cancel_delayed_work_sync(&adapter->func_recovery_work);
3534 
3535 	unregister_netdev(adapter->netdev);
3536 
3537 	be_clear(adapter);
3538 
3539 	/* tell fw we're done with firing cmds */
3540 	be_cmd_fw_clean(adapter);
3541 
3542 	be_stats_cleanup(adapter);
3543 
3544 	be_ctrl_cleanup(adapter);
3545 
3546 	pci_set_drvdata(pdev, NULL);
3547 	pci_release_regions(pdev);
3548 	pci_disable_device(pdev);
3549 
3550 	free_netdev(adapter->netdev);
3551 }
3552 
3553 bool be_is_wol_supported(struct be_adapter *adapter)
3554 {
3555 	return (adapter->wol_cap & BE_WOL_CAP) &&
3556 		!be_is_wol_excluded(adapter);
3557 }
3558 
3559 u32 be_get_fw_log_level(struct be_adapter *adapter)
3560 {
3561 	struct be_dma_mem extfat_cmd;
3562 	struct be_fat_conf_params *cfgs;
3563 	int status;
3564 	u32 level = 0;
3565 	int j;
3566 
3567 	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3568 	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3569 	extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
3570 					     &extfat_cmd.dma);
3571 
3572 	if (!extfat_cmd.va) {
3573 		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
3574 			__func__);
3575 		goto err;
3576 	}
3577 
3578 	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
3579 	if (!status) {
3580 		cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
3581 						sizeof(struct be_cmd_resp_hdr));
3582 		for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
3583 			if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
3584 				level = cfgs->module[0].trace_lvl[j].dbg_lvl;
3585 		}
3586 	}
3587 	pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
3588 			    extfat_cmd.dma);
3589 err:
3590 	return level;
3591 }

3592 static int be_get_initial_config(struct be_adapter *adapter)
3593 {
3594 	int status;
3595 	u32 level;
3596 
3597 	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3598 			&adapter->function_mode, &adapter->function_caps);
3599 	if (status)
3600 		return status;
3601 
3602 	if (adapter->function_mode & FLEX10_MODE)
3603 		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3604 	else
3605 		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3606 
3607 	if (be_physfn(adapter))
3608 		adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
3609 	else
3610 		adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
3611 
3612 	/* primary mac needs 1 pmac entry */
3613 	adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
3614 				  sizeof(u32), GFP_KERNEL);
3615 	if (!adapter->pmac_id)
3616 		return -ENOMEM;
3617 
3618 	status = be_cmd_get_cntl_attributes(adapter);
3619 	if (status)
3620 		return status;
3621 
3622 	status = be_cmd_get_acpi_wol_cap(adapter);
3623 	if (status) {
3624 		/* in case of a failure to get WOL capabilities,
3625 		 * check the exclusion list to determine WOL capability */
3626 		if (!be_is_wol_excluded(adapter))
3627 			adapter->wol_cap |= BE_WOL_CAP;
3628 	}
3629 
3630 	if (be_is_wol_supported(adapter))
3631 		adapter->wol = true;
3632 
3633 	/* Must be a power of 2 or else MODULO will BUG_ON */
3634 	adapter->be_get_temp_freq = 64;
3635 
3636 	level = be_get_fw_log_level(adapter);
3637 	adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
3638 
3639 	return 0;
3640 }
3641 
3642 static int be_dev_type_check(struct be_adapter *adapter)
3643 {
3644 	struct pci_dev *pdev = adapter->pdev;
3645 	u32 sli_intf = 0, if_type;
3646 
3647 	switch (pdev->device) {
3648 	case BE_DEVICE_ID1:
3649 	case OC_DEVICE_ID1:
3650 		adapter->generation = BE_GEN2;
3651 		break;
3652 	case BE_DEVICE_ID2:
3653 	case OC_DEVICE_ID2:
3654 		adapter->generation = BE_GEN3;
3655 		break;
3656 	case OC_DEVICE_ID3:
3657 	case OC_DEVICE_ID4:
3658 		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3659 		adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3660 						SLI_INTF_IF_TYPE_SHIFT;
3661 		if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3662 						SLI_INTF_IF_TYPE_SHIFT;
3663 		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3664 			!be_type_2_3(adapter)) {
3665 			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3666 			return -EINVAL;
3667 		}
3668 		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3669 					 SLI_INTF_FAMILY_SHIFT);
3670 		adapter->generation = BE_GEN3;
3671 		break;
3672 	case OC_DEVICE_ID5:
3673 		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3674 		if ((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) {
3675 			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3676 			return -EINVAL;
3677 		}
3678 		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3679 					 SLI_INTF_FAMILY_SHIFT);
3680 		adapter->generation = BE_GEN3;
3681 		break;
3682 	default:
3683 		adapter->generation = 0;
3684 	}
3685 
3686 	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3687 	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
3688 	return 0;
3689 }
3690 
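/* Recover a Lancer function after an adapter error: wait for the FW to
 * become ready again, tear down and re-create the adapter resources, and
 * re-open the interface if it was running.
 */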
3691 static int lancer_recover_func(struct be_adapter *adapter)
3692 {
3693 	int status;
3694 
3695 	status = lancer_test_and_set_rdy_state(adapter);
3696 	if (status)
3697 		goto err;
3698 
3699 	if (netif_running(adapter->netdev))
3700 		be_close(adapter->netdev);
3701 
3702 	be_clear(adapter);
3703 
3704 	adapter->hw_error = false;
3705 	adapter->fw_timeout = false;
3706 
3707 	status = be_setup(adapter);
3708 	if (status)
3709 		goto err;
3710 
3711 	if (netif_running(adapter->netdev)) {
3712 		status = be_open(adapter->netdev);
3713 		if (status)
3714 			goto err;
3715 	}
3716 
	dev_info(&adapter->pdev->dev,
		 "Adapter SLIPORT recovery succeeded\n");
3719 	return 0;
3720 err:
3721 	dev_err(&adapter->pdev->dev,
3722 		"Adapter SLIPORT recovery failed\n");
3723 
3724 	return status;
3725 }
3726 
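/* Runs every second to detect adapter errors; on Lancer chips it attempts
 * SLIPORT recovery with the netdev detached.
 */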
3727 static void be_func_recovery_task(struct work_struct *work)
3728 {
3729 	struct be_adapter *adapter =
3730 		container_of(work, struct be_adapter,  func_recovery_work.work);
3731 	int status;
3732 
3733 	be_detect_error(adapter);
3734 
	if (adapter->hw_error && lancer_chip(adapter)) {
3737 		if (adapter->eeh_error)
3738 			goto out;
3739 
3740 		rtnl_lock();
3741 		netif_device_detach(adapter->netdev);
3742 		rtnl_unlock();
3743 
3744 		status = lancer_recover_func(adapter);
3745 
3746 		if (!status)
3747 			netif_device_attach(adapter->netdev);
3748 	}
3749 
3750 out:
3751 	schedule_delayed_work(&adapter->func_recovery_work,
3752 			      msecs_to_jiffies(1000));
3753 }
3754 
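/* Periodic (1 sec) housekeeping: issue stats/die-temperature cmds,
 * replenish starved RX queues and update EQ interrupt delays.
 */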
3755 static void be_worker(struct work_struct *work)
3756 {
3757 	struct be_adapter *adapter =
3758 		container_of(work, struct be_adapter, work.work);
3759 	struct be_rx_obj *rxo;
3760 	struct be_eq_obj *eqo;
3761 	int i;
3762 
	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions
	 */
3765 	if (!netif_running(adapter->netdev)) {
3766 		be_process_mcc(adapter);
3767 		goto reschedule;
3768 	}
3769 
3770 	if (!adapter->stats_cmd_sent) {
3771 		if (lancer_chip(adapter))
3772 			lancer_cmd_get_pport_stats(adapter,
3773 						&adapter->stats_cmd);
3774 		else
3775 			be_cmd_get_stats(adapter, &adapter->stats_cmd);
3776 	}
3777 
3778 	if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
3779 		be_cmd_get_die_temperature(adapter);
3780 
3781 	for_all_rx_queues(adapter, rxo, i) {
3782 		if (rxo->rx_post_starved) {
3783 			rxo->rx_post_starved = false;
3784 			be_post_rx_frags(rxo, GFP_KERNEL);
3785 		}
3786 	}
3787 
3788 	for_all_evt_queues(adapter, eqo, i)
3789 		be_eqd_update(adapter, eqo);
3790 
3791 reschedule:
3792 	adapter->work_counter++;
3793 	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3794 }
3795 
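/* A function reset at probe time is skipped if VFs are already enabled
 * on this function.
 */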
3796 static bool be_reset_required(struct be_adapter *adapter)
3797 {
	return be_find_vfs(adapter, ENABLED) == 0;
3799 }
3800 
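/* PCI probe: enable the device, allocate the netdev, sync with the FW's
 * ready state, set up the adapter's queues/filters and register the netdev.
 */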
3801 static int __devinit be_probe(struct pci_dev *pdev,
3802 			const struct pci_device_id *pdev_id)
3803 {
3804 	int status = 0;
3805 	struct be_adapter *adapter;
3806 	struct net_device *netdev;
3807 	char port_name;
3808 
3809 	status = pci_enable_device(pdev);
3810 	if (status)
3811 		goto do_none;
3812 
3813 	status = pci_request_regions(pdev, DRV_NAME);
3814 	if (status)
3815 		goto disable_dev;
3816 	pci_set_master(pdev);
3817 
3818 	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (!netdev) {
3820 		status = -ENOMEM;
3821 		goto rel_reg;
3822 	}
3823 	adapter = netdev_priv(netdev);
3824 	adapter->pdev = pdev;
3825 	pci_set_drvdata(pdev, adapter);
3826 
3827 	status = be_dev_type_check(adapter);
3828 	if (status)
3829 		goto free_netdev;
3830 
3831 	adapter->netdev = netdev;
3832 	SET_NETDEV_DEV(netdev, &pdev->dev);
3833 
3834 	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
3835 	if (!status) {
3836 		netdev->features |= NETIF_F_HIGHDMA;
3837 	} else {
3838 		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3839 		if (status) {
3840 			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3841 			goto free_netdev;
3842 		}
3843 	}
3844 
3845 	status = be_ctrl_init(adapter);
3846 	if (status)
3847 		goto free_netdev;
3848 
3849 	/* sync up with fw's ready state */
3850 	if (be_physfn(adapter)) {
3851 		status = be_fw_wait_ready(adapter);
3852 		if (status)
3853 			goto ctrl_clean;
3854 	}
3855 
3856 	/* tell fw we're ready to fire cmds */
3857 	status = be_cmd_fw_init(adapter);
3858 	if (status)
3859 		goto ctrl_clean;
3860 
3861 	if (be_reset_required(adapter)) {
3862 		status = be_cmd_reset_function(adapter);
3863 		if (status)
3864 			goto ctrl_clean;
3865 	}
3866 
3867 	/* The INTR bit may be set in the card when probed by a kdump kernel
3868 	 * after a crash.
3869 	 */
3870 	if (!lancer_chip(adapter))
3871 		be_intr_set(adapter, false);
3872 
3873 	status = be_stats_init(adapter);
3874 	if (status)
3875 		goto ctrl_clean;
3876 
3877 	status = be_get_initial_config(adapter);
3878 	if (status)
3879 		goto stats_clean;
3880 
3881 	INIT_DELAYED_WORK(&adapter->work, be_worker);
3882 	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
3883 	adapter->rx_fc = adapter->tx_fc = true;
3884 
3885 	status = be_setup(adapter);
3886 	if (status)
3887 		goto msix_disable;
3888 
3889 	be_netdev_init(netdev);
3890 	status = register_netdev(netdev);
3891 	if (status != 0)
3892 		goto unsetup;
3893 
3894 	be_roce_dev_add(adapter);
3895 
3896 	schedule_delayed_work(&adapter->func_recovery_work,
3897 			      msecs_to_jiffies(1000));
3898 
3899 	be_cmd_query_port_name(adapter, &port_name);
3900 
3901 	dev_info(&pdev->dev, "%s: %s port %c\n", netdev->name, nic_name(pdev),
3902 		 port_name);
3903 
3904 	return 0;
3905 
3906 unsetup:
3907 	be_clear(adapter);
3908 msix_disable:
3909 	be_msix_disable(adapter);
3910 stats_clean:
3911 	be_stats_cleanup(adapter);
3912 ctrl_clean:
3913 	be_ctrl_cleanup(adapter);
3914 free_netdev:
3915 	free_netdev(netdev);
3916 	pci_set_drvdata(pdev, NULL);
3917 rel_reg:
3918 	pci_release_regions(pdev);
3919 disable_dev:
3920 	pci_disable_device(pdev);
3921 do_none:
3922 	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
3923 	return status;
3924 }
3925 
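/* PM suspend: arm WoL if enabled, stop the recovery worker, close the
 * interface and tear down adapter resources before powering the device down.
 */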
3926 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3927 {
3928 	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;
3930 
3931 	if (adapter->wol)
3932 		be_setup_wol(adapter, true);
3933 
3934 	cancel_delayed_work_sync(&adapter->func_recovery_work);
3935 
3936 	netif_device_detach(netdev);
3937 	if (netif_running(netdev)) {
3938 		rtnl_lock();
3939 		be_close(netdev);
3940 		rtnl_unlock();
3941 	}
3942 	be_clear(adapter);
3943 
3944 	pci_save_state(pdev);
3945 	pci_disable_device(pdev);
3946 	pci_set_power_state(pdev, pci_choose_state(pdev, state));
3947 	return 0;
3948 }
3949 
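/* PM resume: re-enable the device, re-create the adapter resources,
 * re-open the interface and disarm WoL if it was armed at suspend.
 */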
3950 static int be_resume(struct pci_dev *pdev)
3951 {
3952 	int status = 0;
3953 	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;
3955 
3956 	netif_device_detach(netdev);
3957 
3958 	status = pci_enable_device(pdev);
3959 	if (status)
3960 		return status;
3961 
	pci_set_power_state(pdev, PCI_D0);
3963 	pci_restore_state(pdev);
3964 
3965 	/* tell fw we're ready to fire cmds */
3966 	status = be_cmd_fw_init(adapter);
3967 	if (status)
3968 		return status;
3969 
3970 	be_setup(adapter);
3971 	if (netif_running(netdev)) {
3972 		rtnl_lock();
3973 		be_open(netdev);
3974 		rtnl_unlock();
3975 	}
3976 
3977 	schedule_delayed_work(&adapter->func_recovery_work,
3978 			      msecs_to_jiffies(1000));
3979 	netif_device_attach(netdev);
3980 
3981 	if (adapter->wol)
3982 		be_setup_wol(adapter, false);
3983 
3984 	return 0;
3985 }
3986 
3987 /*
3988  * An FLR will stop BE from DMAing any data.
3989  */
3990 static void be_shutdown(struct pci_dev *pdev)
3991 {
3992 	struct be_adapter *adapter = pci_get_drvdata(pdev);
3993 
3994 	if (!adapter)
3995 		return;
3996 
3997 	cancel_delayed_work_sync(&adapter->work);
3998 	cancel_delayed_work_sync(&adapter->func_recovery_work);
3999 
4000 	netif_device_detach(adapter->netdev);
4001 
4002 	if (adapter->wol)
4003 		be_setup_wol(adapter, true);
4004 
4005 	be_cmd_reset_function(adapter);
4006 
4007 	pci_disable_device(pdev);
4008 }
4009 
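/* EEH error-detected callback: quiesce the adapter and, unless the failure
 * is permanent, request a slot reset. A delay allows any in-progress FW
 * flash dump to complete before the reset.
 */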
4010 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4011 				pci_channel_state_t state)
4012 {
4013 	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;
4015 
4016 	dev_err(&adapter->pdev->dev, "EEH error detected\n");
4017 
4018 	adapter->eeh_error = true;
4019 
4020 	cancel_delayed_work_sync(&adapter->func_recovery_work);
4021 
4022 	rtnl_lock();
4023 	netif_device_detach(netdev);
4024 	rtnl_unlock();
4025 
4026 	if (netif_running(netdev)) {
4027 		rtnl_lock();
4028 		be_close(netdev);
4029 		rtnl_unlock();
4030 	}
4031 	be_clear(adapter);
4032 
4033 	if (state == pci_channel_io_perm_failure)
4034 		return PCI_ERS_RESULT_DISCONNECT;
4035 
4036 	pci_disable_device(pdev);
4037 
4038 	/* The error could cause the FW to trigger a flash debug dump.
4039 	 * Resetting the card while flash dump is in progress
4040 	 * can cause it not to recover; wait for it to finish
4041 	 */
4042 	ssleep(30);
4043 	return PCI_ERS_RESULT_NEED_RESET;
4044 }
4045 
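/* EEH slot-reset callback: re-enable and restore the device, then wait for
 * the FW to become ready before reporting the slot as recovered.
 */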
4046 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
4047 {
4048 	struct be_adapter *adapter = pci_get_drvdata(pdev);
4049 	int status;
4050 
4051 	dev_info(&adapter->pdev->dev, "EEH reset\n");
4052 	be_clear_all_error(adapter);
4053 
4054 	status = pci_enable_device(pdev);
4055 	if (status)
4056 		return PCI_ERS_RESULT_DISCONNECT;
4057 
4058 	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
4060 	pci_restore_state(pdev);
4061 
4062 	/* Check if card is ok and fw is ready */
4063 	status = be_fw_wait_ready(adapter);
4064 	if (status)
4065 		return PCI_ERS_RESULT_DISCONNECT;
4066 
4067 	return PCI_ERS_RESULT_RECOVERED;
4068 }
4069 
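/* EEH resume callback: re-init FW cmds, reset the function, re-create the
 * adapter resources and re-open the interface after slot recovery.
 */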
4070 static void be_eeh_resume(struct pci_dev *pdev)
4071 {
4072 	int status = 0;
4073 	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;
4075 
4076 	dev_info(&adapter->pdev->dev, "EEH resume\n");
4077 
4078 	pci_save_state(pdev);
4079 
4080 	/* tell fw we're ready to fire cmds */
4081 	status = be_cmd_fw_init(adapter);
4082 	if (status)
4083 		goto err;
4084 
4085 	status = be_cmd_reset_function(adapter);
4086 	if (status)
4087 		goto err;
4088 
4089 	status = be_setup(adapter);
4090 	if (status)
4091 		goto err;
4092 
4093 	if (netif_running(netdev)) {
4094 		status = be_open(netdev);
4095 		if (status)
4096 			goto err;
4097 	}
4098 
4099 	schedule_delayed_work(&adapter->func_recovery_work,
4100 			      msecs_to_jiffies(1000));
4101 	netif_device_attach(netdev);
4102 	return;
4103 err:
4104 	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
4105 }
4106 
4107 static struct pci_error_handlers be_eeh_handlers = {
4108 	.error_detected = be_eeh_err_detected,
4109 	.slot_reset = be_eeh_reset,
4110 	.resume = be_eeh_resume,
4111 };
4112 
4113 static struct pci_driver be_driver = {
4114 	.name = DRV_NAME,
4115 	.id_table = be_dev_ids,
4116 	.probe = be_probe,
4117 	.remove = be_remove,
4118 	.suspend = be_suspend,
4119 	.resume = be_resume,
4120 	.shutdown = be_shutdown,
4121 	.err_handler = &be_eeh_handlers
4122 };
4123 
4124 static int __init be_init_module(void)
4125 {
4126 	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4127 	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
		       " : Module param rx_frag_size must be 2048/4096/8192. Using 2048\n");
4131 		rx_frag_size = 2048;
4132 	}
4133 
4134 	return pci_register_driver(&be_driver);
4135 }
4136 module_init(be_init_module);
4137 
4138 static void __exit be_exit_module(void)
4139 {
4140 	pci_unregister_driver(&be_driver);
4141 }
4142 module_exit(be_exit_module);
4143