1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004-2013 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11 
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13 
14 #include <linux/module.h>
15 #include <linux/moduleparam.h>
16 
17 #include <linux/stringify.h>
18 #include <linux/kernel.h>
19 #include <linux/timer.h>
20 #include <linux/errno.h>
21 #include <linux/ioport.h>
22 #include <linux/slab.h>
23 #include <linux/vmalloc.h>
24 #include <linux/interrupt.h>
25 #include <linux/pci.h>
26 #include <linux/netdevice.h>
27 #include <linux/etherdevice.h>
28 #include <linux/skbuff.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/bitops.h>
31 #include <asm/io.h>
32 #include <asm/irq.h>
33 #include <linux/delay.h>
34 #include <asm/byteorder.h>
35 #include <asm/page.h>
36 #include <linux/time.h>
37 #include <linux/ethtool.h>
38 #include <linux/mii.h>
39 #include <linux/if.h>
40 #include <linux/if_vlan.h>
41 #include <net/ip.h>
42 #include <net/tcp.h>
43 #include <net/checksum.h>
44 #include <linux/workqueue.h>
45 #include <linux/crc32.h>
46 #include <linux/prefetch.h>
47 #include <linux/cache.h>
48 #include <linux/firmware.h>
49 #include <linux/log2.h>
50 #include <linux/aer.h>
51 
52 #if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
53 #define BCM_CNIC 1
54 #include "cnic_if.h"
55 #endif
56 #include "bnx2.h"
57 #include "bnx2_fw.h"
58 
59 #define DRV_MODULE_NAME		"bnx2"
60 #define DRV_MODULE_VERSION	"2.2.5"
61 #define DRV_MODULE_RELDATE	"December 20, 2013"
62 #define FW_MIPS_FILE_06		"bnx2/bnx2-mips-06-6.2.3.fw"
63 #define FW_RV2P_FILE_06		"bnx2/bnx2-rv2p-06-6.0.15.fw"
64 #define FW_MIPS_FILE_09		"bnx2/bnx2-mips-09-6.2.1b.fw"
65 #define FW_RV2P_FILE_09_Ax	"bnx2/bnx2-rv2p-09ax-6.0.17.fw"
66 #define FW_RV2P_FILE_09		"bnx2/bnx2-rv2p-09-6.0.17.fw"
67 
68 #define RUN_AT(x) (jiffies + (x))
69 
70 /* Time in jiffies before concluding the transmitter is hung. */
71 #define TX_TIMEOUT  (5*HZ)
72 
73 static char version[] =
74 	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
75 
76 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
77 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
78 MODULE_LICENSE("GPL");
79 MODULE_VERSION(DRV_MODULE_VERSION);
80 MODULE_FIRMWARE(FW_MIPS_FILE_06);
81 MODULE_FIRMWARE(FW_RV2P_FILE_06);
82 MODULE_FIRMWARE(FW_MIPS_FILE_09);
83 MODULE_FIRMWARE(FW_RV2P_FILE_09);
84 MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);
85 
86 static int disable_msi = 0;
87 
88 module_param(disable_msi, int, S_IRUGO);
89 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
90 
91 typedef enum {
92 	BCM5706 = 0,
93 	NC370T,
94 	NC370I,
95 	BCM5706S,
96 	NC370F,
97 	BCM5708,
98 	BCM5708S,
99 	BCM5709,
100 	BCM5709S,
101 	BCM5716,
102 	BCM5716S,
103 } board_t;
104 
105 /* indexed by board_t, above */
106 static struct {
107 	char *name;
108 } board_info[] = {
109 	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
110 	{ "HP NC370T Multifunction Gigabit Server Adapter" },
111 	{ "HP NC370i Multifunction Gigabit Server Adapter" },
112 	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
113 	{ "HP NC370F Multifunction Gigabit Server Adapter" },
114 	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
115 	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
116 	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
117 	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
118 	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
119 	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
120 	};
121 
static const struct pci_device_id bnx2_pci_tbl[] = {
123 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
124 	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
125 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
126 	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
127 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
128 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
129 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
130 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
131 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
132 	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
133 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
134 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
135 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
136 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
137 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
138 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
139 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
140 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
141 	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
142 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
143 	{ PCI_VENDOR_ID_BROADCOM, 0x163c,
144 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
145 	{ 0, }
146 };
147 
148 static const struct flash_spec flash_table[] =
149 {
150 #define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
151 #define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
152 	/* Slow EEPROM */
153 	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
154 	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
155 	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
156 	 "EEPROM - slow"},
157 	/* Expansion entry 0001 */
158 	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
159 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
160 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
161 	 "Entry 0001"},
162 	/* Saifun SA25F010 (non-buffered flash) */
163 	/* strap, cfg1, & write1 need updates */
164 	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
165 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
166 	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
167 	 "Non-buffered flash (128kB)"},
168 	/* Saifun SA25F020 (non-buffered flash) */
169 	/* strap, cfg1, & write1 need updates */
170 	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
171 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
172 	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
173 	 "Non-buffered flash (256kB)"},
174 	/* Expansion entry 0100 */
175 	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
176 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
177 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
178 	 "Entry 0100"},
179 	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
180 	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
181 	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
182 	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash) */
185 	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
186 	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
187 	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
189 	/* Saifun SA25F005 (non-buffered flash) */
190 	/* strap, cfg1, & write1 need updates */
191 	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
192 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
193 	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
194 	 "Non-buffered flash (64kB)"},
195 	/* Fast EEPROM */
196 	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
197 	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
198 	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
199 	 "EEPROM - fast"},
200 	/* Expansion entry 1001 */
201 	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
202 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
203 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
204 	 "Entry 1001"},
205 	/* Expansion entry 1010 */
206 	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
207 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
208 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
209 	 "Entry 1010"},
210 	/* ATMEL AT45DB011B (buffered flash) */
211 	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
212 	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
213 	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
214 	 "Buffered flash (128kB)"},
215 	/* Expansion entry 1100 */
216 	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
217 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
218 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
219 	 "Entry 1100"},
220 	/* Expansion entry 1101 */
221 	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
222 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
223 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
224 	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
226 	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
227 	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
228 	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
229 	 "Entry 1110 (Atmel)"},
230 	/* ATMEL AT45DB021B (buffered flash) */
231 	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
232 	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
233 	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
234 	 "Buffered flash (256kB)"},
235 };
236 
237 static const struct flash_spec flash_5709 = {
238 	.flags		= BNX2_NV_BUFFERED,
239 	.page_bits	= BCM5709_FLASH_PAGE_BITS,
240 	.page_size	= BCM5709_FLASH_PAGE_SIZE,
241 	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
242 	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
243 	.name		= "5709 Buffered flash (256kB)",
244 };
245 
246 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
247 
248 static void bnx2_init_napi(struct bnx2 *bp);
249 static void bnx2_del_napi(struct bnx2 *bp);
250 
251 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
252 {
253 	u32 diff;
254 
255 	/* Tell compiler to fetch tx_prod and tx_cons from memory. */
256 	barrier();
257 
	/* The ring uses 256 indices for 255 entries; one of them
	 * needs to be skipped.
	 */
261 	diff = txr->tx_prod - txr->tx_cons;
262 	if (unlikely(diff >= BNX2_TX_DESC_CNT)) {
263 		diff &= 0xffff;
264 		if (diff == BNX2_TX_DESC_CNT)
265 			diff = BNX2_MAX_TX_DESC_CNT;
266 	}
267 	return bp->tx_ring_size - diff;
268 }
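
/* Worked example for bnx2_tx_avail() (illustrative, not part of the
 * original source): suppose the 16-bit hardware indices have wrapped so
 * that tx_prod = 3 and tx_cons = 0xfffe.  As a u32, 3 - 0xfffe is
 * 0xffff0005; masking with 0xffff recovers 5, so five descriptors are
 * outstanding across the wrap and tx_ring_size - 5 slots remain.  A
 * masked difference of exactly BNX2_TX_DESC_CNT means the ring is
 * completely full and is clamped to BNX2_MAX_TX_DESC_CNT, the 255
 * usable entries.
 */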
269 
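/* Indirect register access goes through a two-step window: the target
 * offset is written to BNX2_PCICFG_REG_WINDOW_ADDRESS and the data then
 * moves through BNX2_PCICFG_REG_WINDOW.  indirect_lock keeps each
 * address/data pair atomic against other users of the window.
 */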
270 static u32
271 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
272 {
273 	u32 val;
274 
275 	spin_lock_bh(&bp->indirect_lock);
276 	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
277 	val = BNX2_RD(bp, BNX2_PCICFG_REG_WINDOW);
278 	spin_unlock_bh(&bp->indirect_lock);
279 	return val;
280 }
281 
282 static void
283 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
284 {
285 	spin_lock_bh(&bp->indirect_lock);
286 	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
287 	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
288 	spin_unlock_bh(&bp->indirect_lock);
289 }
290 
291 static void
292 bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
293 {
294 	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
295 }
296 
297 static u32
298 bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
299 {
300 	return bnx2_reg_rd_ind(bp, bp->shmem_base + offset);
301 }
302 
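/* On the 5709, context memory is written through a handshake: the data
 * and target address are latched, WRITE_REQ is set, and the bit is then
 * polled (up to 5 x 5 usec here) until the hardware clears it.  Older
 * chips accept the address/data pair directly via BNX2_CTX_DATA_ADR and
 * BNX2_CTX_DATA.
 */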
303 static void
304 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
305 {
306 	offset += cid_addr;
307 	spin_lock_bh(&bp->indirect_lock);
308 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
309 		int i;
310 
311 		BNX2_WR(bp, BNX2_CTX_CTX_DATA, val);
312 		BNX2_WR(bp, BNX2_CTX_CTX_CTRL,
313 			offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
314 		for (i = 0; i < 5; i++) {
315 			val = BNX2_RD(bp, BNX2_CTX_CTX_CTRL);
316 			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
317 				break;
318 			udelay(5);
319 		}
320 	} else {
321 		BNX2_WR(bp, BNX2_CTX_DATA_ADR, offset);
322 		BNX2_WR(bp, BNX2_CTX_DATA, val);
323 	}
324 	spin_unlock_bh(&bp->indirect_lock);
325 }
326 
327 #ifdef BCM_CNIC
328 static int
329 bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
330 {
331 	struct bnx2 *bp = netdev_priv(dev);
332 	struct drv_ctl_io *io = &info->data.io;
333 
334 	switch (info->cmd) {
335 	case DRV_CTL_IO_WR_CMD:
336 		bnx2_reg_wr_ind(bp, io->offset, io->data);
337 		break;
338 	case DRV_CTL_IO_RD_CMD:
339 		io->data = bnx2_reg_rd_ind(bp, io->offset);
340 		break;
341 	case DRV_CTL_CTX_WR_CMD:
342 		bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
343 		break;
344 	default:
345 		return -EINVAL;
346 	}
347 	return 0;
348 }
349 
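/* With MSI-X, the cnic driver is given the vector after the last network
 * vector (sb_id == bp->irq_nvecs) along with its own status block slot;
 * otherwise it shares vector 0 and piggybacks on the default status
 * block, tracked through cnic_tag.
 */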
350 static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
351 {
352 	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
353 	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
354 	int sb_id;
355 
356 	if (bp->flags & BNX2_FLAG_USING_MSIX) {
357 		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
358 		bnapi->cnic_present = 0;
359 		sb_id = bp->irq_nvecs;
360 		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
361 	} else {
362 		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
363 		bnapi->cnic_tag = bnapi->last_status_idx;
364 		bnapi->cnic_present = 1;
365 		sb_id = 0;
366 		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
367 	}
368 
369 	cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
370 	cp->irq_arr[0].status_blk = (void *)
371 		((unsigned long) bnapi->status_blk.msi +
372 		(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
373 	cp->irq_arr[0].status_blk_num = sb_id;
374 	cp->num_irq = 1;
375 }
376 
377 static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
378 			      void *data)
379 {
380 	struct bnx2 *bp = netdev_priv(dev);
381 	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
382 
383 	if (ops == NULL)
384 		return -EINVAL;
385 
386 	if (cp->drv_state & CNIC_DRV_STATE_REGD)
387 		return -EBUSY;
388 
389 	if (!bnx2_reg_rd_ind(bp, BNX2_FW_MAX_ISCSI_CONN))
390 		return -ENODEV;
391 
392 	bp->cnic_data = data;
393 	rcu_assign_pointer(bp->cnic_ops, ops);
394 
395 	cp->num_irq = 0;
396 	cp->drv_state = CNIC_DRV_STATE_REGD;
397 
398 	bnx2_setup_cnic_irq_info(bp);
399 
400 	return 0;
401 }
402 
403 static int bnx2_unregister_cnic(struct net_device *dev)
404 {
405 	struct bnx2 *bp = netdev_priv(dev);
406 	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
407 	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
408 
409 	mutex_lock(&bp->cnic_lock);
410 	cp->drv_state = 0;
411 	bnapi->cnic_present = 0;
412 	RCU_INIT_POINTER(bp->cnic_ops, NULL);
413 	mutex_unlock(&bp->cnic_lock);
414 	synchronize_rcu();
415 	return 0;
416 }
417 
418 static struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
419 {
420 	struct bnx2 *bp = netdev_priv(dev);
421 	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
422 
423 	if (!cp->max_iscsi_conn)
424 		return NULL;
425 
426 	cp->drv_owner = THIS_MODULE;
427 	cp->chip_id = bp->chip_id;
428 	cp->pdev = bp->pdev;
429 	cp->io_base = bp->regview;
430 	cp->drv_ctl = bnx2_drv_ctl;
431 	cp->drv_register_cnic = bnx2_register_cnic;
432 	cp->drv_unregister_cnic = bnx2_unregister_cnic;
433 
434 	return cp;
435 }
436 
437 static void
438 bnx2_cnic_stop(struct bnx2 *bp)
439 {
440 	struct cnic_ops *c_ops;
441 	struct cnic_ctl_info info;
442 
443 	mutex_lock(&bp->cnic_lock);
444 	c_ops = rcu_dereference_protected(bp->cnic_ops,
445 					  lockdep_is_held(&bp->cnic_lock));
446 	if (c_ops) {
447 		info.cmd = CNIC_CTL_STOP_CMD;
448 		c_ops->cnic_ctl(bp->cnic_data, &info);
449 	}
450 	mutex_unlock(&bp->cnic_lock);
451 }
452 
453 static void
454 bnx2_cnic_start(struct bnx2 *bp)
455 {
456 	struct cnic_ops *c_ops;
457 	struct cnic_ctl_info info;
458 
459 	mutex_lock(&bp->cnic_lock);
460 	c_ops = rcu_dereference_protected(bp->cnic_ops,
461 					  lockdep_is_held(&bp->cnic_lock));
462 	if (c_ops) {
463 		if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
464 			struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
465 
466 			bnapi->cnic_tag = bnapi->last_status_idx;
467 		}
468 		info.cmd = CNIC_CTL_START_CMD;
469 		c_ops->cnic_ctl(bp->cnic_data, &info);
470 	}
471 	mutex_unlock(&bp->cnic_lock);
472 }
473 
474 #else
475 
476 static void
477 bnx2_cnic_stop(struct bnx2 *bp)
478 {
479 }
480 
481 static void
482 bnx2_cnic_start(struct bnx2 *bp)
483 {
484 }
485 
486 #endif
487 
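/* bnx2_read_phy() and bnx2_write_phy() drive a clause-22 MDIO frame
 * through the EMAC: the PHY address occupies bits 25:21 of the COMM
 * register, the register number bits 20:16, and write data bits 15:0.
 * START_BUSY starts the transaction and is polled (up to 50 x 10 usec)
 * until the hardware clears it.  When the PHY is in auto-poll mode,
 * polling is paused around the access and restored afterwards.
 */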
488 static int
489 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
490 {
491 	u32 val1;
492 	int i, ret;
493 
494 	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
495 		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
496 		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
497 
498 		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
499 		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
500 
501 		udelay(40);
502 	}
503 
504 	val1 = (bp->phy_addr << 21) | (reg << 16) |
505 		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
506 		BNX2_EMAC_MDIO_COMM_START_BUSY;
507 	BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
508 
509 	for (i = 0; i < 50; i++) {
510 		udelay(10);
511 
512 		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
513 		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
514 			udelay(5);
515 
516 			val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
517 			val1 &= BNX2_EMAC_MDIO_COMM_DATA;
518 
519 			break;
520 		}
521 	}
522 
523 	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
524 		*val = 0x0;
525 		ret = -EBUSY;
526 	}
527 	else {
528 		*val = val1;
529 		ret = 0;
530 	}
531 
532 	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
533 		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
534 		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
535 
536 		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
537 		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
538 
539 		udelay(40);
540 	}
541 
542 	return ret;
543 }
544 
545 static int
546 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
547 {
548 	u32 val1;
549 	int i, ret;
550 
551 	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
552 		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
553 		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
554 
555 		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
556 		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
557 
558 		udelay(40);
559 	}
560 
561 	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
562 		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
563 		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
564 	BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
565 
566 	for (i = 0; i < 50; i++) {
567 		udelay(10);
568 
569 		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
570 		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
571 			udelay(5);
572 			break;
573 		}
574 	}
575 
576 	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
578 	else
579 		ret = 0;
580 
581 	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
582 		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
583 		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
584 
585 		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
586 		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
587 
588 		udelay(40);
589 	}
590 
591 	return ret;
592 }
593 
594 static void
595 bnx2_disable_int(struct bnx2 *bp)
596 {
597 	int i;
598 	struct bnx2_napi *bnapi;
599 
600 	for (i = 0; i < bp->irq_nvecs; i++) {
601 		bnapi = &bp->bnx2_napi[i];
602 		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
603 		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
604 	}
605 	BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
606 }
607 
608 static void
609 bnx2_enable_int(struct bnx2 *bp)
610 {
611 	int i;
612 	struct bnx2_napi *bnapi;
613 
614 	for (i = 0; i < bp->irq_nvecs; i++) {
615 		bnapi = &bp->bnx2_napi[i];
616 
617 		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
618 			BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
619 			BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
620 			bnapi->last_status_idx);
621 
622 		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
623 			BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
624 			bnapi->last_status_idx);
625 	}
626 	BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
627 }
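
/* The COAL_NOW write at the end of bnx2_enable_int() kicks the host
 * coalescing block immediately, the intent being that events which
 * arrived while interrupts were masked generate an interrupt right away
 * instead of waiting for the next coalescing timeout.
 */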
628 
629 static void
630 bnx2_disable_int_sync(struct bnx2 *bp)
631 {
632 	int i;
633 
634 	atomic_inc(&bp->intr_sem);
635 	if (!netif_running(bp->dev))
636 		return;
637 
638 	bnx2_disable_int(bp);
639 	for (i = 0; i < bp->irq_nvecs; i++)
640 		synchronize_irq(bp->irq_tbl[i].vector);
641 }
642 
643 static void
644 bnx2_napi_disable(struct bnx2 *bp)
645 {
646 	int i;
647 
648 	for (i = 0; i < bp->irq_nvecs; i++)
649 		napi_disable(&bp->bnx2_napi[i].napi);
650 }
651 
652 static void
653 bnx2_napi_enable(struct bnx2 *bp)
654 {
655 	int i;
656 
657 	for (i = 0; i < bp->irq_nvecs; i++)
658 		napi_enable(&bp->bnx2_napi[i].napi);
659 }
660 
661 static void
662 bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
663 {
664 	if (stop_cnic)
665 		bnx2_cnic_stop(bp);
666 	if (netif_running(bp->dev)) {
667 		bnx2_napi_disable(bp);
668 		netif_tx_disable(bp->dev);
669 	}
670 	bnx2_disable_int_sync(bp);
671 	netif_carrier_off(bp->dev);	/* prevent tx timeout */
672 }
673 
674 static void
675 bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
676 {
677 	if (atomic_dec_and_test(&bp->intr_sem)) {
678 		if (netif_running(bp->dev)) {
679 			netif_tx_wake_all_queues(bp->dev);
680 			spin_lock_bh(&bp->phy_lock);
681 			if (bp->link_up)
682 				netif_carrier_on(bp->dev);
683 			spin_unlock_bh(&bp->phy_lock);
684 			bnx2_napi_enable(bp);
685 			bnx2_enable_int(bp);
686 			if (start_cnic)
687 				bnx2_cnic_start(bp);
688 		}
689 	}
690 }
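
/* intr_sem makes stop/start nest: bnx2_disable_int_sync() increments it,
 * and bnx2_netif_start() re-enables NAPI and interrupts only when its
 * atomic_dec_and_test() brings the count back to zero.
 */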
691 
692 static void
693 bnx2_free_tx_mem(struct bnx2 *bp)
694 {
695 	int i;
696 
697 	for (i = 0; i < bp->num_tx_rings; i++) {
698 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
699 		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
700 
701 		if (txr->tx_desc_ring) {
702 			dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
703 					  txr->tx_desc_ring,
704 					  txr->tx_desc_mapping);
705 			txr->tx_desc_ring = NULL;
706 		}
707 		kfree(txr->tx_buf_ring);
708 		txr->tx_buf_ring = NULL;
709 	}
710 }
711 
712 static void
713 bnx2_free_rx_mem(struct bnx2 *bp)
714 {
715 	int i;
716 
717 	for (i = 0; i < bp->num_rx_rings; i++) {
718 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
719 		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
720 		int j;
721 
722 		for (j = 0; j < bp->rx_max_ring; j++) {
723 			if (rxr->rx_desc_ring[j])
724 				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
725 						  rxr->rx_desc_ring[j],
726 						  rxr->rx_desc_mapping[j]);
727 			rxr->rx_desc_ring[j] = NULL;
728 		}
729 		vfree(rxr->rx_buf_ring);
730 		rxr->rx_buf_ring = NULL;
731 
732 		for (j = 0; j < bp->rx_max_pg_ring; j++) {
733 			if (rxr->rx_pg_desc_ring[j])
734 				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
735 						  rxr->rx_pg_desc_ring[j],
736 						  rxr->rx_pg_desc_mapping[j]);
737 			rxr->rx_pg_desc_ring[j] = NULL;
738 		}
739 		vfree(rxr->rx_pg_ring);
740 		rxr->rx_pg_ring = NULL;
741 	}
742 }
743 
744 static int
745 bnx2_alloc_tx_mem(struct bnx2 *bp)
746 {
747 	int i;
748 
749 	for (i = 0; i < bp->num_tx_rings; i++) {
750 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
751 		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
752 
753 		txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
754 		if (txr->tx_buf_ring == NULL)
755 			return -ENOMEM;
756 
757 		txr->tx_desc_ring =
758 			dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
759 					   &txr->tx_desc_mapping, GFP_KERNEL);
760 		if (txr->tx_desc_ring == NULL)
761 			return -ENOMEM;
762 	}
763 	return 0;
764 }
765 
766 static int
767 bnx2_alloc_rx_mem(struct bnx2 *bp)
768 {
769 	int i;
770 
771 	for (i = 0; i < bp->num_rx_rings; i++) {
772 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
773 		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
774 		int j;
775 
776 		rxr->rx_buf_ring =
777 			vzalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
778 		if (rxr->rx_buf_ring == NULL)
779 			return -ENOMEM;
780 
781 		for (j = 0; j < bp->rx_max_ring; j++) {
782 			rxr->rx_desc_ring[j] =
783 				dma_alloc_coherent(&bp->pdev->dev,
784 						   RXBD_RING_SIZE,
785 						   &rxr->rx_desc_mapping[j],
786 						   GFP_KERNEL);
787 			if (rxr->rx_desc_ring[j] == NULL)
788 				return -ENOMEM;
789 
790 		}
791 
792 		if (bp->rx_pg_ring_size) {
793 			rxr->rx_pg_ring = vzalloc(SW_RXPG_RING_SIZE *
794 						  bp->rx_max_pg_ring);
795 			if (rxr->rx_pg_ring == NULL)
796 				return -ENOMEM;
797 
798 		}
799 
800 		for (j = 0; j < bp->rx_max_pg_ring; j++) {
801 			rxr->rx_pg_desc_ring[j] =
802 				dma_alloc_coherent(&bp->pdev->dev,
803 						   RXBD_RING_SIZE,
804 						   &rxr->rx_pg_desc_mapping[j],
805 						   GFP_KERNEL);
806 			if (rxr->rx_pg_desc_ring[j] == NULL)
807 				return -ENOMEM;
808 
809 		}
810 	}
811 	return 0;
812 }
813 
814 static void
815 bnx2_free_mem(struct bnx2 *bp)
816 {
817 	int i;
818 	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
819 
820 	bnx2_free_tx_mem(bp);
821 	bnx2_free_rx_mem(bp);
822 
823 	for (i = 0; i < bp->ctx_pages; i++) {
824 		if (bp->ctx_blk[i]) {
825 			dma_free_coherent(&bp->pdev->dev, BNX2_PAGE_SIZE,
826 					  bp->ctx_blk[i],
827 					  bp->ctx_blk_mapping[i]);
828 			bp->ctx_blk[i] = NULL;
829 		}
830 	}
831 	if (bnapi->status_blk.msi) {
832 		dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
833 				  bnapi->status_blk.msi,
834 				  bp->status_blk_mapping);
835 		bnapi->status_blk.msi = NULL;
836 		bp->stats_blk = NULL;
837 	}
838 }
839 
840 static int
841 bnx2_alloc_mem(struct bnx2 *bp)
842 {
843 	int i, status_blk_size, err;
844 	struct bnx2_napi *bnapi;
845 	void *status_blk;
846 
847 	/* Combine status and statistics blocks into one allocation. */
848 	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
849 	if (bp->flags & BNX2_FLAG_MSIX_CAP)
850 		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
851 						 BNX2_SBLK_MSIX_ALIGN_SIZE);
852 	bp->status_stats_size = status_blk_size +
853 				sizeof(struct statistics_block);
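	/* Resulting layout of the single allocation: the status block(s)
	 * come first (one BNX2_SBLK_MSIX_ALIGN_SIZE slot per MSI-X vector
	 * when MSI-X is possible), followed at status_blk_size bytes by
	 * the statistics block; see the stats_blk assignments below.
	 */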
854 
855 	status_blk = dma_zalloc_coherent(&bp->pdev->dev, bp->status_stats_size,
856 					 &bp->status_blk_mapping, GFP_KERNEL);
857 	if (status_blk == NULL)
858 		goto alloc_mem_err;
859 
860 	bnapi = &bp->bnx2_napi[0];
861 	bnapi->status_blk.msi = status_blk;
862 	bnapi->hw_tx_cons_ptr =
863 		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
864 	bnapi->hw_rx_cons_ptr =
865 		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
866 	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
867 		for (i = 1; i < bp->irq_nvecs; i++) {
868 			struct status_block_msix *sblk;
869 
870 			bnapi = &bp->bnx2_napi[i];
871 
872 			sblk = (status_blk + BNX2_SBLK_MSIX_ALIGN_SIZE * i);
873 			bnapi->status_blk.msix = sblk;
874 			bnapi->hw_tx_cons_ptr =
875 				&sblk->status_tx_quick_consumer_index;
876 			bnapi->hw_rx_cons_ptr =
877 				&sblk->status_rx_quick_consumer_index;
878 			bnapi->int_num = i << 24;
879 		}
880 	}
881 
882 	bp->stats_blk = status_blk + status_blk_size;
883 
884 	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
885 
886 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
887 		bp->ctx_pages = 0x2000 / BNX2_PAGE_SIZE;
888 		if (bp->ctx_pages == 0)
889 			bp->ctx_pages = 1;
890 		for (i = 0; i < bp->ctx_pages; i++) {
891 			bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
892 						BNX2_PAGE_SIZE,
893 						&bp->ctx_blk_mapping[i],
894 						GFP_KERNEL);
895 			if (bp->ctx_blk[i] == NULL)
896 				goto alloc_mem_err;
897 		}
898 	}
899 
900 	err = bnx2_alloc_rx_mem(bp);
901 	if (err)
902 		goto alloc_mem_err;
903 
904 	err = bnx2_alloc_tx_mem(bp);
905 	if (err)
906 		goto alloc_mem_err;
907 
908 	return 0;
909 
910 alloc_mem_err:
911 	bnx2_free_mem(bp);
912 	return -ENOMEM;
913 }
914 
915 static void
916 bnx2_report_fw_link(struct bnx2 *bp)
917 {
918 	u32 fw_link_status = 0;
919 
920 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
921 		return;
922 
923 	if (bp->link_up) {
924 		u32 bmsr;
925 
926 		switch (bp->line_speed) {
927 		case SPEED_10:
928 			if (bp->duplex == DUPLEX_HALF)
929 				fw_link_status = BNX2_LINK_STATUS_10HALF;
930 			else
931 				fw_link_status = BNX2_LINK_STATUS_10FULL;
932 			break;
933 		case SPEED_100:
934 			if (bp->duplex == DUPLEX_HALF)
935 				fw_link_status = BNX2_LINK_STATUS_100HALF;
936 			else
937 				fw_link_status = BNX2_LINK_STATUS_100FULL;
938 			break;
939 		case SPEED_1000:
940 			if (bp->duplex == DUPLEX_HALF)
941 				fw_link_status = BNX2_LINK_STATUS_1000HALF;
942 			else
943 				fw_link_status = BNX2_LINK_STATUS_1000FULL;
944 			break;
945 		case SPEED_2500:
946 			if (bp->duplex == DUPLEX_HALF)
947 				fw_link_status = BNX2_LINK_STATUS_2500HALF;
948 			else
949 				fw_link_status = BNX2_LINK_STATUS_2500FULL;
950 			break;
951 		}
952 
953 		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
954 
955 		if (bp->autoneg) {
956 			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
957 
958 			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
959 			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
960 
961 			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
962 			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
963 				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
964 			else
965 				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
966 		}
967 	}
968 	else
969 		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
970 
971 	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
972 }
973 
974 static char *
975 bnx2_xceiver_str(struct bnx2 *bp)
976 {
977 	return (bp->phy_port == PORT_FIBRE) ? "SerDes" :
978 		((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
979 		 "Copper");
980 }
981 
982 static void
983 bnx2_report_link(struct bnx2 *bp)
984 {
985 	if (bp->link_up) {
986 		netif_carrier_on(bp->dev);
987 		netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
988 			    bnx2_xceiver_str(bp),
989 			    bp->line_speed,
990 			    bp->duplex == DUPLEX_FULL ? "full" : "half");
991 
992 		if (bp->flow_ctrl) {
993 			if (bp->flow_ctrl & FLOW_CTRL_RX) {
994 				pr_cont(", receive ");
995 				if (bp->flow_ctrl & FLOW_CTRL_TX)
996 					pr_cont("& transmit ");
997 			}
998 			else {
999 				pr_cont(", transmit ");
1000 			}
1001 			pr_cont("flow control ON");
1002 		}
1003 		pr_cont("\n");
1004 	} else {
1005 		netif_carrier_off(bp->dev);
1006 		netdev_err(bp->dev, "NIC %s Link is Down\n",
1007 			   bnx2_xceiver_str(bp));
1008 	}
1009 
1010 	bnx2_report_fw_link(bp);
1011 }
1012 
1013 static void
1014 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
1015 {
1016 	u32 local_adv, remote_adv;
1017 
1018 	bp->flow_ctrl = 0;
1019 	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
1020 		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
1021 
1022 		if (bp->duplex == DUPLEX_FULL) {
1023 			bp->flow_ctrl = bp->req_flow_ctrl;
1024 		}
1025 		return;
1026 	}
1027 
1028 	if (bp->duplex != DUPLEX_FULL) {
1029 		return;
1030 	}
1031 
1032 	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1033 	    (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
1034 		u32 val;
1035 
1036 		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1037 		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
1038 			bp->flow_ctrl |= FLOW_CTRL_TX;
1039 		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
1040 			bp->flow_ctrl |= FLOW_CTRL_RX;
1041 		return;
1042 	}
1043 
1044 	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1045 	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1046 
1047 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1048 		u32 new_local_adv = 0;
1049 		u32 new_remote_adv = 0;
1050 
1051 		if (local_adv & ADVERTISE_1000XPAUSE)
1052 			new_local_adv |= ADVERTISE_PAUSE_CAP;
1053 		if (local_adv & ADVERTISE_1000XPSE_ASYM)
1054 			new_local_adv |= ADVERTISE_PAUSE_ASYM;
1055 		if (remote_adv & ADVERTISE_1000XPAUSE)
1056 			new_remote_adv |= ADVERTISE_PAUSE_CAP;
1057 		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
1058 			new_remote_adv |= ADVERTISE_PAUSE_ASYM;
1059 
1060 		local_adv = new_local_adv;
1061 		remote_adv = new_remote_adv;
1062 	}
1063 
1064 	/* See Table 28B-3 of 802.3ab-1999 spec. */
1065 	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
1068 				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
1069 			}
1070 			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
1071 				bp->flow_ctrl = FLOW_CTRL_RX;
1072 			}
1073 		}
1074 		else {
1075 			if (remote_adv & ADVERTISE_PAUSE_CAP) {
1076 				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
1077 			}
1078 		}
1079 	}
1080 	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1081 		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
1082 			(remote_adv & ADVERTISE_PAUSE_ASYM)) {
1083 
1084 			bp->flow_ctrl = FLOW_CTRL_TX;
1085 		}
1086 	}
1087 }
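
/* Summary of the resolution above (802.3, Table 28B-3), applied at full
 * duplex when both speed and flow control were autonegotiated:
 *
 *	local CAP, remote CAP			-> TX and RX pause
 *	local CAP+ASYM, remote ASYM only	-> RX pause only
 *	local ASYM only, remote CAP+ASYM	-> TX pause only
 *	anything else				-> no pause
 */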
1088 
1089 static int
1090 bnx2_5709s_linkup(struct bnx2 *bp)
1091 {
1092 	u32 val, speed;
1093 
1094 	bp->link_up = 1;
1095 
1096 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
1097 	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
1098 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1099 
1100 	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
1101 		bp->line_speed = bp->req_line_speed;
1102 		bp->duplex = bp->req_duplex;
1103 		return 0;
1104 	}
1105 	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
1106 	switch (speed) {
1107 		case MII_BNX2_GP_TOP_AN_SPEED_10:
1108 			bp->line_speed = SPEED_10;
1109 			break;
1110 		case MII_BNX2_GP_TOP_AN_SPEED_100:
1111 			bp->line_speed = SPEED_100;
1112 			break;
1113 		case MII_BNX2_GP_TOP_AN_SPEED_1G:
1114 		case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
1115 			bp->line_speed = SPEED_1000;
1116 			break;
1117 		case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
1118 			bp->line_speed = SPEED_2500;
1119 			break;
1120 	}
1121 	if (val & MII_BNX2_GP_TOP_AN_FD)
1122 		bp->duplex = DUPLEX_FULL;
1123 	else
1124 		bp->duplex = DUPLEX_HALF;
1125 	return 0;
1126 }
1127 
1128 static int
1129 bnx2_5708s_linkup(struct bnx2 *bp)
1130 {
1131 	u32 val;
1132 
1133 	bp->link_up = 1;
1134 	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1135 	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
1136 		case BCM5708S_1000X_STAT1_SPEED_10:
1137 			bp->line_speed = SPEED_10;
1138 			break;
1139 		case BCM5708S_1000X_STAT1_SPEED_100:
1140 			bp->line_speed = SPEED_100;
1141 			break;
1142 		case BCM5708S_1000X_STAT1_SPEED_1G:
1143 			bp->line_speed = SPEED_1000;
1144 			break;
1145 		case BCM5708S_1000X_STAT1_SPEED_2G5:
1146 			bp->line_speed = SPEED_2500;
1147 			break;
1148 	}
1149 	if (val & BCM5708S_1000X_STAT1_FD)
1150 		bp->duplex = DUPLEX_FULL;
1151 	else
1152 		bp->duplex = DUPLEX_HALF;
1153 
1154 	return 0;
1155 }
1156 
1157 static int
1158 bnx2_5706s_linkup(struct bnx2 *bp)
1159 {
1160 	u32 bmcr, local_adv, remote_adv, common;
1161 
1162 	bp->link_up = 1;
1163 	bp->line_speed = SPEED_1000;
1164 
1165 	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1166 	if (bmcr & BMCR_FULLDPLX) {
1167 		bp->duplex = DUPLEX_FULL;
1168 	}
1169 	else {
1170 		bp->duplex = DUPLEX_HALF;
1171 	}
1172 
1173 	if (!(bmcr & BMCR_ANENABLE)) {
1174 		return 0;
1175 	}
1176 
1177 	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1178 	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1179 
1180 	common = local_adv & remote_adv;
1181 	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
1182 
1183 		if (common & ADVERTISE_1000XFULL) {
1184 			bp->duplex = DUPLEX_FULL;
1185 		}
1186 		else {
1187 			bp->duplex = DUPLEX_HALF;
1188 		}
1189 	}
1190 
1191 	return 0;
1192 }
1193 
1194 static int
1195 bnx2_copper_linkup(struct bnx2 *bp)
1196 {
1197 	u32 bmcr;
1198 
1199 	bp->phy_flags &= ~BNX2_PHY_FLAG_MDIX;
1200 
1201 	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1202 	if (bmcr & BMCR_ANENABLE) {
1203 		u32 local_adv, remote_adv, common;
1204 
1205 		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
1206 		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
1207 
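		/* Link-partner bits in MII_STAT1000 sit two bits above the
		 * matching advertisement bits in MII_CTRL1000 (LPA_1000FULL
		 * is ADVERTISE_1000FULL << 2), hence the shift before
		 * masking.
		 */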
1208 		common = local_adv & (remote_adv >> 2);
1209 		if (common & ADVERTISE_1000FULL) {
1210 			bp->line_speed = SPEED_1000;
1211 			bp->duplex = DUPLEX_FULL;
1212 		}
1213 		else if (common & ADVERTISE_1000HALF) {
1214 			bp->line_speed = SPEED_1000;
1215 			bp->duplex = DUPLEX_HALF;
1216 		}
1217 		else {
1218 			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1219 			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1220 
1221 			common = local_adv & remote_adv;
1222 			if (common & ADVERTISE_100FULL) {
1223 				bp->line_speed = SPEED_100;
1224 				bp->duplex = DUPLEX_FULL;
1225 			}
1226 			else if (common & ADVERTISE_100HALF) {
1227 				bp->line_speed = SPEED_100;
1228 				bp->duplex = DUPLEX_HALF;
1229 			}
1230 			else if (common & ADVERTISE_10FULL) {
1231 				bp->line_speed = SPEED_10;
1232 				bp->duplex = DUPLEX_FULL;
1233 			}
1234 			else if (common & ADVERTISE_10HALF) {
1235 				bp->line_speed = SPEED_10;
1236 				bp->duplex = DUPLEX_HALF;
1237 			}
1238 			else {
1239 				bp->line_speed = 0;
1240 				bp->link_up = 0;
1241 			}
1242 		}
1243 	}
1244 	else {
1245 		if (bmcr & BMCR_SPEED100) {
1246 			bp->line_speed = SPEED_100;
1247 		}
1248 		else {
1249 			bp->line_speed = SPEED_10;
1250 		}
1251 		if (bmcr & BMCR_FULLDPLX) {
1252 			bp->duplex = DUPLEX_FULL;
1253 		}
1254 		else {
1255 			bp->duplex = DUPLEX_HALF;
1256 		}
1257 	}
1258 
1259 	if (bp->link_up) {
1260 		u32 ext_status;
1261 
1262 		bnx2_read_phy(bp, MII_BNX2_EXT_STATUS, &ext_status);
1263 		if (ext_status & EXT_STATUS_MDIX)
1264 			bp->phy_flags |= BNX2_PHY_FLAG_MDIX;
1265 	}
1266 
1267 	return 0;
1268 }
1269 
1270 static void
1271 bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
1272 {
1273 	u32 val, rx_cid_addr = GET_CID_ADDR(cid);
1274 
1275 	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
1276 	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
1277 	val |= 0x02 << 8;
1278 
1279 	if (bp->flow_ctrl & FLOW_CTRL_TX)
1280 		val |= BNX2_L2CTX_FLOW_CTRL_ENABLE;
1281 
1282 	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
1283 }
1284 
1285 static void
1286 bnx2_init_all_rx_contexts(struct bnx2 *bp)
1287 {
1288 	int i;
1289 	u32 cid;
1290 
1291 	for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1292 		if (i == 1)
1293 			cid = RX_RSS_CID;
1294 		bnx2_init_rx_context(bp, cid);
1295 	}
1296 }
1297 
1298 static void
1299 bnx2_set_mac_link(struct bnx2 *bp)
1300 {
1301 	u32 val;
1302 
1303 	BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
1304 	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
1305 		(bp->duplex == DUPLEX_HALF)) {
1306 		BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
1307 	}
1308 
1309 	/* Configure the EMAC mode register. */
1310 	val = BNX2_RD(bp, BNX2_EMAC_MODE);
1311 
1312 	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1313 		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1314 		BNX2_EMAC_MODE_25G_MODE);
1315 
1316 	if (bp->link_up) {
1317 		switch (bp->line_speed) {
1318 			case SPEED_10:
1319 				if (BNX2_CHIP(bp) != BNX2_CHIP_5706) {
1320 					val |= BNX2_EMAC_MODE_PORT_MII_10M;
1321 					break;
1322 				}
1323 				/* fall through */
1324 			case SPEED_100:
1325 				val |= BNX2_EMAC_MODE_PORT_MII;
1326 				break;
1327 			case SPEED_2500:
1328 				val |= BNX2_EMAC_MODE_25G_MODE;
1329 				/* fall through */
1330 			case SPEED_1000:
1331 				val |= BNX2_EMAC_MODE_PORT_GMII;
1332 				break;
1333 		}
1334 	}
1335 	else {
1336 		val |= BNX2_EMAC_MODE_PORT_GMII;
1337 	}
1338 
1339 	/* Set the MAC to operate in the appropriate duplex mode. */
1340 	if (bp->duplex == DUPLEX_HALF)
1341 		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
1342 	BNX2_WR(bp, BNX2_EMAC_MODE, val);
1343 
1344 	/* Enable/disable rx PAUSE. */
1345 	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
1346 
1347 	if (bp->flow_ctrl & FLOW_CTRL_RX)
1348 		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
1349 	BNX2_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
1350 
1351 	/* Enable/disable tx PAUSE. */
1352 	val = BNX2_RD(bp, BNX2_EMAC_TX_MODE);
1353 	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
1354 
1355 	if (bp->flow_ctrl & FLOW_CTRL_TX)
1356 		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
1357 	BNX2_WR(bp, BNX2_EMAC_TX_MODE, val);
1358 
1359 	/* Acknowledge the interrupt. */
1360 	BNX2_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
1361 
1362 	bnx2_init_all_rx_contexts(bp);
1363 }
1364 
1365 static void
1366 bnx2_enable_bmsr1(struct bnx2 *bp)
1367 {
1368 	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1369 	    (BNX2_CHIP(bp) == BNX2_CHIP_5709))
1370 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1371 			       MII_BNX2_BLK_ADDR_GP_STATUS);
1372 }
1373 
1374 static void
1375 bnx2_disable_bmsr1(struct bnx2 *bp)
1376 {
1377 	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1378 	    (BNX2_CHIP(bp) == BNX2_CHIP_5709))
1379 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1380 			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1381 }
1382 
1383 static int
1384 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1385 {
1386 	u32 up1;
1387 	int ret = 1;
1388 
1389 	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1390 		return 0;
1391 
1392 	if (bp->autoneg & AUTONEG_SPEED)
1393 		bp->advertising |= ADVERTISED_2500baseX_Full;
1394 
1395 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1396 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1397 
1398 	bnx2_read_phy(bp, bp->mii_up1, &up1);
1399 	if (!(up1 & BCM5708S_UP1_2G5)) {
1400 		up1 |= BCM5708S_UP1_2G5;
1401 		bnx2_write_phy(bp, bp->mii_up1, up1);
1402 		ret = 0;
1403 	}
1404 
1405 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1406 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1407 			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1408 
1409 	return ret;
1410 }
1411 
1412 static int
1413 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1414 {
1415 	u32 up1;
1416 	int ret = 0;
1417 
1418 	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1419 		return 0;
1420 
1421 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1422 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1423 
1424 	bnx2_read_phy(bp, bp->mii_up1, &up1);
1425 	if (up1 & BCM5708S_UP1_2G5) {
1426 		up1 &= ~BCM5708S_UP1_2G5;
1427 		bnx2_write_phy(bp, bp->mii_up1, up1);
1428 		ret = 1;
1429 	}
1430 
1431 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1432 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1433 			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1434 
1435 	return ret;
1436 }
1437 
1438 static void
1439 bnx2_enable_forced_2g5(struct bnx2 *bp)
1440 {
1441 	u32 uninitialized_var(bmcr);
1442 	int err;
1443 
1444 	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1445 		return;
1446 
1447 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
1448 		u32 val;
1449 
1450 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1451 			       MII_BNX2_BLK_ADDR_SERDES_DIG);
1452 		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
1453 			val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1454 			val |= MII_BNX2_SD_MISC1_FORCE |
1455 				MII_BNX2_SD_MISC1_FORCE_2_5G;
1456 			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1457 		}
1458 
1459 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1460 			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1461 		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1462 
1463 	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
1464 		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1465 		if (!err)
1466 			bmcr |= BCM5708S_BMCR_FORCE_2500;
1467 	} else {
1468 		return;
1469 	}
1470 
1471 	if (err)
1472 		return;
1473 
1474 	if (bp->autoneg & AUTONEG_SPEED) {
1475 		bmcr &= ~BMCR_ANENABLE;
1476 		if (bp->req_duplex == DUPLEX_FULL)
1477 			bmcr |= BMCR_FULLDPLX;
1478 	}
1479 	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1480 }
1481 
1482 static void
1483 bnx2_disable_forced_2g5(struct bnx2 *bp)
1484 {
1485 	u32 uninitialized_var(bmcr);
1486 	int err;
1487 
1488 	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1489 		return;
1490 
1491 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
1492 		u32 val;
1493 
1494 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1495 			       MII_BNX2_BLK_ADDR_SERDES_DIG);
1496 		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
1497 			val &= ~MII_BNX2_SD_MISC1_FORCE;
1498 			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1499 		}
1500 
1501 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1502 			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1503 		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1504 
1505 	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
1506 		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1507 		if (!err)
1508 			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1509 	} else {
1510 		return;
1511 	}
1512 
1513 	if (err)
1514 		return;
1515 
1516 	if (bp->autoneg & AUTONEG_SPEED)
1517 		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1518 	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1519 }
1520 
1521 static void
1522 bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1523 {
1524 	u32 val;
1525 
1526 	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1527 	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1528 	if (start)
1529 		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1530 	else
1531 		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1532 }
1533 
1534 static int
1535 bnx2_set_link(struct bnx2 *bp)
1536 {
1537 	u32 bmsr;
1538 	u8 link_up;
1539 
1540 	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
1541 		bp->link_up = 1;
1542 		return 0;
1543 	}
1544 
1545 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1546 		return 0;
1547 
1548 	link_up = bp->link_up;
1549 
1550 	bnx2_enable_bmsr1(bp);
1551 	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1552 	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1553 	bnx2_disable_bmsr1(bp);
1554 
1555 	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1556 	    (BNX2_CHIP(bp) == BNX2_CHIP_5706)) {
1557 		u32 val, an_dbg;
1558 
1559 		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
1560 			bnx2_5706s_force_link_dn(bp, 0);
1561 			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
1562 		}
1563 		val = BNX2_RD(bp, BNX2_EMAC_STATUS);
1564 
1565 		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
1566 		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
1567 		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
1568 
1569 		if ((val & BNX2_EMAC_STATUS_LINK) &&
1570 		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
1571 			bmsr |= BMSR_LSTATUS;
1572 		else
1573 			bmsr &= ~BMSR_LSTATUS;
1574 	}
1575 
1576 	if (bmsr & BMSR_LSTATUS) {
1577 		bp->link_up = 1;
1578 
1579 		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1580 			if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
1581 				bnx2_5706s_linkup(bp);
1582 			else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
1583 				bnx2_5708s_linkup(bp);
1584 			else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1585 				bnx2_5709s_linkup(bp);
1586 		}
1587 		else {
1588 			bnx2_copper_linkup(bp);
1589 		}
1590 		bnx2_resolve_flow_ctrl(bp);
1591 	}
1592 	else {
1593 		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1594 		    (bp->autoneg & AUTONEG_SPEED))
1595 			bnx2_disable_forced_2g5(bp);
1596 
1597 		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
1598 			u32 bmcr;
1599 
1600 			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1601 			bmcr |= BMCR_ANENABLE;
1602 			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1603 
1604 			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
1605 		}
1606 		bp->link_up = 0;
1607 	}
1608 
1609 	if (bp->link_up != link_up) {
1610 		bnx2_report_link(bp);
1611 	}
1612 
1613 	bnx2_set_mac_link(bp);
1614 
1615 	return 0;
1616 }
1617 
1618 static int
1619 bnx2_reset_phy(struct bnx2 *bp)
1620 {
1621 	int i;
1622 	u32 reg;
1623 
	bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1625 
1626 #define PHY_RESET_MAX_WAIT 100
1627 	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1628 		udelay(10);
1629 
1630 		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1631 		if (!(reg & BMCR_RESET)) {
1632 			udelay(20);
1633 			break;
1634 		}
1635 	}
1636 	if (i == PHY_RESET_MAX_WAIT) {
1637 		return -EBUSY;
1638 	}
1639 	return 0;
1640 }
1641 
1642 static u32
1643 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1644 {
1645 	u32 adv = 0;
1646 
1647 	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1648 		(FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1649 
1650 		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1651 			adv = ADVERTISE_1000XPAUSE;
1652 		}
1653 		else {
1654 			adv = ADVERTISE_PAUSE_CAP;
1655 		}
1656 	}
1657 	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1658 		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1659 			adv = ADVERTISE_1000XPSE_ASYM;
1660 		}
1661 		else {
1662 			adv = ADVERTISE_PAUSE_ASYM;
1663 		}
1664 	}
1665 	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1666 		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1667 			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1668 		}
1669 		else {
1670 			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1671 		}
1672 	}
1673 	return adv;
1674 }
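
/* Advertisement mapping used above: symmetric (RX and TX) pause
 * advertises the symmetric-pause bit; TX-only advertises the asymmetric
 * bit alone; RX-only advertises both bits, since an RX-only station can
 * also interoperate with a symmetric-pause partner.  SerDes links use
 * the 1000X forms of the same two bits.
 */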
1675 
1676 static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1677 
1678 static int
1679 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1680 __releases(&bp->phy_lock)
1681 __acquires(&bp->phy_lock)
1682 {
1683 	u32 speed_arg = 0, pause_adv;
1684 
1685 	pause_adv = bnx2_phy_get_pause_adv(bp);
1686 
1687 	if (bp->autoneg & AUTONEG_SPEED) {
1688 		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1689 		if (bp->advertising & ADVERTISED_10baseT_Half)
1690 			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1691 		if (bp->advertising & ADVERTISED_10baseT_Full)
1692 			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1693 		if (bp->advertising & ADVERTISED_100baseT_Half)
1694 			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1695 		if (bp->advertising & ADVERTISED_100baseT_Full)
1696 			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1697 		if (bp->advertising & ADVERTISED_1000baseT_Full)
1698 			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1699 		if (bp->advertising & ADVERTISED_2500baseX_Full)
1700 			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1701 	} else {
1702 		if (bp->req_line_speed == SPEED_2500)
1703 			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1704 		else if (bp->req_line_speed == SPEED_1000)
1705 			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1706 		else if (bp->req_line_speed == SPEED_100) {
1707 			if (bp->req_duplex == DUPLEX_FULL)
1708 				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1709 			else
1710 				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1711 		} else if (bp->req_line_speed == SPEED_10) {
1712 			if (bp->req_duplex == DUPLEX_FULL)
1713 				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1714 			else
1715 				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1716 		}
1717 	}
1718 
1719 	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1720 		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1721 	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
1722 		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1723 
1724 	if (port == PORT_TP)
1725 		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1726 			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1727 
1728 	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);
1729 
1730 	spin_unlock_bh(&bp->phy_lock);
1731 	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
1732 	spin_lock_bh(&bp->phy_lock);
1733 
1734 	return 0;
1735 }
1736 
1737 static int
1738 bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
1739 __releases(&bp->phy_lock)
1740 __acquires(&bp->phy_lock)
1741 {
1742 	u32 adv, bmcr;
1743 	u32 new_adv = 0;
1744 
1745 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1746 		return bnx2_setup_remote_phy(bp, port);
1747 
1748 	if (!(bp->autoneg & AUTONEG_SPEED)) {
1749 		u32 new_bmcr;
1750 		int force_link_down = 0;
1751 
1752 		if (bp->req_line_speed == SPEED_2500) {
1753 			if (!bnx2_test_and_enable_2g5(bp))
1754 				force_link_down = 1;
1755 		} else if (bp->req_line_speed == SPEED_1000) {
1756 			if (bnx2_test_and_disable_2g5(bp))
1757 				force_link_down = 1;
1758 		}
1759 		bnx2_read_phy(bp, bp->mii_adv, &adv);
1760 		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1761 
1762 		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1763 		new_bmcr = bmcr & ~BMCR_ANENABLE;
1764 		new_bmcr |= BMCR_SPEED1000;
1765 
1766 		if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
1767 			if (bp->req_line_speed == SPEED_2500)
1768 				bnx2_enable_forced_2g5(bp);
1769 			else if (bp->req_line_speed == SPEED_1000) {
1770 				bnx2_disable_forced_2g5(bp);
1771 				new_bmcr &= ~0x2000;
1772 			}
1773 
1774 		} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
1775 			if (bp->req_line_speed == SPEED_2500)
1776 				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1777 			else
1778 				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
1779 		}
1780 
1781 		if (bp->req_duplex == DUPLEX_FULL) {
1782 			adv |= ADVERTISE_1000XFULL;
1783 			new_bmcr |= BMCR_FULLDPLX;
1784 		}
1785 		else {
1786 			adv |= ADVERTISE_1000XHALF;
1787 			new_bmcr &= ~BMCR_FULLDPLX;
1788 		}
1789 		if ((new_bmcr != bmcr) || (force_link_down)) {
1790 			/* Force a link down visible on the other side */
1791 			if (bp->link_up) {
1792 				bnx2_write_phy(bp, bp->mii_adv, adv &
1793 					       ~(ADVERTISE_1000XFULL |
1794 						 ADVERTISE_1000XHALF));
1795 				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
1796 					BMCR_ANRESTART | BMCR_ANENABLE);
1797 
1798 				bp->link_up = 0;
1799 				netif_carrier_off(bp->dev);
1800 				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1801 				bnx2_report_link(bp);
1802 			}
1803 			bnx2_write_phy(bp, bp->mii_adv, adv);
1804 			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1805 		} else {
1806 			bnx2_resolve_flow_ctrl(bp);
1807 			bnx2_set_mac_link(bp);
1808 		}
1809 		return 0;
1810 	}
1811 
1812 	bnx2_test_and_enable_2g5(bp);
1813 
1814 	if (bp->advertising & ADVERTISED_1000baseT_Full)
1815 		new_adv |= ADVERTISE_1000XFULL;
1816 
1817 	new_adv |= bnx2_phy_get_pause_adv(bp);
1818 
1819 	bnx2_read_phy(bp, bp->mii_adv, &adv);
1820 	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1821 
1822 	bp->serdes_an_pending = 0;
1823 	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1824 		/* Force a link down visible on the other side */
1825 		if (bp->link_up) {
1826 			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1827 			spin_unlock_bh(&bp->phy_lock);
1828 			msleep(20);
1829 			spin_lock_bh(&bp->phy_lock);
1830 		}
1831 
1832 		bnx2_write_phy(bp, bp->mii_adv, new_adv);
1833 		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
1834 			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate, which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
1843 		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
1844 		bp->serdes_an_pending = 1;
1845 		mod_timer(&bp->timer, jiffies + bp->current_interval);
1846 	} else {
1847 		bnx2_resolve_flow_ctrl(bp);
1848 		bnx2_set_mac_link(bp);
1849 	}
1850 
1851 	return 0;
1852 }
1853 
1854 #define ETHTOOL_ALL_FIBRE_SPEED						\
1855 	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
1856 		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1857 		(ADVERTISED_1000baseT_Full)
1858 
1859 #define ETHTOOL_ALL_COPPER_SPEED					\
1860 	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
1861 	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
1862 	ADVERTISED_1000baseT_Full)
1863 
1864 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1865 	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1866 
1867 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1868 
1869 static void
1870 bnx2_set_default_remote_link(struct bnx2 *bp)
1871 {
1872 	u32 link;
1873 
1874 	if (bp->phy_port == PORT_TP)
1875 		link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1876 	else
1877 		link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1878 
1879 	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1880 		bp->req_line_speed = 0;
1881 		bp->autoneg |= AUTONEG_SPEED;
1882 		bp->advertising = ADVERTISED_Autoneg;
1883 		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1884 			bp->advertising |= ADVERTISED_10baseT_Half;
1885 		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1886 			bp->advertising |= ADVERTISED_10baseT_Full;
1887 		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1888 			bp->advertising |= ADVERTISED_100baseT_Half;
1889 		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1890 			bp->advertising |= ADVERTISED_100baseT_Full;
1891 		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1892 			bp->advertising |= ADVERTISED_1000baseT_Full;
1893 		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1894 			bp->advertising |= ADVERTISED_2500baseX_Full;
1895 	} else {
1896 		bp->autoneg = 0;
1897 		bp->advertising = 0;
1898 		bp->req_duplex = DUPLEX_FULL;
1899 		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1900 			bp->req_line_speed = SPEED_10;
1901 			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1902 				bp->req_duplex = DUPLEX_HALF;
1903 		}
1904 		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1905 			bp->req_line_speed = SPEED_100;
1906 			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1907 				bp->req_duplex = DUPLEX_HALF;
1908 		}
1909 		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1910 			bp->req_line_speed = SPEED_1000;
1911 		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1912 			bp->req_line_speed = SPEED_2500;
1913 	}
1914 }
1915 
1916 static void
1917 bnx2_set_default_link(struct bnx2 *bp)
1918 {
1919 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1920 		bnx2_set_default_remote_link(bp);
1921 		return;
1922 	}
1923 
1924 	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1925 	bp->req_line_speed = 0;
1926 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1927 		u32 reg;
1928 
1929 		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1930 
1931 		reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1932 		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1933 		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1934 			bp->autoneg = 0;
1935 			bp->req_line_speed = bp->line_speed = SPEED_1000;
1936 			bp->req_duplex = DUPLEX_FULL;
1937 		}
1938 	} else
1939 		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1940 }
1941 
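/* Post the next driver pulse sequence number to shared memory so the
 * bootcode can tell that the driver is still alive.  The write goes
 * through the PCICFG register window under indirect_lock.
 */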
1942 static void
1943 bnx2_send_heart_beat(struct bnx2 *bp)
1944 {
1945 	u32 msg;
1946 	u32 addr;
1947 
1948 	spin_lock(&bp->indirect_lock);
1949 	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1950 	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1951 	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1952 	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1953 	spin_unlock(&bp->indirect_lock);
1954 }
1955 
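/* Decode the link status word that the firmware posts in shared memory
 * for remote-PHY configurations: update link state, speed, duplex and
 * flow control, then report any change and reprogram the MAC.
 */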
1956 static void
1957 bnx2_remote_phy_event(struct bnx2 *bp)
1958 {
1959 	u32 msg;
1960 	u8 link_up = bp->link_up;
1961 	u8 old_port;
1962 
1963 	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
1964 
1965 	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
1966 		bnx2_send_heart_beat(bp);
1967 
1968 	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
1969 
	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN) {
		bp->link_up = 0;
	} else {
1973 		u32 speed;
1974 
1975 		bp->link_up = 1;
1976 		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
1977 		bp->duplex = DUPLEX_FULL;
1978 		switch (speed) {
1979 			case BNX2_LINK_STATUS_10HALF:
1980 				bp->duplex = DUPLEX_HALF;
1981 				/* fall through */
1982 			case BNX2_LINK_STATUS_10FULL:
1983 				bp->line_speed = SPEED_10;
1984 				break;
1985 			case BNX2_LINK_STATUS_100HALF:
1986 				bp->duplex = DUPLEX_HALF;
1987 				/* fall through */
1988 			case BNX2_LINK_STATUS_100BASE_T4:
1989 			case BNX2_LINK_STATUS_100FULL:
1990 				bp->line_speed = SPEED_100;
1991 				break;
1992 			case BNX2_LINK_STATUS_1000HALF:
1993 				bp->duplex = DUPLEX_HALF;
1994 				/* fall through */
1995 			case BNX2_LINK_STATUS_1000FULL:
1996 				bp->line_speed = SPEED_1000;
1997 				break;
1998 			case BNX2_LINK_STATUS_2500HALF:
1999 				bp->duplex = DUPLEX_HALF;
2000 				/* fall through */
2001 			case BNX2_LINK_STATUS_2500FULL:
2002 				bp->line_speed = SPEED_2500;
2003 				break;
2004 			default:
2005 				bp->line_speed = 0;
2006 				break;
2007 		}
2008 
2009 		bp->flow_ctrl = 0;
2010 		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
2011 		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
2012 			if (bp->duplex == DUPLEX_FULL)
2013 				bp->flow_ctrl = bp->req_flow_ctrl;
2014 		} else {
2015 			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
2016 				bp->flow_ctrl |= FLOW_CTRL_TX;
2017 			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
2018 				bp->flow_ctrl |= FLOW_CTRL_RX;
2019 		}
2020 
2021 		old_port = bp->phy_port;
2022 		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
2023 			bp->phy_port = PORT_FIBRE;
2024 		else
2025 			bp->phy_port = PORT_TP;
2026 
2027 		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);
	}
2031 	if (bp->link_up != link_up)
2032 		bnx2_report_link(bp);
2033 
2034 	bnx2_set_mac_link(bp);
2035 }
2036 
2037 static int
2038 bnx2_set_remote_link(struct bnx2 *bp)
2039 {
2040 	u32 evt_code;
2041 
2042 	evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2043 	switch (evt_code) {
2044 		case BNX2_FW_EVT_CODE_LINK_EVENT:
2045 			bnx2_remote_phy_event(bp);
2046 			break;
2047 		case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2048 		default:
2049 			bnx2_send_heart_beat(bp);
2050 			break;
2051 	}
2052 	return 0;
2053 }
2054 
2055 static int
2056 bnx2_setup_copper_phy(struct bnx2 *bp)
2057 __releases(&bp->phy_lock)
2058 __acquires(&bp->phy_lock)
2059 {
2060 	u32 bmcr, adv_reg, new_adv = 0;
2061 	u32 new_bmcr;
2062 
2063 	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
2064 
2065 	bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
2066 	adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
2067 		    ADVERTISE_PAUSE_ASYM);
2068 
2069 	new_adv = ADVERTISE_CSMA | ethtool_adv_to_mii_adv_t(bp->advertising);
2070 
2071 	if (bp->autoneg & AUTONEG_SPEED) {
2072 		u32 adv1000_reg;
2073 		u32 new_adv1000 = 0;
2074 
2075 		new_adv |= bnx2_phy_get_pause_adv(bp);
2076 
2077 		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
2078 		adv1000_reg &= PHY_ALL_1000_SPEED;
2079 
2080 		new_adv1000 |= ethtool_adv_to_mii_ctrl1000_t(bp->advertising);
		if ((adv1000_reg != new_adv1000) ||
		    (adv_reg != new_adv) ||
		    ((bmcr & BMCR_ANENABLE) == 0)) {
2085 			bnx2_write_phy(bp, bp->mii_adv, new_adv);
2086 			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000);
2087 			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
2088 				BMCR_ANENABLE);
		} else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced
			 * or vice-versa.
			 */
2094 			bnx2_resolve_flow_ctrl(bp);
2095 			bnx2_set_mac_link(bp);
2096 		}
2097 		return 0;
2098 	}
2099 
2100 	/* advertise nothing when forcing speed */
2101 	if (adv_reg != new_adv)
2102 		bnx2_write_phy(bp, bp->mii_adv, new_adv);
2103 
2104 	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100)
		new_bmcr |= BMCR_SPEED100;
	if (bp->req_duplex == DUPLEX_FULL)
		new_bmcr |= BMCR_FULLDPLX;
2111 	if (new_bmcr != bmcr) {
2112 		u32 bmsr;
2113 
2114 		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2115 		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2116 
2117 		if (bmsr & BMSR_LSTATUS) {
2118 			/* Force link down */
2119 			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
2120 			spin_unlock_bh(&bp->phy_lock);
2121 			msleep(50);
2122 			spin_lock_bh(&bp->phy_lock);
2123 
2124 			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2125 			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2126 		}
2127 
2128 		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
2129 
		/* Normally, the new speed is set up after the link has
		 * gone down and up again.  In some cases, the link will
		 * not go down, so we need to set up the new speed here.
		 */
2134 		if (bmsr & BMSR_LSTATUS) {
2135 			bp->line_speed = bp->req_line_speed;
2136 			bp->duplex = bp->req_duplex;
2137 			bnx2_resolve_flow_ctrl(bp);
2138 			bnx2_set_mac_link(bp);
2139 		}
2140 	} else {
2141 		bnx2_resolve_flow_ctrl(bp);
2142 		bnx2_set_mac_link(bp);
2143 	}
2144 	return 0;
2145 }
2146 
2147 static int
2148 bnx2_setup_phy(struct bnx2 *bp, u8 port)
2149 __releases(&bp->phy_lock)
2150 __acquires(&bp->phy_lock)
2151 {
2152 	if (bp->loopback == MAC_LOOPBACK)
2153 		return 0;
2154 
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES)
		return bnx2_setup_serdes_phy(bp, port);
	else
		return bnx2_setup_copper_phy(bp);
2161 }
2162 
2163 static int
2164 bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
2165 {
2166 	u32 val;
2167 
2168 	bp->mii_bmcr = MII_BMCR + 0x10;
2169 	bp->mii_bmsr = MII_BMSR + 0x10;
2170 	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
2171 	bp->mii_adv = MII_ADVERTISE + 0x10;
2172 	bp->mii_lpa = MII_LPA + 0x10;
2173 	bp->mii_up1 = MII_BNX2_OVER1G_UP1;
2174 
2175 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
2176 	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
2177 
2178 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2179 	if (reset_phy)
2180 		bnx2_reset_phy(bp);
2181 
2182 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
2183 
2184 	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
2185 	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
2186 	val |= MII_BNX2_SD_1000XCTL1_FIBER;
2187 	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
2188 
2189 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
2190 	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
2191 	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
2192 		val |= BCM5708S_UP1_2G5;
2193 	else
2194 		val &= ~BCM5708S_UP1_2G5;
2195 	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
2196 
2197 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
2198 	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
2199 	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
2200 	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
2201 
2202 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
2203 
2204 	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
2205 	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
2206 	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
2207 
2208 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2209 
2210 	return 0;
2211 }
2212 
2213 static int
2214 bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
2215 {
2216 	u32 val;
2217 
2218 	if (reset_phy)
2219 		bnx2_reset_phy(bp);
2220 
2221 	bp->mii_up1 = BCM5708S_UP1;
2222 
2223 	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
2224 	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
2225 	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2226 
2227 	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
2228 	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
2229 	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
2230 
2231 	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
2232 	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
2233 	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
2234 
2235 	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
2236 		bnx2_read_phy(bp, BCM5708S_UP1, &val);
2237 		val |= BCM5708S_UP1_2G5;
2238 		bnx2_write_phy(bp, BCM5708S_UP1, val);
2239 	}
2240 
2241 	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
2242 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
2243 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1)) {
2244 		/* increase tx signal amplitude */
2245 		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2246 			       BCM5708S_BLK_ADDR_TX_MISC);
2247 		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
2248 		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
2249 		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
2250 		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2251 	}
2252 
2253 	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
2254 	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
2255 
2256 	if (val) {
2257 		u32 is_backplane;
2258 
2259 		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
2260 		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
2261 			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2262 				       BCM5708S_BLK_ADDR_TX_MISC);
2263 			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
2264 			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2265 				       BCM5708S_BLK_ADDR_DIG);
2266 		}
2267 	}
2268 	return 0;
2269 }
2270 
2271 static int
2272 bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
2273 {
2274 	if (reset_phy)
2275 		bnx2_reset_phy(bp);
2276 
2277 	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
2278 
2279 	if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
2280 		BNX2_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
2281 
2282 	if (bp->dev->mtu > 1500) {
2283 		u32 val;
2284 
2285 		/* Set extended packet length bit */
2286 		bnx2_write_phy(bp, 0x18, 0x7);
2287 		bnx2_read_phy(bp, 0x18, &val);
2288 		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
2289 
2290 		bnx2_write_phy(bp, 0x1c, 0x6c00);
2291 		bnx2_read_phy(bp, 0x1c, &val);
2292 		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	} else {
2295 		u32 val;
2296 
2297 		bnx2_write_phy(bp, 0x18, 0x7);
2298 		bnx2_read_phy(bp, 0x18, &val);
2299 		bnx2_write_phy(bp, 0x18, val & ~0x4007);
2300 
2301 		bnx2_write_phy(bp, 0x1c, 0x6c00);
2302 		bnx2_read_phy(bp, 0x1c, &val);
2303 		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
2304 	}
2305 
2306 	return 0;
2307 }
2308 
2309 static int
2310 bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
2311 {
2312 	u32 val;
2313 
2314 	if (reset_phy)
2315 		bnx2_reset_phy(bp);
2316 
2317 	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
2318 		bnx2_write_phy(bp, 0x18, 0x0c00);
2319 		bnx2_write_phy(bp, 0x17, 0x000a);
2320 		bnx2_write_phy(bp, 0x15, 0x310b);
2321 		bnx2_write_phy(bp, 0x17, 0x201f);
2322 		bnx2_write_phy(bp, 0x15, 0x9506);
2323 		bnx2_write_phy(bp, 0x17, 0x401f);
2324 		bnx2_write_phy(bp, 0x15, 0x14e2);
2325 		bnx2_write_phy(bp, 0x18, 0x0400);
2326 	}
2327 
2328 	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
2329 		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
2330 			       MII_BNX2_DSP_EXPAND_REG | 0x8);
2331 		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
2332 		val &= ~(1 << 8);
2333 		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
2334 	}
2335 
2336 	if (bp->dev->mtu > 1500) {
2337 		/* Set extended packet length bit */
2338 		bnx2_write_phy(bp, 0x18, 0x7);
2339 		bnx2_read_phy(bp, 0x18, &val);
2340 		bnx2_write_phy(bp, 0x18, val | 0x4000);
2341 
2342 		bnx2_read_phy(bp, 0x10, &val);
2343 		bnx2_write_phy(bp, 0x10, val | 0x1);
	} else {
2346 		bnx2_write_phy(bp, 0x18, 0x7);
2347 		bnx2_read_phy(bp, 0x18, &val);
2348 		bnx2_write_phy(bp, 0x18, val & ~0x4007);
2349 
2350 		bnx2_read_phy(bp, 0x10, &val);
2351 		bnx2_write_phy(bp, 0x10, val & ~0x1);
2352 	}
2353 
2354 	/* ethernet@wirespeed */
2355 	bnx2_write_phy(bp, MII_BNX2_AUX_CTL, AUX_CTL_MISC_CTL);
2356 	bnx2_read_phy(bp, MII_BNX2_AUX_CTL, &val);
	val |= AUX_CTL_MISC_CTL_WR | AUX_CTL_MISC_CTL_WIRESPEED;
2358 
2359 	/* auto-mdix */
2360 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		val |= AUX_CTL_MISC_CTL_AUTOMDIX;
2362 
2363 	bnx2_write_phy(bp, MII_BNX2_AUX_CTL, val);
2364 	return 0;
}

static int
2369 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2370 __releases(&bp->phy_lock)
2371 __acquires(&bp->phy_lock)
2372 {
2373 	u32 val;
2374 	int rc = 0;
2375 
2376 	bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2377 	bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2378 
2379 	bp->mii_bmcr = MII_BMCR;
2380 	bp->mii_bmsr = MII_BMSR;
2381 	bp->mii_bmsr1 = MII_BMSR;
2382 	bp->mii_adv = MII_ADVERTISE;
2383 	bp->mii_lpa = MII_LPA;
2384 
2385 	BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2386 
2387 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2388 		goto setup_phy;
2389 
2390 	bnx2_read_phy(bp, MII_PHYSID1, &val);
2391 	bp->phy_id = val << 16;
2392 	bnx2_read_phy(bp, MII_PHYSID2, &val);
2393 	bp->phy_id |= val & 0xffff;
2394 
2395 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2396 		if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
2397 			rc = bnx2_init_5706s_phy(bp, reset_phy);
2398 		else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
2399 			rc = bnx2_init_5708s_phy(bp, reset_phy);
2400 		else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
2401 			rc = bnx2_init_5709s_phy(bp, reset_phy);
	} else {
2404 		rc = bnx2_init_copper_phy(bp, reset_phy);
2405 	}
2406 
2407 setup_phy:
2408 	if (!rc)
2409 		rc = bnx2_setup_phy(bp, bp->phy_port);
2410 
2411 	return rc;
2412 }
2413 
2414 static int
2415 bnx2_set_mac_loopback(struct bnx2 *bp)
2416 {
2417 	u32 mac_mode;
2418 
2419 	mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
2420 	mac_mode &= ~BNX2_EMAC_MODE_PORT;
2421 	mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2422 	BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2423 	bp->link_up = 1;
2424 	return 0;
2425 }
2426 
2427 static int bnx2_test_link(struct bnx2 *);
2428 
2429 static int
2430 bnx2_set_phy_loopback(struct bnx2 *bp)
2431 {
2432 	u32 mac_mode;
2433 	int rc, i;
2434 
2435 	spin_lock_bh(&bp->phy_lock);
2436 	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2437 			    BMCR_SPEED1000);
2438 	spin_unlock_bh(&bp->phy_lock);
2439 	if (rc)
2440 		return rc;
2441 
2442 	for (i = 0; i < 10; i++) {
2443 		if (bnx2_test_link(bp) == 0)
2444 			break;
2445 		msleep(100);
2446 	}
2447 
2448 	mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
2449 	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2450 		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2451 		      BNX2_EMAC_MODE_25G_MODE);
2452 
2453 	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2454 	BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2455 	bp->link_up = 1;
2456 	return 0;
2457 }
2458 
2459 static void
2460 bnx2_dump_mcp_state(struct bnx2 *bp)
2461 {
2462 	struct net_device *dev = bp->dev;
2463 	u32 mcp_p0, mcp_p1;
2464 
2465 	netdev_err(dev, "<--- start MCP states dump --->\n");
2466 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
2467 		mcp_p0 = BNX2_MCP_STATE_P0;
2468 		mcp_p1 = BNX2_MCP_STATE_P1;
2469 	} else {
2470 		mcp_p0 = BNX2_MCP_STATE_P0_5708;
2471 		mcp_p1 = BNX2_MCP_STATE_P1_5708;
2472 	}
2473 	netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
2474 		   bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
2475 	netdev_err(dev, "DEBUG: MCP mode[%08x] state[%08x] evt_mask[%08x]\n",
2476 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_MODE),
2477 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_STATE),
2478 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_EVENT_MASK));
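	/* The program counter is sampled twice; differing values
	 * presumably indicate that the MCP is still executing.
	 */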
2479 	netdev_err(dev, "DEBUG: pc[%08x] pc[%08x] instr[%08x]\n",
2480 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
2481 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
2482 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_INSTRUCTION));
2483 	netdev_err(dev, "DEBUG: shmem states:\n");
2484 	netdev_err(dev, "DEBUG: drv_mb[%08x] fw_mb[%08x] link_status[%08x]",
2485 		   bnx2_shmem_rd(bp, BNX2_DRV_MB),
2486 		   bnx2_shmem_rd(bp, BNX2_FW_MB),
2487 		   bnx2_shmem_rd(bp, BNX2_LINK_STATUS));
2488 	pr_cont(" drv_pulse_mb[%08x]\n", bnx2_shmem_rd(bp, BNX2_DRV_PULSE_MB));
2489 	netdev_err(dev, "DEBUG: dev_info_signature[%08x] reset_type[%08x]",
2490 		   bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE),
2491 		   bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE));
2492 	pr_cont(" condition[%08x]\n",
2493 		bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION));
2494 	DP_SHMEM_LINE(bp, BNX2_BC_RESET_TYPE);
2495 	DP_SHMEM_LINE(bp, 0x3cc);
2496 	DP_SHMEM_LINE(bp, 0x3dc);
2497 	DP_SHMEM_LINE(bp, 0x3ec);
2498 	netdev_err(dev, "DEBUG: 0x3fc[%08x]\n", bnx2_shmem_rd(bp, 0x3fc));
2499 	netdev_err(dev, "<--- end MCP states dump --->\n");
2500 }
2501 
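/* Firmware mailbox handshake: post a message tagged with an
 * incrementing sequence number to the driver mailbox, then poll the
 * firmware mailbox until the acknowledged sequence matches.  On
 * timeout the firmware is informed and -EBUSY is returned.
 */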
2502 static int
2503 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
2504 {
2505 	int i;
2506 	u32 val;
2507 
2508 	bp->fw_wr_seq++;
2509 	msg_data |= bp->fw_wr_seq;
2510 
2511 	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2512 
2513 	if (!ack)
2514 		return 0;
2515 
2516 	/* wait for an acknowledgement. */
2517 	for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
2518 		msleep(10);
2519 
2520 		val = bnx2_shmem_rd(bp, BNX2_FW_MB);
2521 
2522 		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2523 			break;
2524 	}
2525 	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2526 		return 0;
2527 
2528 	/* If we timed out, inform the firmware that this is the case. */
2529 	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2530 		msg_data &= ~BNX2_DRV_MSG_CODE;
2531 		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2532 
2533 		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2534 		if (!silent) {
2535 			pr_err("fw sync timeout, reset code = %x\n", msg_data);
2536 			bnx2_dump_mcp_state(bp);
2537 		}
2538 
2539 		return -EBUSY;
2540 	}
2541 
2542 	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2543 		return -EIO;
2544 
2545 	return 0;
2546 }
2547 
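/* The 5709 keeps context memory in host pages.  Wait for the chip's
 * context memory initialization to complete, then program the host
 * page table with the DMA address of each context block.
 */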
2548 static int
2549 bnx2_init_5709_context(struct bnx2 *bp)
2550 {
2551 	int i, ret = 0;
2552 	u32 val;
2553 
2554 	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2555 	val |= (BNX2_PAGE_BITS - 8) << 16;
2556 	BNX2_WR(bp, BNX2_CTX_COMMAND, val);
2557 	for (i = 0; i < 10; i++) {
2558 		val = BNX2_RD(bp, BNX2_CTX_COMMAND);
2559 		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2560 			break;
2561 		udelay(2);
2562 	}
2563 	if (val & BNX2_CTX_COMMAND_MEM_INIT)
2564 		return -EBUSY;
2565 
2566 	for (i = 0; i < bp->ctx_pages; i++) {
2567 		int j;
2568 
2569 		if (bp->ctx_blk[i])
2570 			memset(bp->ctx_blk[i], 0, BNX2_PAGE_SIZE);
2571 		else
2572 			return -ENOMEM;
2573 
2574 		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2575 			(bp->ctx_blk_mapping[i] & 0xffffffff) |
2576 			BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2577 		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2578 			(u64) bp->ctx_blk_mapping[i] >> 32);
2579 		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2580 			BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		for (j = 0; j < 10; j++) {
2583 			val = BNX2_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2584 			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2585 				break;
2586 			udelay(5);
2587 		}
2588 		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2589 			ret = -EBUSY;
2590 			break;
2591 		}
2592 	}
2593 	return ret;
2594 }
2595 
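/* Zero out all 96 contexts on chips with on-chip context memory.  On
 * the 5706 A0, some VCIDs are remapped to different physical CIDs,
 * presumably to steer around bad context memory locations.
 */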
2596 static void
2597 bnx2_init_context(struct bnx2 *bp)
2598 {
2599 	u32 vcid;
2600 
2601 	vcid = 96;
2602 	while (vcid) {
2603 		u32 vcid_addr, pcid_addr, offset;
2604 		int i;
2605 
2606 		vcid--;
2607 
2608 		if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
2609 			u32 new_vcid;
2610 
2611 			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8)
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			else
				new_vcid = vcid;
			pcid_addr = GET_PCID_ADDR(new_vcid);
		} else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}
2624 
2625 		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2626 			vcid_addr += (i << PHY_CTX_SHIFT);
2627 			pcid_addr += (i << PHY_CTX_SHIFT);
2628 
2629 			BNX2_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2630 			BNX2_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2631 
2632 			/* Zero out the context. */
2633 			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2634 				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
2635 		}
2636 	}
2637 }
2638 
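/* Work around bad on-chip RX buffer memory: allocate every mbuf from
 * the firmware pool, remember the good ones (bit 9 clear), and free
 * only those back, leaving the bad mbufs permanently allocated.
 */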
2639 static int
2640 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2641 {
2642 	u16 *good_mbuf;
2643 	u32 good_mbuf_cnt;
2644 	u32 val;
2645 
2646 	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2647 	if (good_mbuf == NULL)
2648 		return -ENOMEM;
2649 
2650 	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2651 		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2652 
2653 	good_mbuf_cnt = 0;
2654 
2655 	/* Allocate a bunch of mbufs and save the good ones in an array. */
2656 	val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2657 	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2658 		bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2659 				BNX2_RBUF_COMMAND_ALLOC_REQ);
2660 
2661 		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2662 
2663 		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2664 
2665 		/* The addresses with Bit 9 set are bad memory blocks. */
2666 		if (!(val & (1 << 9))) {
2667 			good_mbuf[good_mbuf_cnt] = (u16) val;
2668 			good_mbuf_cnt++;
2669 		}
2670 
2671 		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2672 	}
2673 
	/* Free the good ones back to the mbuf pool, thus discarding
	 * all the bad ones.
	 */
2676 	while (good_mbuf_cnt) {
2677 		good_mbuf_cnt--;
2678 
2679 		val = good_mbuf[good_mbuf_cnt];
2680 		val = (val << 9) | val | 1;
2681 
2682 		bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2683 	}
2684 	kfree(good_mbuf);
2685 	return 0;
2686 }
2687 
2688 static void
2689 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2690 {
2691 	u32 val;
2692 
2693 	val = (mac_addr[0] << 8) | mac_addr[1];
2694 
2695 	BNX2_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2696 
2697 	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2698 		(mac_addr[4] << 8) | mac_addr[5];
2699 
2700 	BNX2_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2701 }
2702 
2703 static inline int
2704 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2705 {
2706 	dma_addr_t mapping;
2707 	struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2708 	struct bnx2_rx_bd *rxbd =
2709 		&rxr->rx_pg_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
2710 	struct page *page = alloc_page(gfp);
2711 
2712 	if (!page)
2713 		return -ENOMEM;
2714 	mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
2715 			       PCI_DMA_FROMDEVICE);
2716 	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2717 		__free_page(page);
2718 		return -EIO;
2719 	}
2720 
2721 	rx_pg->page = page;
2722 	dma_unmap_addr_set(rx_pg, mapping, mapping);
2723 	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2724 	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2725 	return 0;
2726 }
2727 
2728 static void
2729 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2730 {
2731 	struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2732 	struct page *page = rx_pg->page;
2733 
2734 	if (!page)
2735 		return;
2736 
2737 	dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
2738 		       PAGE_SIZE, PCI_DMA_FROMDEVICE);
2739 
2740 	__free_page(page);
2741 	rx_pg->page = NULL;
2742 }
2743 
2744 static inline int
2745 bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2746 {
2747 	u8 *data;
2748 	struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2749 	dma_addr_t mapping;
2750 	struct bnx2_rx_bd *rxbd =
2751 		&rxr->rx_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
2752 
2753 	data = kmalloc(bp->rx_buf_size, gfp);
2754 	if (!data)
2755 		return -ENOMEM;
2756 
2757 	mapping = dma_map_single(&bp->pdev->dev,
2758 				 get_l2_fhdr(data),
2759 				 bp->rx_buf_use_size,
2760 				 PCI_DMA_FROMDEVICE);
2761 	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2762 		kfree(data);
2763 		return -EIO;
2764 	}
2765 
2766 	rx_buf->data = data;
2767 	dma_unmap_addr_set(rx_buf, mapping, mapping);
2768 
2769 	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2770 	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2771 
2772 	rxr->rx_prod_bseq += bp->rx_buf_use_size;
2773 
2774 	return 0;
2775 }
2776 
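/* An attention event is pending when the status bit and its ack bit
 * disagree.  Acknowledge the event by setting or clearing the bit
 * through the PCICFG status-bit command registers.
 */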
2777 static int
2778 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2779 {
2780 	struct status_block *sblk = bnapi->status_blk.msi;
2781 	u32 new_link_state, old_link_state;
2782 	int is_set = 1;
2783 
2784 	new_link_state = sblk->status_attn_bits & event;
2785 	old_link_state = sblk->status_attn_bits_ack & event;
2786 	if (new_link_state != old_link_state) {
2787 		if (new_link_state)
2788 			BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2789 		else
2790 			BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2791 	} else
2792 		is_set = 0;
2793 
2794 	return is_set;
2795 }
2796 
2797 static void
2798 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2799 {
2800 	spin_lock(&bp->phy_lock);
2801 
2802 	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2803 		bnx2_set_link(bp);
2804 	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2805 		bnx2_set_remote_link(bp);
2806 
	spin_unlock(&bp->phy_lock);
}
2810 
2811 static inline u16
2812 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2813 {
2814 	u16 cons;
2815 
2816 	/* Tell compiler that status block fields can change. */
2817 	barrier();
2818 	cons = *bnapi->hw_tx_cons_ptr;
2819 	barrier();
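	/* Skip the slot at the end of each ring page, which is
	 * presumably reserved for the next-page chain BD rather
	 * than a real descriptor.
	 */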
2820 	if (unlikely((cons & BNX2_MAX_TX_DESC_CNT) == BNX2_MAX_TX_DESC_CNT))
2821 		cons++;
2822 	return cons;
2823 }
2824 
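/* Reclaim TX descriptors up to the hardware consumer index: unmap the
 * DMA buffers, free the skbs, update BQL accounting, and wake the TX
 * queue if it was stopped and enough descriptors are free again.
 */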
2825 static int
2826 bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2827 {
2828 	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
2829 	u16 hw_cons, sw_cons, sw_ring_cons;
2830 	int tx_pkt = 0, index;
2831 	unsigned int tx_bytes = 0;
2832 	struct netdev_queue *txq;
2833 
2834 	index = (bnapi - bp->bnx2_napi);
2835 	txq = netdev_get_tx_queue(bp->dev, index);
2836 
2837 	hw_cons = bnx2_get_hw_tx_cons(bnapi);
2838 	sw_cons = txr->tx_cons;
2839 
2840 	while (sw_cons != hw_cons) {
2841 		struct bnx2_sw_tx_bd *tx_buf;
2842 		struct sk_buff *skb;
2843 		int i, last;
2844 
2845 		sw_ring_cons = BNX2_TX_RING_IDX(sw_cons);
2846 
2847 		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
2848 		skb = tx_buf->skb;
2849 
		/* prefetch skb_end_pointer() to speed up skb_shinfo(skb) */
2851 		prefetch(&skb->end);
2852 
2853 		/* partial BD completions possible with TSO packets */
2854 		if (tx_buf->is_gso) {
2855 			u16 last_idx, last_ring_idx;
2856 
2857 			last_idx = sw_cons + tx_buf->nr_frags + 1;
2858 			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
2859 			if (unlikely(last_ring_idx >= BNX2_MAX_TX_DESC_CNT)) {
2860 				last_idx++;
2861 			}
2862 			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2863 				break;
2864 			}
2865 		}
2866 
2867 		dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
2868 			skb_headlen(skb), PCI_DMA_TODEVICE);
2869 
2870 		tx_buf->skb = NULL;
2871 		last = tx_buf->nr_frags;
2872 
2873 		for (i = 0; i < last; i++) {
2874 			struct bnx2_sw_tx_bd *tx_buf;
2875 
2876 			sw_cons = BNX2_NEXT_TX_BD(sw_cons);
2877 
2878 			tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(sw_cons)];
2879 			dma_unmap_page(&bp->pdev->dev,
2880 				dma_unmap_addr(tx_buf, mapping),
2881 				skb_frag_size(&skb_shinfo(skb)->frags[i]),
2882 				PCI_DMA_TODEVICE);
2883 		}
2884 
2885 		sw_cons = BNX2_NEXT_TX_BD(sw_cons);
2886 
2887 		tx_bytes += skb->len;
2888 		dev_kfree_skb(skb);
2889 		tx_pkt++;
2890 		if (tx_pkt == budget)
2891 			break;
2892 
2893 		if (hw_cons == sw_cons)
2894 			hw_cons = bnx2_get_hw_tx_cons(bnapi);
2895 	}
2896 
2897 	netdev_tx_completed_queue(txq, tx_pkt, tx_bytes);
2898 	txr->hw_tx_cons = hw_cons;
2899 	txr->tx_cons = sw_cons;
2900 
2901 	/* Need to make the tx_cons update visible to bnx2_start_xmit()
2902 	 * before checking for netif_tx_queue_stopped().  Without the
2903 	 * memory barrier, there is a small possibility that bnx2_start_xmit()
2904 	 * will miss it and cause the queue to be stopped forever.
2905 	 */
2906 	smp_mb();
2907 
2908 	if (unlikely(netif_tx_queue_stopped(txq)) &&
2909 		     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
2910 		__netif_tx_lock(txq, smp_processor_id());
2911 		if ((netif_tx_queue_stopped(txq)) &&
2912 		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
2913 			netif_tx_wake_queue(txq);
2914 		__netif_tx_unlock(txq);
2915 	}
2916 
2917 	return tx_pkt;
2918 }
2919 
2920 static void
2921 bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2922 			struct sk_buff *skb, int count)
2923 {
2924 	struct bnx2_sw_pg *cons_rx_pg, *prod_rx_pg;
2925 	struct bnx2_rx_bd *cons_bd, *prod_bd;
2926 	int i;
2927 	u16 hw_prod, prod;
2928 	u16 cons = rxr->rx_pg_cons;
2929 
2930 	cons_rx_pg = &rxr->rx_pg_ring[cons];
2931 
2932 	/* The caller was unable to allocate a new page to replace the
2933 	 * last one in the frags array, so we need to recycle that page
2934 	 * and then free the skb.
2935 	 */
2936 	if (skb) {
2937 		struct page *page;
2938 		struct skb_shared_info *shinfo;
2939 
2940 		shinfo = skb_shinfo(skb);
2941 		shinfo->nr_frags--;
2942 		page = skb_frag_page(&shinfo->frags[shinfo->nr_frags]);
2943 		__skb_frag_set_page(&shinfo->frags[shinfo->nr_frags], NULL);
2944 
2945 		cons_rx_pg->page = page;
2946 		dev_kfree_skb(skb);
2947 	}
2948 
2949 	hw_prod = rxr->rx_pg_prod;
2950 
2951 	for (i = 0; i < count; i++) {
2952 		prod = BNX2_RX_PG_RING_IDX(hw_prod);
2953 
2954 		prod_rx_pg = &rxr->rx_pg_ring[prod];
2955 		cons_rx_pg = &rxr->rx_pg_ring[cons];
2956 		cons_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(cons)]
2957 						[BNX2_RX_IDX(cons)];
2958 		prod_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(prod)]
2959 						[BNX2_RX_IDX(prod)];
2960 
2961 		if (prod != cons) {
2962 			prod_rx_pg->page = cons_rx_pg->page;
2963 			cons_rx_pg->page = NULL;
2964 			dma_unmap_addr_set(prod_rx_pg, mapping,
2965 				dma_unmap_addr(cons_rx_pg, mapping));
2966 
2967 			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2968 			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2969 
2970 		}
2971 		cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(cons));
2972 		hw_prod = BNX2_NEXT_RX_BD(hw_prod);
2973 	}
2974 	rxr->rx_pg_prod = hw_prod;
2975 	rxr->rx_pg_cons = cons;
2976 }
2977 
2978 static inline void
2979 bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2980 		   u8 *data, u16 cons, u16 prod)
2981 {
2982 	struct bnx2_sw_bd *cons_rx_buf, *prod_rx_buf;
2983 	struct bnx2_rx_bd *cons_bd, *prod_bd;
2984 
2985 	cons_rx_buf = &rxr->rx_buf_ring[cons];
2986 	prod_rx_buf = &rxr->rx_buf_ring[prod];
2987 
2988 	dma_sync_single_for_device(&bp->pdev->dev,
2989 		dma_unmap_addr(cons_rx_buf, mapping),
2990 		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2991 
2992 	rxr->rx_prod_bseq += bp->rx_buf_use_size;
2993 
2994 	prod_rx_buf->data = data;
2995 
2996 	if (cons == prod)
2997 		return;
2998 
2999 	dma_unmap_addr_set(prod_rx_buf, mapping,
3000 			dma_unmap_addr(cons_rx_buf, mapping));
3001 
3002 	cons_bd = &rxr->rx_desc_ring[BNX2_RX_RING(cons)][BNX2_RX_IDX(cons)];
3003 	prod_bd = &rxr->rx_desc_ring[BNX2_RX_RING(prod)][BNX2_RX_IDX(prod)];
3004 	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
3005 	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
3006 }
3007 
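/* Turn a completed RX buffer into an skb.  A replacement buffer is
 * allocated first; if that fails, the old buffer (and any attached
 * pages) is recycled and NULL is returned.  For split or jumbo
 * frames, the rest of the packet is attached as page fragments taken
 * from the page ring.
 */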
3008 static struct sk_buff *
3009 bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u8 *data,
3010 	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
3011 	    u32 ring_idx)
3012 {
3013 	int err;
3014 	u16 prod = ring_idx & 0xffff;
3015 	struct sk_buff *skb;
3016 
3017 	err = bnx2_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
3018 	if (unlikely(err)) {
3019 		bnx2_reuse_rx_data(bp, rxr, data, (u16) (ring_idx >> 16), prod);
3020 error:
3021 		if (hdr_len) {
3022 			unsigned int raw_len = len + 4;
3023 			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
3024 
3025 			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3026 		}
3027 		return NULL;
3028 	}
3029 
3030 	dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
3031 			 PCI_DMA_FROMDEVICE);
3032 	skb = build_skb(data, 0);
3033 	if (!skb) {
3034 		kfree(data);
3035 		goto error;
3036 	}
3037 	skb_reserve(skb, ((u8 *)get_l2_fhdr(data) - data) + BNX2_RX_OFFSET);
3038 	if (hdr_len == 0) {
3039 		skb_put(skb, len);
3040 		return skb;
3041 	} else {
3042 		unsigned int i, frag_len, frag_size, pages;
3043 		struct bnx2_sw_pg *rx_pg;
3044 		u16 pg_cons = rxr->rx_pg_cons;
3045 		u16 pg_prod = rxr->rx_pg_prod;
3046 
3047 		frag_size = len + 4 - hdr_len;
3048 		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
3049 		skb_put(skb, hdr_len);
3050 
3051 		for (i = 0; i < pages; i++) {
3052 			dma_addr_t mapping_old;
3053 
3054 			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
3055 			if (unlikely(frag_len <= 4)) {
3056 				unsigned int tail = 4 - frag_len;
3057 
3058 				rxr->rx_pg_cons = pg_cons;
3059 				rxr->rx_pg_prod = pg_prod;
3060 				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
3061 							pages - i);
3062 				skb->len -= tail;
3063 				if (i == 0) {
3064 					skb->tail -= tail;
3065 				} else {
3066 					skb_frag_t *frag =
3067 						&skb_shinfo(skb)->frags[i - 1];
3068 					skb_frag_size_sub(frag, tail);
3069 					skb->data_len -= tail;
3070 				}
3071 				return skb;
3072 			}
3073 			rx_pg = &rxr->rx_pg_ring[pg_cons];
3074 
3075 			/* Don't unmap yet.  If we're unable to allocate a new
3076 			 * page, we need to recycle the page and the DMA addr.
3077 			 */
3078 			mapping_old = dma_unmap_addr(rx_pg, mapping);
3079 			if (i == pages - 1)
3080 				frag_len -= 4;
3081 
3082 			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
3083 			rx_pg->page = NULL;
3084 
3085 			err = bnx2_alloc_rx_page(bp, rxr,
3086 						 BNX2_RX_PG_RING_IDX(pg_prod),
3087 						 GFP_ATOMIC);
3088 			if (unlikely(err)) {
3089 				rxr->rx_pg_cons = pg_cons;
3090 				rxr->rx_pg_prod = pg_prod;
3091 				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
3092 							pages - i);
3093 				return NULL;
3094 			}
3095 
3096 			dma_unmap_page(&bp->pdev->dev, mapping_old,
3097 				       PAGE_SIZE, PCI_DMA_FROMDEVICE);
3098 
3099 			frag_size -= frag_len;
3100 			skb->data_len += frag_len;
3101 			skb->truesize += PAGE_SIZE;
3102 			skb->len += frag_len;
3103 
3104 			pg_prod = BNX2_NEXT_RX_BD(pg_prod);
3105 			pg_cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(pg_cons));
3106 		}
3107 		rxr->rx_pg_prod = pg_prod;
3108 		rxr->rx_pg_cons = pg_cons;
3109 	}
3110 	return skb;
3111 }
3112 
3113 static inline u16
3114 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
3115 {
3116 	u16 cons;
3117 
3118 	/* Tell compiler that status block fields can change. */
3119 	barrier();
3120 	cons = *bnapi->hw_rx_cons_ptr;
3121 	barrier();
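	/* As in bnx2_get_hw_tx_cons(), skip the presumed chain-BD slot
	 * at the end of each ring page.
	 */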
3122 	if (unlikely((cons & BNX2_MAX_RX_DESC_CNT) == BNX2_MAX_RX_DESC_CNT))
3123 		cons++;
3124 	return cons;
3125 }
3126 
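/* Main NAPI receive path: walk the RX ring up to the hardware consumer
 * index, drop frames with hardware-detected errors, copy small frames
 * into fresh skbs, build skbs around large ones, and hand everything
 * up through GRO.
 */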
3127 static int
3128 bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3129 {
3130 	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3131 	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
3132 	struct l2_fhdr *rx_hdr;
3133 	int rx_pkt = 0, pg_ring_used = 0;
3134 
3135 	hw_cons = bnx2_get_hw_rx_cons(bnapi);
3136 	sw_cons = rxr->rx_cons;
3137 	sw_prod = rxr->rx_prod;
3138 
3139 	/* Memory barrier necessary as speculative reads of the rx
3140 	 * buffer can be ahead of the index in the status block
3141 	 */
3142 	rmb();
3143 	while (sw_cons != hw_cons) {
3144 		unsigned int len, hdr_len;
3145 		u32 status;
3146 		struct bnx2_sw_bd *rx_buf, *next_rx_buf;
3147 		struct sk_buff *skb;
3148 		dma_addr_t dma_addr;
3149 		u8 *data;
3150 		u16 next_ring_idx;
3151 
3152 		sw_ring_cons = BNX2_RX_RING_IDX(sw_cons);
3153 		sw_ring_prod = BNX2_RX_RING_IDX(sw_prod);
3154 
3155 		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
3156 		data = rx_buf->data;
3157 		rx_buf->data = NULL;
3158 
3159 		rx_hdr = get_l2_fhdr(data);
3160 		prefetch(rx_hdr);
3161 
3162 		dma_addr = dma_unmap_addr(rx_buf, mapping);
3163 
3164 		dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
3165 			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
3166 			PCI_DMA_FROMDEVICE);
3167 
3168 		next_ring_idx = BNX2_RX_RING_IDX(BNX2_NEXT_RX_BD(sw_cons));
3169 		next_rx_buf = &rxr->rx_buf_ring[next_ring_idx];
3170 		prefetch(get_l2_fhdr(next_rx_buf->data));
3171 
3172 		len = rx_hdr->l2_fhdr_pkt_len;
3173 		status = rx_hdr->l2_fhdr_status;
3174 
3175 		hdr_len = 0;
3176 		if (status & L2_FHDR_STATUS_SPLIT) {
3177 			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
3178 			pg_ring_used = 1;
3179 		} else if (len > bp->rx_jumbo_thresh) {
3180 			hdr_len = bp->rx_jumbo_thresh;
3181 			pg_ring_used = 1;
3182 		}
3183 
3184 		if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
3185 				       L2_FHDR_ERRORS_PHY_DECODE |
3186 				       L2_FHDR_ERRORS_ALIGNMENT |
3187 				       L2_FHDR_ERRORS_TOO_SHORT |
3188 				       L2_FHDR_ERRORS_GIANT_FRAME))) {
3189 
3190 			bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
3191 					  sw_ring_prod);
3192 			if (pg_ring_used) {
3193 				int pages;
3194 
3195 				pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;
3196 
3197 				bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3198 			}
3199 			goto next_rx;
3200 		}
3201 
3202 		len -= 4;
3203 
3204 		if (len <= bp->rx_copy_thresh) {
3205 			skb = netdev_alloc_skb(bp->dev, len + 6);
3206 			if (skb == NULL) {
3207 				bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
3208 						  sw_ring_prod);
3209 				goto next_rx;
3210 			}
3211 
3212 			/* aligned copy */
3213 			memcpy(skb->data,
3214 			       (u8 *)rx_hdr + BNX2_RX_OFFSET - 6,
3215 			       len + 6);
3216 			skb_reserve(skb, 6);
3217 			skb_put(skb, len);
3218 
3219 			bnx2_reuse_rx_data(bp, rxr, data,
3220 				sw_ring_cons, sw_ring_prod);
3221 
3222 		} else {
3223 			skb = bnx2_rx_skb(bp, rxr, data, len, hdr_len, dma_addr,
3224 					  (sw_ring_cons << 16) | sw_ring_prod);
3225 			if (!skb)
3226 				goto next_rx;
3227 		}
3228 		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
3229 		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
3230 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rx_hdr->l2_fhdr_vlan_tag);
3231 
3232 		skb->protocol = eth_type_trans(skb, bp->dev);
3233 
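		/* Drop oversized frames unless they carry a VLAN tag
		 * (ethertype 0x8100), which legitimately adds 4 bytes.
		 */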
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
		    (ntohs(skb->protocol) != 0x8100)) {
			dev_kfree_skb(skb);
			goto next_rx;
		}
3241 
3242 		skb_checksum_none_assert(skb);
3243 		if ((bp->dev->features & NETIF_F_RXCSUM) &&
3244 			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
3245 			L2_FHDR_STATUS_UDP_DATAGRAM))) {
3246 
3247 			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
3248 					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
3249 				skb->ip_summed = CHECKSUM_UNNECESSARY;
3250 		}
3251 		if ((bp->dev->features & NETIF_F_RXHASH) &&
3252 		    ((status & L2_FHDR_STATUS_USE_RXHASH) ==
3253 		     L2_FHDR_STATUS_USE_RXHASH))
3254 			skb_set_hash(skb, rx_hdr->l2_fhdr_hash,
3255 				     PKT_HASH_TYPE_L3);
3256 
3257 		skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
3258 		napi_gro_receive(&bnapi->napi, skb);
3259 		rx_pkt++;
3260 
3261 next_rx:
3262 		sw_cons = BNX2_NEXT_RX_BD(sw_cons);
3263 		sw_prod = BNX2_NEXT_RX_BD(sw_prod);
3264 
		if (rx_pkt == budget)
			break;
3267 
3268 		/* Refresh hw_cons to see if there is new work */
3269 		if (sw_cons == hw_cons) {
3270 			hw_cons = bnx2_get_hw_rx_cons(bnapi);
3271 			rmb();
3272 		}
3273 	}
3274 	rxr->rx_cons = sw_cons;
3275 	rxr->rx_prod = sw_prod;
3276 
3277 	if (pg_ring_used)
3278 		BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
3279 
3280 	BNX2_WR16(bp, rxr->rx_bidx_addr, sw_prod);
3281 
3282 	BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
3283 
3284 	mmiowb();
3285 
	return rx_pkt;
}
3289 
3290 /* MSI ISR - The only difference between this and the INTx ISR
3291  * is that the MSI interrupt is always serviced.
3292  */
3293 static irqreturn_t
3294 bnx2_msi(int irq, void *dev_instance)
3295 {
3296 	struct bnx2_napi *bnapi = dev_instance;
3297 	struct bnx2 *bp = bnapi->bp;
3298 
3299 	prefetch(bnapi->status_blk.msi);
3300 	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3301 		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3302 		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3303 
3304 	/* Return here if interrupt is disabled. */
3305 	if (unlikely(atomic_read(&bp->intr_sem) != 0))
3306 		return IRQ_HANDLED;
3307 
3308 	napi_schedule(&bnapi->napi);
3309 
3310 	return IRQ_HANDLED;
3311 }
3312 
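/* MSI one-shot variant: no mask/ack write is needed here because the
 * hardware is presumed to disable the interrupt until the NAPI
 * handler re-arms it.
 */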
3313 static irqreturn_t
3314 bnx2_msi_1shot(int irq, void *dev_instance)
3315 {
3316 	struct bnx2_napi *bnapi = dev_instance;
3317 	struct bnx2 *bp = bnapi->bp;
3318 
3319 	prefetch(bnapi->status_blk.msi);
3320 
3321 	/* Return here if interrupt is disabled. */
3322 	if (unlikely(atomic_read(&bp->intr_sem) != 0))
3323 		return IRQ_HANDLED;
3324 
3325 	napi_schedule(&bnapi->napi);
3326 
3327 	return IRQ_HANDLED;
3328 }
3329 
3330 static irqreturn_t
3331 bnx2_interrupt(int irq, void *dev_instance)
3332 {
3333 	struct bnx2_napi *bnapi = dev_instance;
3334 	struct bnx2 *bp = bnapi->bp;
3335 	struct status_block *sblk = bnapi->status_blk.msi;
3336 
	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block write posted prior to the
	 * interrupt has reached host memory.  Reading a register will
	 * flush the status block.  When using MSI, the MSI message will
	 * always complete after the status block write.
	 */
3343 	if ((sblk->status_idx == bnapi->last_status_idx) &&
3344 	    (BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS) &
3345 	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
3346 		return IRQ_NONE;
3347 
3348 	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3349 		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3350 		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3351 
3352 	/* Read back to deassert IRQ immediately to avoid too many
3353 	 * spurious interrupts.
3354 	 */
3355 	BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
3356 
3357 	/* Return here if interrupt is shared and is disabled. */
3358 	if (unlikely(atomic_read(&bp->intr_sem) != 0))
3359 		return IRQ_HANDLED;
3360 
3361 	if (napi_schedule_prep(&bnapi->napi)) {
3362 		bnapi->last_status_idx = sblk->status_idx;
3363 		__napi_schedule(&bnapi->napi);
3364 	}
3365 
3366 	return IRQ_HANDLED;
3367 }
3368 
3369 static inline int
3370 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3371 {
3372 	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3373 	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3374 
3375 	if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3376 	    (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3377 		return 1;
3378 	return 0;
3379 }
3380 
3381 #define STATUS_ATTN_EVENTS	(STATUS_ATTN_BITS_LINK_STATE | \
3382 				 STATUS_ATTN_BITS_TIMER_ABORT)
3383 
3384 static inline int
3385 bnx2_has_work(struct bnx2_napi *bnapi)
3386 {
3387 	struct status_block *sblk = bnapi->status_blk.msi;
3388 
3389 	if (bnx2_has_fast_work(bnapi))
3390 		return 1;
3391 
3392 #ifdef BCM_CNIC
3393 	if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3394 		return 1;
3395 #endif
3396 
3397 	if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3398 	    (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3399 		return 1;
3400 
3401 	return 0;
3402 }
3403 
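/* Check for a missed MSI: if work is pending but the status index has
 * not advanced since the last idle check, toggle the MSI enable bit
 * and invoke the handler directly to unwedge the device.
 */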
3404 static void
3405 bnx2_chk_missed_msi(struct bnx2 *bp)
3406 {
3407 	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
3408 	u32 msi_ctrl;
3409 
3410 	if (bnx2_has_work(bnapi)) {
3411 		msi_ctrl = BNX2_RD(bp, BNX2_PCICFG_MSI_CONTROL);
3412 		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
3413 			return;
3414 
3415 		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
3416 			BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
3417 				~BNX2_PCICFG_MSI_CONTROL_ENABLE);
3418 			BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
3419 			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
3420 		}
3421 	}
3422 
3423 	bp->idle_chk_status_idx = bnapi->last_status_idx;
3424 }
3425 
3426 #ifdef BCM_CNIC
3427 static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
3428 {
3429 	struct cnic_ops *c_ops;
3430 
3431 	if (!bnapi->cnic_present)
3432 		return;
3433 
3434 	rcu_read_lock();
3435 	c_ops = rcu_dereference(bp->cnic_ops);
3436 	if (c_ops)
3437 		bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
3438 						      bnapi->status_blk.msi);
3439 	rcu_read_unlock();
3440 }
3441 #endif
3442 
3443 static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
3444 {
3445 	struct status_block *sblk = bnapi->status_blk.msi;
3446 	u32 status_attn_bits = sblk->status_attn_bits;
3447 	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
3448 
3449 	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
3450 	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
3451 
3452 		bnx2_phy_int(bp, bnapi);
3453 
3454 		/* This is needed to take care of transient status
3455 		 * during link changes.
3456 		 */
3457 		BNX2_WR(bp, BNX2_HC_COMMAND,
3458 			bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3459 		BNX2_RD(bp, BNX2_HC_COMMAND);
3460 	}
3461 }
3462 
3463 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3464 			  int work_done, int budget)
3465 {
3466 	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3467 	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3468 
3469 	if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3470 		bnx2_tx_int(bp, bnapi, 0);
3471 
3472 	if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3473 		work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3474 
3475 	return work_done;
3476 }
3477 
3478 static int bnx2_poll_msix(struct napi_struct *napi, int budget)
3479 {
3480 	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3481 	struct bnx2 *bp = bnapi->bp;
3482 	int work_done = 0;
3483 	struct status_block_msix *sblk = bnapi->status_blk.msix;
3484 
3485 	while (1) {
3486 		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3487 		if (unlikely(work_done >= budget))
3488 			break;
3489 
3490 		bnapi->last_status_idx = sblk->status_idx;
3491 		/* status idx must be read before checking for more work. */
3492 		rmb();
3493 		if (likely(!bnx2_has_fast_work(bnapi))) {
3494 
3495 			napi_complete(napi);
3496 			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
3497 				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3498 				bnapi->last_status_idx);
3499 			break;
3500 		}
3501 	}
3502 	return work_done;
3503 }
3504 
3505 static int bnx2_poll(struct napi_struct *napi, int budget)
3506 {
3507 	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3508 	struct bnx2 *bp = bnapi->bp;
3509 	int work_done = 0;
3510 	struct status_block *sblk = bnapi->status_blk.msi;
3511 
3512 	while (1) {
3513 		bnx2_poll_link(bp, bnapi);
3514 
3515 		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3516 
3517 #ifdef BCM_CNIC
3518 		bnx2_poll_cnic(bp, bnapi);
3519 #endif
3520 
3521 		/* bnapi->last_status_idx is used below to tell the hw how
3522 		 * much work has been processed, so we must read it before
3523 		 * checking for more work.
3524 		 */
3525 		bnapi->last_status_idx = sblk->status_idx;
3526 
3527 		if (unlikely(work_done >= budget))
3528 			break;
3529 
3530 		rmb();
3531 		if (likely(!bnx2_has_work(bnapi))) {
3532 			napi_complete(napi);
3533 			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
3534 				BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3535 					BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3536 					bnapi->last_status_idx);
3537 				break;
3538 			}
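			/* INTx: update the index with the interrupt still
			 * masked, then unmask it with a second write.
			 */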
3539 			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3540 				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3541 				BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
3542 				bnapi->last_status_idx);
3543 
3544 			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3545 				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3546 				bnapi->last_status_idx);
3547 			break;
3548 		}
3549 	}
3550 
3551 	return work_done;
3552 }
3553 
3554 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
3555  * from set_multicast.
3556  */
3557 static void
3558 bnx2_set_rx_mode(struct net_device *dev)
3559 {
3560 	struct bnx2 *bp = netdev_priv(dev);
3561 	u32 rx_mode, sort_mode;
3562 	struct netdev_hw_addr *ha;
3563 	int i;
3564 
3565 	if (!netif_running(dev))
3566 		return;
3567 
3568 	spin_lock_bh(&bp->phy_lock);
3569 
3570 	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3571 				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3572 	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
3573 	if (!(dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
3574 	     (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
3575 		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3576 	if (dev->flags & IFF_PROMISC) {
3577 		/* Promiscuous mode. */
3578 		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3579 		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3580 			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	} else if (dev->flags & IFF_ALLMULTI) {
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	} else {
3590 		/* Accept one or more multicast(s). */
3591 		u32 mc_filter[NUM_MC_HASH_REGISTERS];
3592 		u32 regidx;
3593 		u32 bit;
3594 		u32 crc;
3595 
3596 		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3597 
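		/* Hash each address into one of 256 bins: the low byte
		 * of the little-endian CRC selects a single bit in the
		 * eight 32-bit multicast hash registers.
		 */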
3598 		netdev_for_each_mc_addr(ha, dev) {
3599 			crc = ether_crc_le(ETH_ALEN, ha->addr);
3600 			bit = crc & 0xff;
3601 			regidx = (bit & 0xe0) >> 5;
3602 			bit &= 0x1f;
3603 			mc_filter[regidx] |= (1 << bit);
3604 		}
3605 
3606 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3607 			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3608 				mc_filter[i]);
3609 		}
3610 
3611 		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3612 	}
3613 
3614 	if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
3615 		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3616 		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3617 			     BNX2_RPM_SORT_USER0_PROM_VLAN;
3618 	} else if (!(dev->flags & IFF_PROMISC)) {
		/* Add all entries to the match filter list */
3620 		i = 0;
3621 		netdev_for_each_uc_addr(ha, dev) {
3622 			bnx2_set_mac_addr(bp, ha->addr,
3623 					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
3624 			sort_mode |= (1 <<
3625 				      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
3626 			i++;
		}
	}
3630 
3631 	if (rx_mode != bp->rx_mode) {
3632 		bp->rx_mode = rx_mode;
3633 		BNX2_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3634 	}
3635 
3636 	BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3637 	BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3638 	BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3639 
3640 	spin_unlock_bh(&bp->phy_lock);
3641 }
3642 
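/* Sanity check one section header of a firmware file: the section must
 * lie entirely within the blob and satisfy the required alignment.
 */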
3643 static int
3644 check_fw_section(const struct firmware *fw,
3645 		 const struct bnx2_fw_file_section *section,
3646 		 u32 alignment, bool non_empty)
3647 {
3648 	u32 offset = be32_to_cpu(section->offset);
3649 	u32 len = be32_to_cpu(section->len);
3650 
3651 	if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3652 		return -EINVAL;
3653 	if ((non_empty && len == 0) || len > fw->size - offset ||
3654 	    len & (alignment - 1))
3655 		return -EINVAL;
3656 	return 0;
3657 }
3658 
3659 static int
3660 check_mips_fw_entry(const struct firmware *fw,
3661 		    const struct bnx2_mips_fw_file_entry *entry)
3662 {
3663 	if (check_fw_section(fw, &entry->text, 4, true) ||
3664 	    check_fw_section(fw, &entry->data, 4, false) ||
3665 	    check_fw_section(fw, &entry->rodata, 4, false))
3666 		return -EINVAL;
3667 	return 0;
3668 }
3669 
3670 static void bnx2_release_firmware(struct bnx2 *bp)
3671 {
3672 	if (bp->rv2p_firmware) {
3673 		release_firmware(bp->mips_firmware);
3674 		release_firmware(bp->rv2p_firmware);
3675 		bp->rv2p_firmware = NULL;
3676 	}
3677 }
3678 
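/* Request the MIPS and RV2P firmware images appropriate for the chip
 * revision and validate their section headers; on failure both images
 * are released and an error is returned.
 */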
3679 static int bnx2_request_uncached_firmware(struct bnx2 *bp)
3680 {
3681 	const char *mips_fw_file, *rv2p_fw_file;
3682 	const struct bnx2_mips_fw_file *mips_fw;
3683 	const struct bnx2_rv2p_fw_file *rv2p_fw;
3684 	int rc;
3685 
3686 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
3687 		mips_fw_file = FW_MIPS_FILE_09;
3688 		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A0) ||
3689 		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A1))
3690 			rv2p_fw_file = FW_RV2P_FILE_09_Ax;
3691 		else
3692 			rv2p_fw_file = FW_RV2P_FILE_09;
3693 	} else {
3694 		mips_fw_file = FW_MIPS_FILE_06;
3695 		rv2p_fw_file = FW_RV2P_FILE_06;
3696 	}
3697 
3698 	rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3699 	if (rc) {
3700 		pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
3701 		goto out;
3702 	}
3703 
3704 	rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3705 	if (rc) {
3706 		pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
3707 		goto err_release_mips_firmware;
3708 	}
3709 	mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3710 	rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3711 	if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3712 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3713 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3714 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3715 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3716 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3717 		pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
3718 		rc = -EINVAL;
3719 		goto err_release_firmware;
3720 	}
3721 	if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3722 	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3723 	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3724 		pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
3725 		rc = -EINVAL;
3726 		goto err_release_firmware;
3727 	}
3728 out:
3729 	return rc;
3730 
3731 err_release_firmware:
3732 	release_firmware(bp->rv2p_firmware);
3733 	bp->rv2p_firmware = NULL;
3734 err_release_mips_firmware:
3735 	release_firmware(bp->mips_firmware);
3736 	goto out;
3737 }
3738 
3739 static int bnx2_request_firmware(struct bnx2 *bp)
3740 {
3741 	return bp->rv2p_firmware ? 0 : bnx2_request_uncached_firmware(bp);
3742 }
3743 
3744 static u32
3745 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3746 {
3747 	switch (idx) {
3748 	case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3749 		rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3750 		rv2p_code |= RV2P_BD_PAGE_SIZE;
3751 		break;
3752 	}
3753 	return rv2p_code;
3754 }
3755 
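/* Download RV2P microcode: each 64-bit instruction is written as a
 * high/low register pair and committed with a command write at its
 * instruction index; the fixup table then patches selected words
 * (e.g. the BD page size) before the processor is reset.
 */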
3756 static int
3757 load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
3758 	     const struct bnx2_rv2p_fw_file_entry *fw_entry)
3759 {
3760 	u32 rv2p_code_len, file_offset;
3761 	__be32 *rv2p_code;
3762 	int i;
3763 	u32 val, cmd, addr;
3764 
3765 	rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
3766 	file_offset = be32_to_cpu(fw_entry->rv2p.offset);
3767 
3768 	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3769 
3770 	if (rv2p_proc == RV2P_PROC1) {
3771 		cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3772 		addr = BNX2_RV2P_PROC1_ADDR_CMD;
3773 	} else {
3774 		cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3775 		addr = BNX2_RV2P_PROC2_ADDR_CMD;
3776 	}
3777 
3778 	for (i = 0; i < rv2p_code_len; i += 8) {
3779 		BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
3780 		rv2p_code++;
3781 		BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
3782 		rv2p_code++;
3783 
3784 		val = (i / 8) | cmd;
3785 		BNX2_WR(bp, addr, val);
3786 	}
3787 
3788 	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3789 	for (i = 0; i < 8; i++) {
3790 		u32 loc, code;
3791 
3792 		loc = be32_to_cpu(fw_entry->fixup[i]);
3793 		if (loc && ((loc * 4) < rv2p_code_len)) {
3794 			code = be32_to_cpu(*(rv2p_code + loc - 1));
3795 			BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
3796 			code = be32_to_cpu(*(rv2p_code + loc));
3797 			code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
3798 			BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, code);
3799 
3800 			val = (loc / 2) | cmd;
3801 			BNX2_WR(bp, addr, val);
3802 		}
3803 	}
3804 
	/* Reset the processor; the un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1)
		BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	else
		BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3812 
3813 	return 0;
3814 }
3815 
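/* Load firmware into one of the on-chip MIPS CPUs.  The CPU is halted
 * first, then the text, data and read-only sections are copied into its
 * scratchpad through indirect register writes, the prefetch instruction
 * register is cleared, the program counter is set to the image's start
 * address, and finally the halt bit is cleared to start execution.
 */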
3816 static int
3817 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3818 	    const struct bnx2_mips_fw_file_entry *fw_entry)
3819 {
3820 	u32 addr, len, file_offset;
3821 	__be32 *data;
3822 	u32 offset;
3823 	u32 val;
3824 
3825 	/* Halt the CPU. */
3826 	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3827 	val |= cpu_reg->mode_value_halt;
3828 	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3829 	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3830 
3831 	/* Load the Text area. */
3832 	addr = be32_to_cpu(fw_entry->text.addr);
3833 	len = be32_to_cpu(fw_entry->text.len);
3834 	file_offset = be32_to_cpu(fw_entry->text.offset);
3835 	data = (__be32 *)(bp->mips_firmware->data + file_offset);
3836 
3837 	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3838 	if (len) {
3839 		int j;
3840 
3841 		for (j = 0; j < (len / 4); j++, offset += 4)
3842 			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3843 	}
3844 
3845 	/* Load the Data area. */
3846 	addr = be32_to_cpu(fw_entry->data.addr);
3847 	len = be32_to_cpu(fw_entry->data.len);
3848 	file_offset = be32_to_cpu(fw_entry->data.offset);
3849 	data = (__be32 *)(bp->mips_firmware->data + file_offset);
3850 
3851 	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3852 	if (len) {
3853 		int j;
3854 
3855 		for (j = 0; j < (len / 4); j++, offset += 4)
3856 			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3857 	}
3858 
3859 	/* Load the Read-Only area. */
3860 	addr = be32_to_cpu(fw_entry->rodata.addr);
3861 	len = be32_to_cpu(fw_entry->rodata.len);
3862 	file_offset = be32_to_cpu(fw_entry->rodata.offset);
3863 	data = (__be32 *)(bp->mips_firmware->data + file_offset);
3864 
3865 	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3866 	if (len) {
3867 		int j;
3868 
3869 		for (j = 0; j < (len / 4); j++, offset += 4)
3870 			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3871 	}
3872 
3873 	/* Clear the pre-fetch instruction. */
3874 	bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3875 
3876 	val = be32_to_cpu(fw_entry->start_addr);
3877 	bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3878 
3879 	/* Start the CPU. */
3880 	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3881 	val &= ~cpu_reg->mode_value_halt;
3882 	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3883 	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3884 
3885 	return 0;
3886 }
3887 
3888 static int
3889 bnx2_init_cpus(struct bnx2 *bp)
3890 {
3891 	const struct bnx2_mips_fw_file *mips_fw =
3892 		(const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3893 	const struct bnx2_rv2p_fw_file *rv2p_fw =
3894 		(const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3895 	int rc;
3896 
3897 	/* Initialize the RV2P processor. */
3898 	load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3899 	load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3900 
3901 	/* Initialize the RX Processor. */
3902 	rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3903 	if (rc)
3904 		goto init_cpu_err;
3905 
3906 	/* Initialize the TX Processor. */
3907 	rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3908 	if (rc)
3909 		goto init_cpu_err;
3910 
3911 	/* Initialize the TX Patch-up Processor. */
3912 	rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3913 	if (rc)
3914 		goto init_cpu_err;
3915 
3916 	/* Initialize the Completion Processor. */
3917 	rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3918 	if (rc)
3919 		goto init_cpu_err;
3920 
3921 	/* Initialize the Command Processor. */
3922 	rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3923 
3924 init_cpu_err:
3925 	return rc;
3926 }
3927 
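/* Prepare the MAC for Wake-on-LAN before entering a low-power state.  On
 * copper ports the link is temporarily renegotiated down to 10/100 so it
 * can be maintained in D3, the MAC is set up to receive magic and ACPI
 * wakeup packets plus all multicast frames, and the firmware is told
 * whether to suspend with or without WoL armed.
 */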
3928 static void
3929 bnx2_setup_wol(struct bnx2 *bp)
3930 {
3931 	int i;
3932 	u32 val, wol_msg;
3933 
3934 	if (bp->wol) {
3935 		u32 advertising;
3936 		u8 autoneg;
3937 
3938 		autoneg = bp->autoneg;
3939 		advertising = bp->advertising;
3940 
3941 		if (bp->phy_port == PORT_TP) {
3942 			bp->autoneg = AUTONEG_SPEED;
3943 			bp->advertising = ADVERTISED_10baseT_Half |
3944 				ADVERTISED_10baseT_Full |
3945 				ADVERTISED_100baseT_Half |
3946 				ADVERTISED_100baseT_Full |
3947 				ADVERTISED_Autoneg;
3948 		}
3949 
3950 		spin_lock_bh(&bp->phy_lock);
3951 		bnx2_setup_phy(bp, bp->phy_port);
3952 		spin_unlock_bh(&bp->phy_lock);
3953 
3954 		bp->autoneg = autoneg;
3955 		bp->advertising = advertising;
3956 
3957 		bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
3958 
3959 		val = BNX2_RD(bp, BNX2_EMAC_MODE);
3960 
3961 		/* Enable port mode. */
3962 		val &= ~BNX2_EMAC_MODE_PORT;
3963 		val |= BNX2_EMAC_MODE_MPKT_RCVD |
3964 		       BNX2_EMAC_MODE_ACPI_RCVD |
3965 		       BNX2_EMAC_MODE_MPKT;
3966 		if (bp->phy_port == PORT_TP) {
3967 			val |= BNX2_EMAC_MODE_PORT_MII;
3968 		} else {
3969 			val |= BNX2_EMAC_MODE_PORT_GMII;
3970 			if (bp->line_speed == SPEED_2500)
3971 				val |= BNX2_EMAC_MODE_25G_MODE;
3972 		}
3973 
3974 		BNX2_WR(bp, BNX2_EMAC_MODE, val);
3975 
3976 		/* receive all multicast */
3977 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3978 			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3979 				0xffffffff);
3980 		}
3981 		BNX2_WR(bp, BNX2_EMAC_RX_MODE, BNX2_EMAC_RX_MODE_SORT_MODE);
3982 
3983 		val = 1 | BNX2_RPM_SORT_USER0_BC_EN | BNX2_RPM_SORT_USER0_MC_EN;
3984 		BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3985 		BNX2_WR(bp, BNX2_RPM_SORT_USER0, val);
3986 		BNX2_WR(bp, BNX2_RPM_SORT_USER0, val | BNX2_RPM_SORT_USER0_ENA);
3987 
3988 		/* Need to enable EMAC and RPM for WOL. */
3989 		BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3990 			BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3991 			BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3992 			BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3993 
3994 		val = BNX2_RD(bp, BNX2_RPM_CONFIG);
3995 		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3996 		BNX2_WR(bp, BNX2_RPM_CONFIG, val);
3997 
3998 		wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	} else {
		wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	}
4002 
	if (!(bp->flags & BNX2_FLAG_NO_WOL))
		bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 1, 0);
}
4007 
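/* Transition the device between D0 and D3hot.  Entering D0 clears the
 * wakeup-packet receive bits in the EMAC mode register; entering D3hot
 * arms WoL first.  Note that 5706 A0/A1 parts are only placed in D3hot
 * when WoL is enabled.
 */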
4008 static int
4009 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
4010 {
4011 	switch (state) {
4012 	case PCI_D0: {
4013 		u32 val;
4014 
4015 		pci_enable_wake(bp->pdev, PCI_D0, false);
4016 		pci_set_power_state(bp->pdev, PCI_D0);
4017 
4018 		val = BNX2_RD(bp, BNX2_EMAC_MODE);
4019 		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
4020 		val &= ~BNX2_EMAC_MODE_MPKT;
4021 		BNX2_WR(bp, BNX2_EMAC_MODE, val);
4022 
4023 		val = BNX2_RD(bp, BNX2_RPM_CONFIG);
4024 		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
4025 		BNX2_WR(bp, BNX2_RPM_CONFIG, val);
4026 		break;
4027 	}
4028 	case PCI_D3hot: {
4029 		bnx2_setup_wol(bp);
4030 		pci_wake_from_d3(bp->pdev, bp->wol);
		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)) {
4034 			if (bp->wol)
4035 				pci_set_power_state(bp->pdev, PCI_D3hot);
4036 		} else {
4037 			pci_set_power_state(bp->pdev, PCI_D3hot);
4038 		}
4039 
4040 		/* No more memory access after this point until
4041 		 * device is brought back to D0.
4042 		 */
4043 		break;
4044 	}
4045 	default:
4046 		return -EINVAL;
4047 	}
4048 	return 0;
4049 }
4050 
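/* Flash access is arbitrated between multiple agents through the
 * BNX2_NVM_SW_ARB register: set the request bit, then poll (up to
 * NVRAM_TIMEOUT_COUNT times, 5 us apart) until the matching grant bit
 * appears.  Release works the same way with the clear bit.
 */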
4051 static int
4052 bnx2_acquire_nvram_lock(struct bnx2 *bp)
4053 {
4054 	u32 val;
4055 	int j;
4056 
4057 	/* Request access to the flash interface. */
4058 	BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4059 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4060 		val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4061 		if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4062 			break;
4063 
4064 		udelay(5);
4065 	}
4066 
4067 	if (j >= NVRAM_TIMEOUT_COUNT)
4068 		return -EBUSY;
4069 
4070 	return 0;
4071 }
4072 
4073 static int
4074 bnx2_release_nvram_lock(struct bnx2 *bp)
4075 {
4076 	int j;
4077 	u32 val;
4078 
4079 	/* Relinquish nvram interface. */
4080 	BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4081 
4082 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4083 		val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4084 		if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4085 			break;
4086 
4087 		udelay(5);
4088 	}
4089 
4090 	if (j >= NVRAM_TIMEOUT_COUNT)
4091 		return -EBUSY;
4092 
4093 	return 0;
4094 }
4095 
4096 
4097 static int
4098 bnx2_enable_nvram_write(struct bnx2 *bp)
4099 {
4100 	u32 val;
4101 
4102 	val = BNX2_RD(bp, BNX2_MISC_CFG);
4103 	BNX2_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4104 
4105 	if (bp->flash_info->flags & BNX2_NV_WREN) {
4106 		int j;
4107 
4108 		BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4109 		BNX2_WR(bp, BNX2_NVM_COMMAND,
4110 			BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4111 
4112 		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4113 			udelay(5);
4114 
4115 			val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4116 			if (val & BNX2_NVM_COMMAND_DONE)
4117 				break;
4118 		}
4119 
4120 		if (j >= NVRAM_TIMEOUT_COUNT)
4121 			return -EBUSY;
4122 	}
4123 	return 0;
4124 }
4125 
4126 static void
4127 bnx2_disable_nvram_write(struct bnx2 *bp)
4128 {
4129 	u32 val;
4130 
4131 	val = BNX2_RD(bp, BNX2_MISC_CFG);
4132 	BNX2_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4133 }
4134 
4135 
4136 static void
4137 bnx2_enable_nvram_access(struct bnx2 *bp)
4138 {
4139 	u32 val;
4140 
4141 	val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4142 	/* Enable both bits, even on read. */
4143 	BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4144 		val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4145 }
4146 
4147 static void
4148 bnx2_disable_nvram_access(struct bnx2 *bp)
4149 {
4150 	u32 val;
4151 
4152 	val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4153 	/* Disable both bits, even after read. */
4154 	BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4155 		val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4156 			BNX2_NVM_ACCESS_ENABLE_WR_EN));
4157 }
4158 
4159 static int
4160 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
4161 {
4162 	u32 cmd;
4163 	int j;
4164 
4165 	if (bp->flash_info->flags & BNX2_NV_BUFFERED)
4166 		/* Buffered flash, no erase needed */
4167 		return 0;
4168 
4169 	/* Build an erase command */
4170 	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
4171 	      BNX2_NVM_COMMAND_DOIT;
4172 
4173 	/* Need to clear DONE bit separately. */
4174 	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4175 
	/* Address of the NVRAM page to erase. */
4177 	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4178 
4179 	/* Issue an erase command. */
4180 	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4181 
4182 	/* Wait for completion. */
4183 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4184 		u32 val;
4185 
4186 		udelay(5);
4187 
4188 		val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4189 		if (val & BNX2_NVM_COMMAND_DONE)
4190 			break;
4191 	}
4192 
4193 	if (j >= NVRAM_TIMEOUT_COUNT)
4194 		return -EBUSY;
4195 
4196 	return 0;
4197 }
4198 
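/* Read one 32-bit word from NVRAM.  Buffered (page-oriented) parts do not
 * use a flat byte address: the linear offset is translated into a page
 * number shifted into the page_bits position plus the byte offset within
 * the page.  For example, with a hypothetical 264-byte page and page_bits
 * of 9, linear offset 1000 maps to (3 << 9) + 208.
 */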
4199 static int
4200 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
4201 {
4202 	u32 cmd;
4203 	int j;
4204 
4205 	/* Build the command word. */
4206 	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
4207 
	/* Translate the offset for buffered flash; not needed on the 5709. */
4209 	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4210 		offset = ((offset / bp->flash_info->page_size) <<
4211 			   bp->flash_info->page_bits) +
4212 			  (offset % bp->flash_info->page_size);
4213 	}
4214 
4215 	/* Need to clear DONE bit separately. */
4216 	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4217 
4218 	/* Address of the NVRAM to read from. */
4219 	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4220 
4221 	/* Issue a read command. */
4222 	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4223 
4224 	/* Wait for completion. */
4225 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4226 		u32 val;
4227 
4228 		udelay(5);
4229 
4230 		val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4231 		if (val & BNX2_NVM_COMMAND_DONE) {
4232 			__be32 v = cpu_to_be32(BNX2_RD(bp, BNX2_NVM_READ));
4233 			memcpy(ret_val, &v, 4);
4234 			break;
4235 		}
4236 	}
4237 	if (j >= NVRAM_TIMEOUT_COUNT)
4238 		return -EBUSY;
4239 
4240 	return 0;
4241 }
4242 
4243 
4244 static int
4245 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4246 {
4247 	u32 cmd;
4248 	__be32 val32;
4249 	int j;
4250 
4251 	/* Build the command word. */
4252 	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4253 
	/* Translate the offset for buffered flash; not needed on the 5709. */
4255 	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4256 		offset = ((offset / bp->flash_info->page_size) <<
4257 			  bp->flash_info->page_bits) +
4258 			 (offset % bp->flash_info->page_size);
4259 	}
4260 
4261 	/* Need to clear DONE bit separately. */
4262 	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4263 
4264 	memcpy(&val32, val, 4);
4265 
4266 	/* Write the data. */
4267 	BNX2_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4268 
4269 	/* Address of the NVRAM to write to. */
4270 	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4271 
4272 	/* Issue the write command. */
4273 	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4274 
4275 	/* Wait for completion. */
4276 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4277 		udelay(5);
4278 
4279 		if (BNX2_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4280 			break;
4281 	}
4282 	if (j >= NVRAM_TIMEOUT_COUNT)
4283 		return -EBUSY;
4284 
4285 	return 0;
4286 }
4287 
4288 static int
4289 bnx2_init_nvram(struct bnx2 *bp)
4290 {
4291 	u32 val;
4292 	int j, entry_count, rc = 0;
4293 	const struct flash_spec *flash;
4294 
4295 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4296 		bp->flash_info = &flash_5709;
4297 		goto get_flash_size;
4298 	}
4299 
4300 	/* Determine the selected interface. */
4301 	val = BNX2_RD(bp, BNX2_NVM_CFG1);
4302 
4303 	entry_count = ARRAY_SIZE(flash_table);
4304 
	if (val & 0x40000000) {
		/* Flash interface has been reconfigured */
4308 		for (j = 0, flash = &flash_table[0]; j < entry_count;
4309 		     j++, flash++) {
4310 			if ((val & FLASH_BACKUP_STRAP_MASK) ==
4311 			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
4312 				bp->flash_info = flash;
4313 				break;
4314 			}
4315 		}
4316 	}
4317 	else {
4318 		u32 mask;
4319 		/* Not yet been reconfigured */
4320 
4321 		if (val & (1 << 23))
4322 			mask = FLASH_BACKUP_STRAP_MASK;
4323 		else
4324 			mask = FLASH_STRAP_MASK;
4325 
4326 		for (j = 0, flash = &flash_table[0]; j < entry_count;
4327 			j++, flash++) {
4328 
4329 			if ((val & mask) == (flash->strapping & mask)) {
4330 				bp->flash_info = flash;
4331 
4332 				/* Request access to the flash interface. */
4333 				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4334 					return rc;
4335 
4336 				/* Enable access to flash interface */
4337 				bnx2_enable_nvram_access(bp);
4338 
4339 				/* Reconfigure the flash interface */
4340 				BNX2_WR(bp, BNX2_NVM_CFG1, flash->config1);
4341 				BNX2_WR(bp, BNX2_NVM_CFG2, flash->config2);
4342 				BNX2_WR(bp, BNX2_NVM_CFG3, flash->config3);
4343 				BNX2_WR(bp, BNX2_NVM_WRITE1, flash->write1);
4344 
4345 				/* Disable access to flash interface */
4346 				bnx2_disable_nvram_access(bp);
4347 				bnx2_release_nvram_lock(bp);
4348 
4349 				break;
4350 			}
4351 		}
4352 	} /* if (val & 0x40000000) */
4353 
4354 	if (j == entry_count) {
4355 		bp->flash_info = NULL;
4356 		pr_alert("Unknown flash/EEPROM type\n");
4357 		return -ENODEV;
4358 	}
4359 
4360 get_flash_size:
4361 	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
4362 	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
4363 	if (val)
4364 		bp->flash_size = val;
4365 	else
4366 		bp->flash_size = bp->flash_info->total_size;
4367 
4368 	return rc;
4369 }
4370 
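/* Read an arbitrary byte range from NVRAM.  The hardware only transfers
 * aligned 32-bit words framed by FIRST/LAST command flags, so a misaligned
 * head and a short tail are each read through a 4-byte bounce buffer and
 * only the requested bytes copied out; full words in the middle go
 * straight into the caller's buffer.
 */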
4371 static int
4372 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4373 		int buf_size)
4374 {
4375 	int rc = 0;
4376 	u32 cmd_flags, offset32, len32, extra;
4377 
4378 	if (buf_size == 0)
4379 		return 0;
4380 
4381 	/* Request access to the flash interface. */
4382 	if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4383 		return rc;
4384 
4385 	/* Enable access to flash interface */
4386 	bnx2_enable_nvram_access(bp);
4387 
4388 	len32 = buf_size;
4389 	offset32 = offset;
4390 	extra = 0;
4391 
4392 	cmd_flags = 0;
4393 
4394 	if (offset32 & 3) {
4395 		u8 buf[4];
4396 		u32 pre_len;
4397 
4398 		offset32 &= ~3;
4399 		pre_len = 4 - (offset & 3);
4400 
4401 		if (pre_len >= len32) {
4402 			pre_len = len32;
4403 			cmd_flags = BNX2_NVM_COMMAND_FIRST |
4404 				    BNX2_NVM_COMMAND_LAST;
4405 		}
4406 		else {
4407 			cmd_flags = BNX2_NVM_COMMAND_FIRST;
4408 		}
4409 
4410 		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4411 
4412 		if (rc)
4413 			return rc;
4414 
4415 		memcpy(ret_buf, buf + (offset & 3), pre_len);
4416 
4417 		offset32 += 4;
4418 		ret_buf += pre_len;
4419 		len32 -= pre_len;
4420 	}
4421 	if (len32 & 3) {
4422 		extra = 4 - (len32 & 3);
4423 		len32 = (len32 + 4) & ~3;
4424 	}
4425 
4426 	if (len32 == 4) {
4427 		u8 buf[4];
4428 
4429 		if (cmd_flags)
4430 			cmd_flags = BNX2_NVM_COMMAND_LAST;
4431 		else
4432 			cmd_flags = BNX2_NVM_COMMAND_FIRST |
4433 				    BNX2_NVM_COMMAND_LAST;
4434 
4435 		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4436 
4437 		memcpy(ret_buf, buf, 4 - extra);
4438 	}
4439 	else if (len32 > 0) {
4440 		u8 buf[4];
4441 
4442 		/* Read the first word. */
4443 		if (cmd_flags)
4444 			cmd_flags = 0;
4445 		else
4446 			cmd_flags = BNX2_NVM_COMMAND_FIRST;
4447 
4448 		rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4449 
4450 		/* Advance to the next dword. */
4451 		offset32 += 4;
4452 		ret_buf += 4;
4453 		len32 -= 4;
4454 
4455 		while (len32 > 4 && rc == 0) {
4456 			rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4457 
4458 			/* Advance to the next dword. */
4459 			offset32 += 4;
4460 			ret_buf += 4;
4461 			len32 -= 4;
4462 		}
4463 
4464 		if (rc)
4465 			return rc;
4466 
4467 		cmd_flags = BNX2_NVM_COMMAND_LAST;
4468 		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4469 
4470 		memcpy(ret_buf, buf, 4 - extra);
4471 	}
4472 
4473 	/* Disable access to flash interface */
4474 	bnx2_disable_nvram_access(bp);
4475 
4476 	bnx2_release_nvram_lock(bp);
4477 
4478 	return rc;
4479 }
4480 
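/* Write an arbitrary byte range to NVRAM.  Misaligned head and tail bytes
 * are first merged with the existing flash contents in a bounce buffer.
 * For non-buffered flash each affected page must then be read in full,
 * erased, and rewritten: old data from page start to data start, the new
 * data, and old data from data end to page end.
 */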
4481 static int
4482 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4483 		int buf_size)
4484 {
4485 	u32 written, offset32, len32;
4486 	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4487 	int rc = 0;
4488 	int align_start, align_end;
4489 
4490 	buf = data_buf;
4491 	offset32 = offset;
4492 	len32 = buf_size;
4493 	align_start = align_end = 0;
4494 
4495 	if ((align_start = (offset32 & 3))) {
4496 		offset32 &= ~3;
4497 		len32 += align_start;
4498 		if (len32 < 4)
4499 			len32 = 4;
4500 		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4501 			return rc;
4502 	}
4503 
4504 	if (len32 & 3) {
4505 		align_end = 4 - (len32 & 3);
4506 		len32 += align_end;
4507 		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4508 			return rc;
4509 	}
4510 
4511 	if (align_start || align_end) {
4512 		align_buf = kmalloc(len32, GFP_KERNEL);
4513 		if (align_buf == NULL)
4514 			return -ENOMEM;
4515 		if (align_start) {
4516 			memcpy(align_buf, start, 4);
4517 		}
4518 		if (align_end) {
4519 			memcpy(align_buf + len32 - 4, end, 4);
4520 		}
4521 		memcpy(align_buf + align_start, data_buf, buf_size);
4522 		buf = align_buf;
4523 	}
4524 
4525 	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4526 		flash_buffer = kmalloc(264, GFP_KERNEL);
4527 		if (flash_buffer == NULL) {
4528 			rc = -ENOMEM;
4529 			goto nvram_write_end;
4530 		}
4531 	}
4532 
4533 	written = 0;
4534 	while ((written < len32) && (rc == 0)) {
4535 		u32 page_start, page_end, data_start, data_end;
4536 		u32 addr, cmd_flags;
4537 		int i;
4538 
		/* Find the page_start addr */
4540 		page_start = offset32 + written;
4541 		page_start -= (page_start % bp->flash_info->page_size);
4542 		/* Find the page_end addr */
4543 		page_end = page_start + bp->flash_info->page_size;
4544 		/* Find the data_start addr */
4545 		data_start = (written == 0) ? offset32 : page_start;
4546 		/* Find the data_end addr */
4547 		data_end = (page_end > offset32 + len32) ?
4548 			(offset32 + len32) : page_end;
4549 
4550 		/* Request access to the flash interface. */
4551 		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4552 			goto nvram_write_end;
4553 
4554 		/* Enable access to flash interface */
4555 		bnx2_enable_nvram_access(bp);
4556 
4557 		cmd_flags = BNX2_NVM_COMMAND_FIRST;
4558 		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4559 			int j;
4560 
			/* Read the whole page into the buffer
			 * (non-buffered flash only) */
4563 			for (j = 0; j < bp->flash_info->page_size; j += 4) {
4564 				if (j == (bp->flash_info->page_size - 4)) {
4565 					cmd_flags |= BNX2_NVM_COMMAND_LAST;
4566 				}
4567 				rc = bnx2_nvram_read_dword(bp,
4568 					page_start + j,
4569 					&flash_buffer[j],
4570 					cmd_flags);
4571 
4572 				if (rc)
4573 					goto nvram_write_end;
4574 
4575 				cmd_flags = 0;
4576 			}
4577 		}
4578 
4579 		/* Enable writes to flash interface (unlock write-protect) */
4580 		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4581 			goto nvram_write_end;
4582 
4583 		/* Loop to write back the buffer data from page_start to
4584 		 * data_start */
4585 		i = 0;
4586 		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4587 			/* Erase the page */
4588 			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4589 				goto nvram_write_end;
4590 
4591 			/* Re-enable the write again for the actual write */
4592 			bnx2_enable_nvram_write(bp);
4593 
4594 			for (addr = page_start; addr < data_start;
4595 				addr += 4, i += 4) {
4596 
4597 				rc = bnx2_nvram_write_dword(bp, addr,
4598 					&flash_buffer[i], cmd_flags);
4599 
4600 				if (rc != 0)
4601 					goto nvram_write_end;
4602 
4603 				cmd_flags = 0;
4604 			}
4605 		}
4606 
4607 		/* Loop to write the new data from data_start to data_end */
4608 		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4609 			if ((addr == page_end - 4) ||
4610 				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4611 				 (addr == data_end - 4))) {
4612 
4613 				cmd_flags |= BNX2_NVM_COMMAND_LAST;
4614 			}
4615 			rc = bnx2_nvram_write_dword(bp, addr, buf,
4616 				cmd_flags);
4617 
4618 			if (rc != 0)
4619 				goto nvram_write_end;
4620 
4621 			cmd_flags = 0;
4622 			buf += 4;
4623 		}
4624 
4625 		/* Loop to write back the buffer data from data_end
4626 		 * to page_end */
4627 		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4628 			for (addr = data_end; addr < page_end;
4629 				addr += 4, i += 4) {
4630 
				if (addr == page_end - 4)
					cmd_flags = BNX2_NVM_COMMAND_LAST;
4634 				rc = bnx2_nvram_write_dword(bp, addr,
4635 					&flash_buffer[i], cmd_flags);
4636 
4637 				if (rc != 0)
4638 					goto nvram_write_end;
4639 
4640 				cmd_flags = 0;
4641 			}
4642 		}
4643 
4644 		/* Disable writes to flash interface (lock write-protect) */
4645 		bnx2_disable_nvram_write(bp);
4646 
4647 		/* Disable access to flash interface */
4648 		bnx2_disable_nvram_access(bp);
4649 		bnx2_release_nvram_lock(bp);
4650 
4651 		/* Increment written */
4652 		written += data_end - data_start;
4653 	}
4654 
4655 nvram_write_end:
4656 	kfree(flash_buffer);
4657 	kfree(align_buf);
4658 	return rc;
4659 }
4660 
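/* Query the firmware capability mailbox in shared memory.  When the
 * signature matches, the driver learns whether the firmware can keep VLAN
 * tagging enabled and whether the port is driven by a remote PHY, and it
 * acknowledges the capabilities it uses via BNX2_DRV_ACK_CAP_MB.
 */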
4661 static void
4662 bnx2_init_fw_cap(struct bnx2 *bp)
4663 {
4664 	u32 val, sig = 0;
4665 
4666 	bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4667 	bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4668 
4669 	if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4670 		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4671 
4672 	val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4673 	if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4674 		return;
4675 
4676 	if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4677 		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4678 		sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4679 	}
4680 
4681 	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4682 	    (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4683 		u32 link;
4684 
4685 		bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4686 
4687 		link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4688 		if (link & BNX2_LINK_STATUS_SERDES_LINK)
4689 			bp->phy_port = PORT_FIBRE;
4690 		else
4691 			bp->phy_port = PORT_TP;
4692 
4693 		sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4694 		       BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4695 	}
4696 
4697 	if (netif_running(bp->dev) && sig)
4698 		bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4699 }
4700 
4701 static void
4702 bnx2_setup_msix_tbl(struct bnx2 *bp)
4703 {
4704 	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
4705 
4706 	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
4707 	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
4708 }
4709 
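/* Perform a chip reset.  DMA activity is quiesced first (via the enable
 * bits on 5706/5708, via BNX2_MISC_NEW_CORE_CTL on the 5709), the
 * firmware is handshaked before and after the reset, and a reset
 * signature is left in shared memory so the bootcode treats this as a
 * driver-initiated soft reset.
 */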
4710 static int
4711 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4712 {
4713 	u32 val;
4714 	int i, rc = 0;
4715 	u8 old_port;
4716 
4717 	/* Wait for the current PCI transaction to complete before
4718 	 * issuing a reset. */
4719 	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
4720 	    (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
4721 		BNX2_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4722 			BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4723 			BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4724 			BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4725 			BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4726 		val = BNX2_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4727 		udelay(5);
4728 	} else {  /* 5709 */
4729 		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4730 		val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4731 		BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4732 		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4733 
4734 		for (i = 0; i < 100; i++) {
4735 			msleep(1);
4736 			val = BNX2_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
4737 			if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
4738 				break;
4739 		}
4740 	}
4741 
4742 	/* Wait for the firmware to tell us it is ok to issue a reset. */
4743 	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
4744 
4745 	/* Deposit a driver reset signature so the firmware knows that
4746 	 * this is a soft reset. */
4747 	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
4748 		      BNX2_DRV_RESET_SIGNATURE_MAGIC);
4749 
	/* Do a dummy read to force the chip to complete all current
	 * transactions before we issue a reset.
	 */
4752 	val = BNX2_RD(bp, BNX2_MISC_ID);
4753 
4754 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4755 		BNX2_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4756 		BNX2_RD(bp, BNX2_MISC_COMMAND);
4757 		udelay(5);
4758 
4759 		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4760 		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4761 
4762 		BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4763 
4764 	} else {
4765 		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4766 		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4767 		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4768 
4769 		/* Chip reset. */
4770 		BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4771 
4772 		/* Reading back any register after chip reset will hang the
4773 		 * bus on 5706 A0 and A1.  The msleep below provides plenty
4774 		 * of margin for write posting.
4775 		 */
4776 		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
4777 		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1))
4778 			msleep(20);
4779 
		/* Reset takes approximately 30 usec */
4781 		for (i = 0; i < 10; i++) {
4782 			val = BNX2_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4783 			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4784 				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4785 				break;
4786 			udelay(10);
4787 		}
4788 
4789 		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4790 			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4791 			pr_err("Chip reset did not complete\n");
4792 			return -EBUSY;
4793 		}
4794 	}
4795 
4796 	/* Make sure byte swapping is properly configured. */
4797 	val = BNX2_RD(bp, BNX2_PCI_SWAP_DIAG0);
4798 	if (val != 0x01020304) {
4799 		pr_err("Chip not in correct endian mode\n");
4800 		return -ENODEV;
4801 	}
4802 
4803 	/* Wait for the firmware to finish its initialization. */
4804 	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
4805 	if (rc)
4806 		return rc;
4807 
4808 	spin_lock_bh(&bp->phy_lock);
4809 	old_port = bp->phy_port;
4810 	bnx2_init_fw_cap(bp);
4811 	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
4812 	    old_port != bp->phy_port)
4813 		bnx2_set_default_remote_link(bp);
4814 	spin_unlock_bh(&bp->phy_lock);
4815 
4816 	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
		/* Adjust the voltage regulator two steps lower.  The default
		 * value of this register is 0x0000000e. */
4819 		BNX2_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4820 
4821 		/* Remove bad rbuf memory from the free pool. */
4822 		rc = bnx2_alloc_bad_rbuf(bp);
4823 	}
4824 
4825 	if (bp->flags & BNX2_FLAG_USING_MSIX) {
4826 		bnx2_setup_msix_tbl(bp);
		/* Prevent MSIX table reads and writes from timing out */
4828 		BNX2_WR(bp, BNX2_MISC_ECO_HW_CTL,
4829 			BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
4830 	}
4831 
4832 	return rc;
4833 }
4834 
4835 static int
4836 bnx2_init_chip(struct bnx2 *bp)
4837 {
4838 	u32 val, mtu;
4839 	int rc, i;
4840 
4841 	/* Make sure the interrupt is not active. */
4842 	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4843 
4844 	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4845 	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4846 #ifdef __BIG_ENDIAN
4847 	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4848 #endif
4849 	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4850 	      DMA_READ_CHANS << 12 |
4851 	      DMA_WRITE_CHANS << 16;
4852 
4853 	val |= (0x2 << 20) | (1 << 11);
4854 
4855 	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4856 		val |= (1 << 23);
4857 
4858 	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) &&
4859 	    (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0) &&
4860 	    !(bp->flags & BNX2_FLAG_PCIX))
4861 		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4862 
4863 	BNX2_WR(bp, BNX2_DMA_CONFIG, val);
4864 
4865 	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
4866 		val = BNX2_RD(bp, BNX2_TDMA_CONFIG);
4867 		val |= BNX2_TDMA_CONFIG_ONE_DMA;
4868 		BNX2_WR(bp, BNX2_TDMA_CONFIG, val);
4869 	}
4870 
4871 	if (bp->flags & BNX2_FLAG_PCIX) {
4872 		u16 val16;
4873 
4874 		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4875 				     &val16);
4876 		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4877 				      val16 & ~PCI_X_CMD_ERO);
4878 	}
4879 
4880 	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4881 		BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4882 		BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4883 		BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4884 
4885 	/* Initialize context mapping and zero out the quick contexts.  The
4886 	 * context block must have already been enabled. */
4887 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4888 		rc = bnx2_init_5709_context(bp);
4889 		if (rc)
4890 			return rc;
4891 	} else
4892 		bnx2_init_context(bp);
4893 
4894 	if ((rc = bnx2_init_cpus(bp)) != 0)
4895 		return rc;
4896 
4897 	bnx2_init_nvram(bp);
4898 
4899 	bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4900 
4901 	val = BNX2_RD(bp, BNX2_MQ_CONFIG);
4902 	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4903 	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4904 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4905 		val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
4906 		if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
4907 			val |= BNX2_MQ_CONFIG_HALT_DIS;
4908 	}
4909 
4910 	BNX2_WR(bp, BNX2_MQ_CONFIG, val);
4911 
4912 	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4913 	BNX2_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4914 	BNX2_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4915 
4916 	val = (BNX2_PAGE_BITS - 8) << 24;
4917 	BNX2_WR(bp, BNX2_RV2P_CONFIG, val);
4918 
4919 	/* Configure page size. */
4920 	val = BNX2_RD(bp, BNX2_TBDR_CONFIG);
4921 	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4922 	val |= (BNX2_PAGE_BITS - 8) << 24 | 0x40;
4923 	BNX2_WR(bp, BNX2_TBDR_CONFIG, val);
4924 
4925 	val = bp->mac_addr[0] +
4926 	      (bp->mac_addr[1] << 8) +
4927 	      (bp->mac_addr[2] << 16) +
4928 	      bp->mac_addr[3] +
4929 	      (bp->mac_addr[4] << 8) +
4930 	      (bp->mac_addr[5] << 16);
4931 	BNX2_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4932 
4933 	/* Program the MTU.  Also include 4 bytes for CRC32. */
4934 	mtu = bp->dev->mtu;
4935 	val = mtu + ETH_HLEN + ETH_FCS_LEN;
4936 	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4937 		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4938 	BNX2_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4939 
4940 	if (mtu < 1500)
4941 		mtu = 1500;
4942 
4943 	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
4944 	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
4945 	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
4946 
4947 	memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
4948 	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4949 		bp->bnx2_napi[i].last_status_idx = 0;
4950 
4951 	bp->idle_chk_status_idx = 0xffff;
4952 
4953 	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4954 
4955 	/* Set up how to generate a link change interrupt. */
4956 	BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4957 
4958 	BNX2_WR(bp, BNX2_HC_STATUS_ADDR_L,
4959 		(u64) bp->status_blk_mapping & 0xffffffff);
4960 	BNX2_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4961 
4962 	BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4963 		(u64) bp->stats_blk_mapping & 0xffffffff);
4964 	BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4965 		(u64) bp->stats_blk_mapping >> 32);
4966 
4967 	BNX2_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4968 		(bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4969 
4970 	BNX2_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4971 		(bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4972 
4973 	BNX2_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4974 		(bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4975 
4976 	BNX2_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4977 
4978 	BNX2_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4979 
4980 	BNX2_WR(bp, BNX2_HC_COM_TICKS,
4981 		(bp->com_ticks_int << 16) | bp->com_ticks);
4982 
4983 	BNX2_WR(bp, BNX2_HC_CMD_TICKS,
4984 		(bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4985 
4986 	if (bp->flags & BNX2_FLAG_BROKEN_STATS)
4987 		BNX2_WR(bp, BNX2_HC_STATS_TICKS, 0);
4988 	else
4989 		BNX2_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4990 	BNX2_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */
4991 
	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) {
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	} else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}
4998 
4999 	if (bp->flags & BNX2_FLAG_USING_MSIX) {
5000 		BNX2_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
5001 			BNX2_HC_MSIX_BIT_VECTOR_VAL);
5002 
5003 		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
5004 	}
5005 
5006 	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
5007 		val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;
5008 
5009 	BNX2_WR(bp, BNX2_HC_CONFIG, val);
5010 
5011 	if (bp->rx_ticks < 25)
5012 		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1);
5013 	else
5014 		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 0);
5015 
5016 	for (i = 1; i < bp->irq_nvecs; i++) {
5017 		u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
5018 			   BNX2_HC_SB_CONFIG_1;
5019 
5020 		BNX2_WR(bp, base,
5021 			BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
5022 			BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
5023 			BNX2_HC_SB_CONFIG_1_ONE_SHOT);
5024 
5025 		BNX2_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
5026 			(bp->tx_quick_cons_trip_int << 16) |
5027 			 bp->tx_quick_cons_trip);
5028 
5029 		BNX2_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
5030 			(bp->tx_ticks_int << 16) | bp->tx_ticks);
5031 
5032 		BNX2_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
5033 			(bp->rx_quick_cons_trip_int << 16) |
5034 			bp->rx_quick_cons_trip);
5035 
5036 		BNX2_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
5037 			(bp->rx_ticks_int << 16) | bp->rx_ticks);
5038 	}
5039 
5040 	/* Clear internal stats counters. */
5041 	BNX2_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
5042 
5043 	BNX2_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
5044 
5045 	/* Initialize the receive filter. */
5046 	bnx2_set_rx_mode(bp->dev);
5047 
5048 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5049 		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
5050 		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
5051 		BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
5052 	}
5053 	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
5054 			  1, 0);
5055 
5056 	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
5057 	BNX2_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
5058 
5059 	udelay(20);
5060 
5061 	bp->hc_cmd = BNX2_RD(bp, BNX2_HC_COMMAND);
5062 
5063 	return rc;
5064 }
5065 
5066 static void
5067 bnx2_clear_ring_states(struct bnx2 *bp)
5068 {
5069 	struct bnx2_napi *bnapi;
5070 	struct bnx2_tx_ring_info *txr;
5071 	struct bnx2_rx_ring_info *rxr;
5072 	int i;
5073 
5074 	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5075 		bnapi = &bp->bnx2_napi[i];
5076 		txr = &bnapi->tx_ring;
5077 		rxr = &bnapi->rx_ring;
5078 
5079 		txr->tx_cons = 0;
5080 		txr->hw_tx_cons = 0;
5081 		rxr->rx_prod_bseq = 0;
5082 		rxr->rx_prod = 0;
5083 		rxr->rx_cons = 0;
5084 		rxr->rx_pg_prod = 0;
5085 		rxr->rx_pg_cons = 0;
5086 	}
5087 }
5088 
5089 static void
5090 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5091 {
5092 	u32 val, offset0, offset1, offset2, offset3;
5093 	u32 cid_addr = GET_CID_ADDR(cid);
5094 
5095 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5096 		offset0 = BNX2_L2CTX_TYPE_XI;
5097 		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5098 		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5099 		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5100 	} else {
5101 		offset0 = BNX2_L2CTX_TYPE;
5102 		offset1 = BNX2_L2CTX_CMD_TYPE;
5103 		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5104 		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5105 	}
5106 	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
5107 	bnx2_ctx_wr(bp, cid_addr, offset0, val);
5108 
5109 	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5110 	bnx2_ctx_wr(bp, cid_addr, offset1, val);
5111 
5112 	val = (u64) txr->tx_desc_mapping >> 32;
5113 	bnx2_ctx_wr(bp, cid_addr, offset2, val);
5114 
5115 	val = (u64) txr->tx_desc_mapping & 0xffffffff;
5116 	bnx2_ctx_wr(bp, cid_addr, offset3, val);
5117 }
5118 
5119 static void
5120 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5121 {
5122 	struct bnx2_tx_bd *txbd;
5123 	u32 cid = TX_CID;
5124 	struct bnx2_napi *bnapi;
5125 	struct bnx2_tx_ring_info *txr;
5126 
5127 	bnapi = &bp->bnx2_napi[ring_num];
5128 	txr = &bnapi->tx_ring;
5129 
	if (ring_num)
		cid = TX_TSS_CID + ring_num - 1;
5134 
5135 	bp->tx_wake_thresh = bp->tx_ring_size / 2;
5136 
5137 	txbd = &txr->tx_desc_ring[BNX2_MAX_TX_DESC_CNT];
5138 
5139 	txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5140 	txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5141 
5142 	txr->tx_prod = 0;
5143 	txr->tx_prod_bseq = 0;
5144 
5145 	txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5146 	txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5147 
5148 	bnx2_init_tx_context(bp, cid, txr);
5149 }
5150 
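/* Initialize one chain of RX buffer descriptor pages.  Every descriptor
 * gets the buffer size and START/END flags; the final descriptor slot of
 * each page is used as a link holding the next page's DMA address, with
 * the last page wrapping back to the first to form a circular ring.
 */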
5151 static void
5152 bnx2_init_rxbd_rings(struct bnx2_rx_bd *rx_ring[], dma_addr_t dma[],
5153 		     u32 buf_size, int num_rings)
5154 {
5155 	int i;
5156 	struct bnx2_rx_bd *rxbd;
5157 
5158 	for (i = 0; i < num_rings; i++) {
5159 		int j;
5160 
5161 		rxbd = &rx_ring[i][0];
5162 		for (j = 0; j < BNX2_MAX_RX_DESC_CNT; j++, rxbd++) {
5163 			rxbd->rx_bd_len = buf_size;
5164 			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5165 		}
5166 		if (i == (num_rings - 1))
5167 			j = 0;
5168 		else
5169 			j = i + 1;
5170 		rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5171 		rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5172 	}
5173 }
5174 
5175 static void
5176 bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
5177 {
5178 	int i;
5179 	u16 prod, ring_prod;
5180 	u32 cid, rx_cid_addr, val;
5181 	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
5182 	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5183 
5184 	if (ring_num == 0)
5185 		cid = RX_CID;
5186 	else
5187 		cid = RX_RSS_CID + ring_num - 1;
5188 
5189 	rx_cid_addr = GET_CID_ADDR(cid);
5190 
5191 	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
5192 			     bp->rx_buf_use_size, bp->rx_max_ring);
5193 
5194 	bnx2_init_rx_context(bp, cid);
5195 
5196 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5197 		val = BNX2_RD(bp, BNX2_MQ_MAP_L2_5);
5198 		BNX2_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
5199 	}
5200 
5201 	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
5202 	if (bp->rx_pg_ring_size) {
5203 		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
5204 				     rxr->rx_pg_desc_mapping,
5205 				     PAGE_SIZE, bp->rx_max_pg_ring);
5206 		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
5207 		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
5208 		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
5209 		       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);
5210 
5211 		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
5212 		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
5213 
5214 		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
5215 		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
5216 
5217 		if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5218 			BNX2_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
5219 	}
5220 
5221 	val = (u64) rxr->rx_desc_mapping[0] >> 32;
5222 	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
5223 
5224 	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
5225 	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
5226 
5227 	ring_prod = prod = rxr->rx_pg_prod;
5228 	for (i = 0; i < bp->rx_pg_ring_size; i++) {
5229 		if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
5230 			netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
5231 				    ring_num, i, bp->rx_pg_ring_size);
5232 			break;
5233 		}
5234 		prod = BNX2_NEXT_RX_BD(prod);
5235 		ring_prod = BNX2_RX_PG_RING_IDX(prod);
5236 	}
5237 	rxr->rx_pg_prod = prod;
5238 
5239 	ring_prod = prod = rxr->rx_prod;
5240 	for (i = 0; i < bp->rx_ring_size; i++) {
5241 		if (bnx2_alloc_rx_data(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
5242 			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
5243 				    ring_num, i, bp->rx_ring_size);
5244 			break;
5245 		}
5246 		prod = BNX2_NEXT_RX_BD(prod);
5247 		ring_prod = BNX2_RX_RING_IDX(prod);
5248 	}
5249 	rxr->rx_prod = prod;
5250 
5251 	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
5252 	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
5253 	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;
5254 
5255 	BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
5256 	BNX2_WR16(bp, rxr->rx_bidx_addr, prod);
5257 
5258 	BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
5259 }
5260 
5261 static void
5262 bnx2_init_all_rings(struct bnx2 *bp)
5263 {
5264 	int i;
5265 	u32 val;
5266 
5267 	bnx2_clear_ring_states(bp);
5268 
5269 	BNX2_WR(bp, BNX2_TSCH_TSS_CFG, 0);
5270 	for (i = 0; i < bp->num_tx_rings; i++)
5271 		bnx2_init_tx_ring(bp, i);
5272 
5273 	if (bp->num_tx_rings > 1)
5274 		BNX2_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
5275 			(TX_TSS_CID << 7));
5276 
5277 	BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
5278 	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);
5279 
5280 	for (i = 0; i < bp->num_rx_rings; i++)
5281 		bnx2_init_rx_ring(bp, i);
5282 
5283 	if (bp->num_rx_rings > 1) {
5284 		u32 tbl_32 = 0;
5285 
5286 		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
5287 			int shift = (i % 8) << 2;
5288 
5289 			tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift;
5290 			if ((i % 8) == 7) {
5291 				BNX2_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32);
5292 				BNX2_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) |
5293 					BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
5294 					BNX2_RLUP_RSS_COMMAND_WRITE |
5295 					BNX2_RLUP_RSS_COMMAND_HASH_MASK);
5296 				tbl_32 = 0;
5297 			}
5298 		}
5299 
5300 		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
5301 		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;
5302 
5303 		BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
5304 
5305 	}
5306 }
5307 
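/* Compute how many descriptor pages a ring of ring_size entries needs,
 * rounded up to the next power of two and capped at max_size (itself a
 * power of two).  For example, assuming BNX2_MAX_RX_DESC_CNT is 255 with
 * 4 KiB pages, a 1000-entry ring needs 4 pages, and a 600-entry ring
 * needs 3, which is rounded up to 4.
 */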
5308 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5309 {
5310 	u32 max, num_rings = 1;
5311 
5312 	while (ring_size > BNX2_MAX_RX_DESC_CNT) {
5313 		ring_size -= BNX2_MAX_RX_DESC_CNT;
5314 		num_rings++;
5315 	}
	/* round num_rings up to the next power of 2, capped at max_size */
5317 	max = max_size;
5318 	while ((max & num_rings) == 0)
5319 		max >>= 1;
5320 
5321 	if (num_rings != max)
5322 		max <<= 1;
5323 
5324 	return max;
5325 }
5326 
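/* Size the RX rings for the current MTU.  If a full frame plus overhead
 * no longer fits in one page (and the chip can do jumbo placement), the
 * device switches to split mode: the header lands in a small buffer of
 * BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET bytes and the rest of the frame is
 * scattered into the separate page ring.
 */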
5327 static void
5328 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
5329 {
5330 	u32 rx_size, rx_space, jumbo_size;
5331 
5332 	/* 8 for CRC and VLAN */
5333 	rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
5334 
5335 	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
5336 		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5337 
5338 	bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
5339 	bp->rx_pg_ring_size = 0;
5340 	bp->rx_max_pg_ring = 0;
5341 	bp->rx_max_pg_ring_idx = 0;
5342 	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
5343 		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
5344 
5345 		jumbo_size = size * pages;
5346 		if (jumbo_size > BNX2_MAX_TOTAL_RX_PG_DESC_CNT)
5347 			jumbo_size = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;
5348 
5349 		bp->rx_pg_ring_size = jumbo_size;
5350 		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
5351 							BNX2_MAX_RX_PG_RINGS);
5352 		bp->rx_max_pg_ring_idx =
5353 			(bp->rx_max_pg_ring * BNX2_RX_DESC_CNT) - 1;
5354 		rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
5355 		bp->rx_copy_thresh = 0;
5356 	}
5357 
5358 	bp->rx_buf_use_size = rx_size;
	/* hw alignment + build_skb() overhead */
5360 	bp->rx_buf_size = SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
5361 		NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5362 	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
5363 	bp->rx_ring_size = size;
5364 	bp->rx_max_ring = bnx2_find_max_ring(size, BNX2_MAX_RX_RINGS);
5365 	bp->rx_max_ring_idx = (bp->rx_max_ring * BNX2_RX_DESC_CNT) - 1;
5366 }
5367 
5368 static void
5369 bnx2_free_tx_skbs(struct bnx2 *bp)
5370 {
5371 	int i;
5372 
5373 	for (i = 0; i < bp->num_tx_rings; i++) {
5374 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5375 		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5376 		int j;
5377 
5378 		if (txr->tx_buf_ring == NULL)
5379 			continue;
5380 
5381 		for (j = 0; j < BNX2_TX_DESC_CNT; ) {
5382 			struct bnx2_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
5383 			struct sk_buff *skb = tx_buf->skb;
5384 			int k, last;
5385 
5386 			if (skb == NULL) {
5387 				j = BNX2_NEXT_TX_BD(j);
5388 				continue;
5389 			}
5390 
5391 			dma_unmap_single(&bp->pdev->dev,
5392 					 dma_unmap_addr(tx_buf, mapping),
5393 					 skb_headlen(skb),
5394 					 PCI_DMA_TODEVICE);
5395 
5396 			tx_buf->skb = NULL;
5397 
5398 			last = tx_buf->nr_frags;
5399 			j = BNX2_NEXT_TX_BD(j);
5400 			for (k = 0; k < last; k++, j = BNX2_NEXT_TX_BD(j)) {
5401 				tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(j)];
5402 				dma_unmap_page(&bp->pdev->dev,
5403 					dma_unmap_addr(tx_buf, mapping),
5404 					skb_frag_size(&skb_shinfo(skb)->frags[k]),
5405 					PCI_DMA_TODEVICE);
5406 			}
5407 			dev_kfree_skb(skb);
5408 		}
5409 		netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
5410 	}
5411 }
5412 
5413 static void
5414 bnx2_free_rx_skbs(struct bnx2 *bp)
5415 {
5416 	int i;
5417 
5418 	for (i = 0; i < bp->num_rx_rings; i++) {
5419 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5420 		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5421 		int j;
5422 
5423 		if (rxr->rx_buf_ring == NULL)
5424 			return;
5425 
5426 		for (j = 0; j < bp->rx_max_ring_idx; j++) {
5427 			struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5428 			u8 *data = rx_buf->data;
5429 
5430 			if (data == NULL)
5431 				continue;
5432 
5433 			dma_unmap_single(&bp->pdev->dev,
5434 					 dma_unmap_addr(rx_buf, mapping),
5435 					 bp->rx_buf_use_size,
5436 					 PCI_DMA_FROMDEVICE);
5437 
5438 			rx_buf->data = NULL;
5439 
5440 			kfree(data);
5441 		}
5442 		for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5443 			bnx2_free_rx_page(bp, rxr, j);
5444 	}
5445 }
5446 
5447 static void
5448 bnx2_free_skbs(struct bnx2 *bp)
5449 {
5450 	bnx2_free_tx_skbs(bp);
5451 	bnx2_free_rx_skbs(bp);
5452 }
5453 
5454 static int
5455 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5456 {
5457 	int rc;
5458 
5459 	rc = bnx2_reset_chip(bp, reset_code);
5460 	bnx2_free_skbs(bp);
5461 	if (rc)
5462 		return rc;
5463 
5464 	if ((rc = bnx2_init_chip(bp)) != 0)
5465 		return rc;
5466 
5467 	bnx2_init_all_rings(bp);
5468 	return 0;
5469 }
5470 
5471 static int
5472 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5473 {
5474 	int rc;
5475 
5476 	if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5477 		return rc;
5478 
5479 	spin_lock_bh(&bp->phy_lock);
5480 	bnx2_init_phy(bp, reset_phy);
5481 	bnx2_set_link(bp);
5482 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5483 		bnx2_remote_phy_event(bp);
5484 	spin_unlock_bh(&bp->phy_lock);
5485 	return 0;
5486 }
5487 
5488 static int
5489 bnx2_shutdown_chip(struct bnx2 *bp)
5490 {
5491 	u32 reset_code;
5492 
5493 	if (bp->flags & BNX2_FLAG_NO_WOL)
5494 		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5495 	else if (bp->wol)
5496 		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5497 	else
5498 		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5499 
5500 	return bnx2_reset_chip(bp, reset_code);
5501 }
5502 
5503 static int
5504 bnx2_test_registers(struct bnx2 *bp)
5505 {
5506 	int ret;
5507 	int i, is_5709;
5508 	static const struct {
5509 		u16   offset;
5510 		u16   flags;
5511 #define BNX2_FL_NOT_5709	1
5512 		u32   rw_mask;
5513 		u32   ro_mask;
5514 	} reg_tbl[] = {
5515 		{ 0x006c, 0, 0x00000000, 0x0000003f },
5516 		{ 0x0090, 0, 0xffffffff, 0x00000000 },
5517 		{ 0x0094, 0, 0x00000000, 0x00000000 },
5518 
5519 		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
5520 		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5521 		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5522 		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
5523 		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
5524 		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5525 		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
5526 		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5527 		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5528 
5529 		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5530 		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5531 		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5532 		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5533 		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5534 		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5535 
5536 		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5537 		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
5539 
5540 		{ 0x1000, 0, 0x00000000, 0x00000001 },
5541 		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },
5542 
5543 		{ 0x1408, 0, 0x01c00800, 0x00000000 },
5544 		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
5545 		{ 0x14a8, 0, 0x00000000, 0x000001ff },
5546 		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
5547 		{ 0x14b0, 0, 0x00000002, 0x00000001 },
5548 		{ 0x14b8, 0, 0x00000000, 0x00000000 },
5549 		{ 0x14c0, 0, 0x00000000, 0x00000009 },
5550 		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
5551 		{ 0x14cc, 0, 0x00000000, 0x00000001 },
5552 		{ 0x14d0, 0, 0xffffffff, 0x00000000 },
5553 
5554 		{ 0x1800, 0, 0x00000000, 0x00000001 },
5555 		{ 0x1804, 0, 0x00000000, 0x00000003 },
5556 
5557 		{ 0x2800, 0, 0x00000000, 0x00000001 },
5558 		{ 0x2804, 0, 0x00000000, 0x00003f01 },
5559 		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
5560 		{ 0x2810, 0, 0xffff0000, 0x00000000 },
5561 		{ 0x2814, 0, 0xffff0000, 0x00000000 },
5562 		{ 0x2818, 0, 0xffff0000, 0x00000000 },
5563 		{ 0x281c, 0, 0xffff0000, 0x00000000 },
5564 		{ 0x2834, 0, 0xffffffff, 0x00000000 },
5565 		{ 0x2840, 0, 0x00000000, 0xffffffff },
5566 		{ 0x2844, 0, 0x00000000, 0xffffffff },
5567 		{ 0x2848, 0, 0xffffffff, 0x00000000 },
5568 		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },
5569 
5570 		{ 0x2c00, 0, 0x00000000, 0x00000011 },
5571 		{ 0x2c04, 0, 0x00000000, 0x00030007 },
5572 
5573 		{ 0x3c00, 0, 0x00000000, 0x00000001 },
5574 		{ 0x3c04, 0, 0x00000000, 0x00070000 },
5575 		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
5576 		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
5577 		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
5578 		{ 0x3c14, 0, 0x00000000, 0xffffffff },
5579 		{ 0x3c18, 0, 0x00000000, 0xffffffff },
5580 		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
5581 		{ 0x3c20, 0, 0xffffff00, 0x00000000 },
5582 
5583 		{ 0x5004, 0, 0x00000000, 0x0000007f },
5584 		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },
5585 
5586 		{ 0x5c00, 0, 0x00000000, 0x00000001 },
5587 		{ 0x5c04, 0, 0x00000000, 0x0003000f },
5588 		{ 0x5c08, 0, 0x00000003, 0x00000000 },
5589 		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
5590 		{ 0x5c10, 0, 0x00000000, 0xffffffff },
5591 		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
5592 		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
5593 		{ 0x5c88, 0, 0x00000000, 0x00077373 },
5594 		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },
5595 
5596 		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
5597 		{ 0x680c, 0, 0xffffffff, 0x00000000 },
5598 		{ 0x6810, 0, 0xffffffff, 0x00000000 },
5599 		{ 0x6814, 0, 0xffffffff, 0x00000000 },
5600 		{ 0x6818, 0, 0xffffffff, 0x00000000 },
5601 		{ 0x681c, 0, 0xffffffff, 0x00000000 },
5602 		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
5603 		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
5604 		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
5605 		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
5606 		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
5607 		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
5608 		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
5609 		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
5610 		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
5611 		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
5612 		{ 0x684c, 0, 0xffffffff, 0x00000000 },
5613 		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
5614 		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
5615 		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
5616 		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
5617 		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
5618 		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },
5619 
5620 		{ 0xffff, 0, 0x00000000, 0x00000000 },
5621 	};
5622 
5623 	ret = 0;
5624 	is_5709 = 0;
5625 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5626 		is_5709 = 1;
5627 
5628 	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
5629 		u32 offset, rw_mask, ro_mask, save_val, val;
5630 		u16 flags = reg_tbl[i].flags;
5631 
5632 		if (is_5709 && (flags & BNX2_FL_NOT_5709))
5633 			continue;
5634 
5635 		offset = (u32) reg_tbl[i].offset;
5636 		rw_mask = reg_tbl[i].rw_mask;
5637 		ro_mask = reg_tbl[i].ro_mask;
5638 
5639 		save_val = readl(bp->regview + offset);
5640 
5641 		writel(0, bp->regview + offset);
5642 
		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0)
			goto reg_test_err;

		if ((val & ro_mask) != (save_val & ro_mask))
			goto reg_test_err;

		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask)
			goto reg_test_err;

		if ((val & ro_mask) != (save_val & ro_mask))
			goto reg_test_err;
5662 
5663 		writel(save_val, bp->regview + offset);
5664 		continue;
5665 
5666 reg_test_err:
5667 		writel(save_val, bp->regview + offset);
5668 		ret = -ENODEV;
5669 		break;
5670 	}
5671 	return ret;
5672 }
5673 
5674 static int
5675 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5676 {
	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
		0xaaaaaaaa, 0xaa55aa55, 0x55aa55aa };
5679 	int i;
5680 
	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
5682 		u32 offset;
5683 
5684 		for (offset = 0; offset < size; offset += 4) {
5685 
5686 			bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5687 
5688 			if (bnx2_reg_rd_ind(bp, start + offset) !=
5689 				test_pattern[i]) {
5690 				return -ENODEV;
5691 			}
5692 		}
5693 	}
5694 	return 0;
5695 }
5696 
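/* Per-chip lists of on-chip memory ranges to exercise; each entry is an
 * { offset, length } pair and the lists end with an offset of 0xffffffff.
 */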
5697 static int
5698 bnx2_test_memory(struct bnx2 *bp)
5699 {
5700 	int ret = 0;
5701 	int i;
5702 	static struct mem_entry {
5703 		u32   offset;
5704 		u32   len;
5705 	} mem_tbl_5706[] = {
5706 		{ 0x60000,  0x4000 },
5707 		{ 0xa0000,  0x3000 },
5708 		{ 0xe0000,  0x4000 },
5709 		{ 0x120000, 0x4000 },
5710 		{ 0x1a0000, 0x4000 },
5711 		{ 0x160000, 0x4000 },
5712 		{ 0xffffffff, 0    },
5713 	},
5714 	mem_tbl_5709[] = {
5715 		{ 0x60000,  0x4000 },
5716 		{ 0xa0000,  0x3000 },
5717 		{ 0xe0000,  0x4000 },
5718 		{ 0x120000, 0x4000 },
5719 		{ 0x1a0000, 0x4000 },
5720 		{ 0xffffffff, 0    },
5721 	};
5722 	struct mem_entry *mem_tbl;
5723 
5724 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5725 		mem_tbl = mem_tbl_5709;
5726 	else
5727 		mem_tbl = mem_tbl_5706;
5728 
5729 	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		ret = bnx2_do_mem_test(bp, mem_tbl[i].offset, mem_tbl[i].len);
		if (ret != 0)
			return ret;
5734 	}
5735 
5736 	return ret;
5737 }
5738 
5739 #define BNX2_MAC_LOOPBACK	0
5740 #define BNX2_PHY_LOOPBACK	1
5741 
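/* Self-contained loopback self-test on ring 0: put the MAC or PHY into
 * loopback, transmit one patterned frame, force an interrupt-free
 * coalesce so the rings advance, then verify that the frame arrives
 * intact on the receive ring.
 */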
5742 static int
5743 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5744 {
5745 	unsigned int pkt_size, num_pkts, i;
5746 	struct sk_buff *skb;
5747 	u8 *data;
5748 	unsigned char *packet;
5749 	u16 rx_start_idx, rx_idx;
5750 	dma_addr_t map;
5751 	struct bnx2_tx_bd *txbd;
5752 	struct bnx2_sw_bd *rx_buf;
5753 	struct l2_fhdr *rx_hdr;
5754 	int ret = -ENODEV;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi = bnapi;
	struct bnx2_tx_ring_info *txr = &tx_napi->tx_ring;
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	} else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return 0;

		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	} else {
		return -EINVAL;
	}
5776 
5777 	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
5778 	skb = netdev_alloc_skb(bp->dev, pkt_size);
5779 	if (!skb)
5780 		return -ENOMEM;
5781 	packet = skb_put(skb, pkt_size);
5782 	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
5783 	memset(packet + ETH_ALEN, 0x0, 8);
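	/* Fill the payload after the 14-byte Ethernet header with an
	 * index-based pattern that the receive side can verify.
	 */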
5784 	for (i = 14; i < pkt_size; i++)
5785 		packet[i] = (unsigned char) (i & 0xff);
5786 
	map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
			     DMA_TO_DEVICE);
5789 	if (dma_mapping_error(&bp->pdev->dev, map)) {
5790 		dev_kfree_skb(skb);
5791 		return -EIO;
5792 	}
5793 
5794 	BNX2_WR(bp, BNX2_HC_COMMAND,
5795 		bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5796 
5797 	BNX2_RD(bp, BNX2_HC_COMMAND);
5798 
5799 	udelay(5);
5800 	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
5801 
5802 	num_pkts = 0;
5803 
5804 	txbd = &txr->tx_desc_ring[BNX2_TX_RING_IDX(txr->tx_prod)];
5805 
5806 	txbd->tx_bd_haddr_hi = (u64) map >> 32;
5807 	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5808 	txbd->tx_bd_mss_nbytes = pkt_size;
5809 	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5810 
5811 	num_pkts++;
5812 	txr->tx_prod = BNX2_NEXT_TX_BD(txr->tx_prod);
5813 	txr->tx_prod_bseq += pkt_size;
5814 
5815 	BNX2_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
5816 	BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
5817 
5818 	udelay(100);
5819 
5820 	BNX2_WR(bp, BNX2_HC_COMMAND,
5821 		bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5822 
5823 	BNX2_RD(bp, BNX2_HC_COMMAND);
5824 
5825 	udelay(5);
5826 
	dma_unmap_single(&bp->pdev->dev, map, pkt_size, DMA_TO_DEVICE);
5828 	dev_kfree_skb(skb);
5829 
5830 	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
5831 		goto loopback_test_done;
5832 
	rx_idx = bnx2_get_hw_rx_cons(bnapi);
	if (rx_idx != rx_start_idx + num_pkts)
		goto loopback_test_done;
5837 
5838 	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
5839 	data = rx_buf->data;
5840 
5841 	rx_hdr = get_l2_fhdr(data);
5842 	data = (u8 *)rx_hdr + BNX2_RX_OFFSET;
5843 
	dma_sync_single_for_cpu(&bp->pdev->dev,
		dma_unmap_addr(rx_buf, mapping),
		bp->rx_buf_use_size, DMA_FROM_DEVICE);
5847 
5848 	if (rx_hdr->l2_fhdr_status &
5849 		(L2_FHDR_ERRORS_BAD_CRC |
5850 		L2_FHDR_ERRORS_PHY_DECODE |
5851 		L2_FHDR_ERRORS_ALIGNMENT |
5852 		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {
		goto loopback_test_done;
	}
5857 
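	/* The hardware-reported packet length includes the 4-byte frame
	 * CRC; strip it before comparing against what was sent.
	 */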
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size)
		goto loopback_test_done;

	for (i = 14; i < pkt_size; i++) {
		if (data[i] != (unsigned char) (i & 0xff))
			goto loopback_test_done;
	}
5867 
5868 	ret = 0;
5869 
5870 loopback_test_done:
5871 	bp->loopback = 0;
5872 	return ret;
5873 }
5874 
5875 #define BNX2_MAC_LOOPBACK_FAILED	1
5876 #define BNX2_PHY_LOOPBACK_FAILED	2
5877 #define BNX2_LOOPBACK_FAILED		(BNX2_MAC_LOOPBACK_FAILED |	\
5878 					 BNX2_PHY_LOOPBACK_FAILED)
5879 
5880 static int
5881 bnx2_test_loopback(struct bnx2 *bp)
5882 {
5883 	int rc = 0;
5884 
5885 	if (!netif_running(bp->dev))
5886 		return BNX2_LOOPBACK_FAILED;
5887 
5888 	bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5889 	spin_lock_bh(&bp->phy_lock);
5890 	bnx2_init_phy(bp, 1);
5891 	spin_unlock_bh(&bp->phy_lock);
5892 	if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5893 		rc |= BNX2_MAC_LOOPBACK_FAILED;
5894 	if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5895 		rc |= BNX2_PHY_LOOPBACK_FAILED;
5896 	return rc;
5897 }
5898 
5899 #define NVRAM_SIZE 0x200
5900 #define CRC32_RESIDUAL 0xdebb20e3
5901 
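/* 0xdebb20e3 is the standard CRC-32 residual: running the CRC over a
 * block that ends with its own stored CRC yields this constant, implying
 * each 0x100-byte NVRAM block can be validated without locating the
 * checksum field itself.
 */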
5902 static int
5903 bnx2_test_nvram(struct bnx2 *bp)
5904 {
5905 	__be32 buf[NVRAM_SIZE / 4];
5906 	u8 *data = (u8 *) buf;
5907 	int rc = 0;
5908 	u32 magic, csum;
5909 
	rc = bnx2_nvram_read(bp, 0, data, 4);
	if (rc != 0)
		goto test_nvram_done;
5912 
	magic = be32_to_cpu(buf[0]);
5914 	if (magic != 0x669955aa) {
5915 		rc = -ENODEV;
5916 		goto test_nvram_done;
5917 	}
5918 
	rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE);
	if (rc != 0)
		goto test_nvram_done;
5921 
5922 	csum = ether_crc_le(0x100, data);
5923 	if (csum != CRC32_RESIDUAL) {
5924 		rc = -ENODEV;
5925 		goto test_nvram_done;
5926 	}
5927 
5928 	csum = ether_crc_le(0x100, data + 0x100);
	if (csum != CRC32_RESIDUAL)
		rc = -ENODEV;
5932 
5933 test_nvram_done:
5934 	return rc;
5935 }
5936 
5937 static int
5938 bnx2_test_link(struct bnx2 *bp)
5939 {
5940 	u32 bmsr;
5941 
5942 	if (!netif_running(bp->dev))
5943 		return -ENODEV;
5944 
5945 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5946 		if (bp->link_up)
5947 			return 0;
5948 		return -ENODEV;
5949 	}
5950 	spin_lock_bh(&bp->phy_lock);
5951 	bnx2_enable_bmsr1(bp);
5952 	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5953 	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5954 	bnx2_disable_bmsr1(bp);
5955 	spin_unlock_bh(&bp->phy_lock);
5956 
	if (bmsr & BMSR_LSTATUS)
		return 0;
5960 	return -ENODEV;
5961 }
5962 
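/* Verify that the NIC can actually raise an interrupt (used to detect
 * broken MSI): snapshot the status block index, force an immediate
 * coalesce, and poll for up to ~100 ms for the index to change.
 */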
5963 static int
5964 bnx2_test_intr(struct bnx2 *bp)
5965 {
5966 	int i;
5967 	u16 status_idx;
5968 
5969 	if (!netif_running(bp->dev))
5970 		return -ENODEV;
5971 
5972 	status_idx = BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5973 
5974 	/* This register is not touched during run-time. */
5975 	BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5976 	BNX2_RD(bp, BNX2_HC_COMMAND);
5977 
5978 	for (i = 0; i < 10; i++) {
		if ((BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
		    status_idx)
			break;
5984 
5985 		msleep_interruptible(10);
5986 	}
5987 	if (i < 10)
5988 		return 0;
5989 
5990 	return -ENODEV;
5991 }
5992 
5993 /* Determining link for parallel detection. */
5994 static int
5995 bnx2_5706_serdes_has_link(struct bnx2 *bp)
5996 {
5997 	u32 mode_ctl, an_dbg, exp;
5998 
5999 	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
6000 		return 0;
6001 
6002 	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
6003 	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
6004 
6005 	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
6006 		return 0;
6007 
6008 	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
6009 	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
6010 	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
6011 
6012 	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
6013 		return 0;
6014 
6015 	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
6016 	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
6017 	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
6018 
6019 	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
6020 		return 0;
6021 
6022 	return 1;
6023 }
6024 
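/* Periodic state machine for 5706 SerDes parallel detection: if
 * autonegotiation is not bringing the link up, force 1 Gbps full duplex
 * when a link partner is detected, and switch back to autoneg when the
 * partner appears to signal it.  Also force the link down on loss of
 * sync so that bnx2_set_link() sees the change.
 */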
6025 static void
6026 bnx2_5706_serdes_timer(struct bnx2 *bp)
6027 {
6028 	int check_link = 1;
6029 
6030 	spin_lock(&bp->phy_lock);
6031 	if (bp->serdes_an_pending) {
6032 		bp->serdes_an_pending--;
6033 		check_link = 0;
6034 	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6035 		u32 bmcr;
6036 
6037 		bp->current_interval = BNX2_TIMER_INTERVAL;
6038 
6039 		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6040 
6041 		if (bmcr & BMCR_ANENABLE) {
6042 			if (bnx2_5706_serdes_has_link(bp)) {
6043 				bmcr &= ~BMCR_ANENABLE;
6044 				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6045 				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
6046 				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
6047 			}
6048 		}
6049 	}
6050 	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
6051 		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
6052 		u32 phy2;
6053 
6054 		bnx2_write_phy(bp, 0x17, 0x0f01);
6055 		bnx2_read_phy(bp, 0x15, &phy2);
6056 		if (phy2 & 0x20) {
6057 			u32 bmcr;
6058 
6059 			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6060 			bmcr |= BMCR_ANENABLE;
6061 			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
6062 
6063 			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
6064 		}
6065 	} else
6066 		bp->current_interval = BNX2_TIMER_INTERVAL;
6067 
6068 	if (check_link) {
6069 		u32 val;
6070 
6071 		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
6072 		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6073 		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6074 
6075 		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
6076 			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
6077 				bnx2_5706s_force_link_dn(bp, 1);
6078 				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
6079 			} else
6080 				bnx2_set_link(bp);
6081 		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
6082 			bnx2_set_link(bp);
6083 	}
6084 	spin_unlock(&bp->phy_lock);
6085 }
6086 
6087 static void
6088 bnx2_5708_serdes_timer(struct bnx2 *bp)
6089 {
6090 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
6091 		return;
6092 
6093 	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
6094 		bp->serdes_an_pending = 0;
6095 		return;
6096 	}
6097 
6098 	spin_lock(&bp->phy_lock);
6099 	if (bp->serdes_an_pending)
6100 		bp->serdes_an_pending--;
6101 	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6102 		u32 bmcr;
6103 
6104 		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6105 		if (bmcr & BMCR_ANENABLE) {
6106 			bnx2_enable_forced_2g5(bp);
6107 			bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
6108 		} else {
6109 			bnx2_disable_forced_2g5(bp);
6110 			bp->serdes_an_pending = 2;
6111 			bp->current_interval = BNX2_TIMER_INTERVAL;
6112 		}
6113 
6114 	} else
6115 		bp->current_interval = BNX2_TIMER_INTERVAL;
6116 
6117 	spin_unlock(&bp->phy_lock);
6118 }
6119 
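/* Periodic driver timer: sends the management firmware heartbeat, works
 * around missed MSIs and corrupt statistics counters on affected chips,
 * and runs the per-chip SerDes state machines.  Reschedules itself.
 */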
6120 static void
6121 bnx2_timer(unsigned long data)
6122 {
6123 	struct bnx2 *bp = (struct bnx2 *) data;
6124 
6125 	if (!netif_running(bp->dev))
6126 		return;
6127 
6128 	if (atomic_read(&bp->intr_sem) != 0)
6129 		goto bnx2_restart_timer;
6130 
6131 	if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
6132 	     BNX2_FLAG_USING_MSI)
6133 		bnx2_chk_missed_msi(bp);
6134 
6135 	bnx2_send_heart_beat(bp);
6136 
6137 	bp->stats_blk->stat_FwRxDrop =
6138 		bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
6139 
6140 	/* workaround occasional corrupted counters */
6141 	if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
6142 		BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
6143 			BNX2_HC_COMMAND_STATS_NOW);
6144 
6145 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6146 		if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
6147 			bnx2_5706_serdes_timer(bp);
6148 		else
6149 			bnx2_5708_serdes_timer(bp);
6150 	}
6151 
6152 bnx2_restart_timer:
6153 	mod_timer(&bp->timer, jiffies + bp->current_interval);
6154 }
6155 
6156 static int
6157 bnx2_request_irq(struct bnx2 *bp)
6158 {
6159 	unsigned long flags;
6160 	struct bnx2_irq *irq;
6161 	int rc = 0, i;
6162 
6163 	if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6164 		flags = 0;
6165 	else
6166 		flags = IRQF_SHARED;
6167 
6168 	for (i = 0; i < bp->irq_nvecs; i++) {
6169 		irq = &bp->irq_tbl[i];
6170 		rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6171 				 &bp->bnx2_napi[i]);
6172 		if (rc)
6173 			break;
6174 		irq->requested = 1;
6175 	}
6176 	return rc;
6177 }
6178 
6179 static void
6180 __bnx2_free_irq(struct bnx2 *bp)
6181 {
6182 	struct bnx2_irq *irq;
6183 	int i;
6184 
6185 	for (i = 0; i < bp->irq_nvecs; i++) {
6186 		irq = &bp->irq_tbl[i];
6187 		if (irq->requested)
6188 			free_irq(irq->vector, &bp->bnx2_napi[i]);
6189 		irq->requested = 0;
6190 	}
6191 }
6192 
6193 static void
6194 bnx2_free_irq(struct bnx2 *bp)
6195 {
	__bnx2_free_irq(bp);
6198 	if (bp->flags & BNX2_FLAG_USING_MSI)
6199 		pci_disable_msi(bp->pdev);
6200 	else if (bp->flags & BNX2_FLAG_USING_MSIX)
6201 		pci_disable_msix(bp->pdev);
6202 
6203 	bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
6204 }
6205 
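/* Try to enable MSI-X.  pci_enable_msix() returns a positive count when
 * fewer vectors are available than requested, so retry with that count
 * until it succeeds or the count drops below the minimum.  On failure
 * the caller falls back to MSI or INTx.
 */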
6206 static void
6207 bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
6208 {
6209 	int i, total_vecs, rc;
6210 	struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
6211 	struct net_device *dev = bp->dev;
6212 	const int len = sizeof(bp->irq_tbl[0].name);
6213 
6214 	bnx2_setup_msix_tbl(bp);
6215 	BNX2_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
6216 	BNX2_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
6217 	BNX2_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
6218 
	/* Need to flush the previous three writes to ensure MSI-X
	 * is set up properly.
	 */
	BNX2_RD(bp, BNX2_PCI_MSIX_CONTROL);
6222 
6223 	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
6224 		msix_ent[i].entry = i;
6225 		msix_ent[i].vector = 0;
6226 	}
6227 
6228 	total_vecs = msix_vecs;
6229 #ifdef BCM_CNIC
6230 	total_vecs++;
6231 #endif
6232 	rc = -ENOSPC;
6233 	while (total_vecs >= BNX2_MIN_MSIX_VEC) {
6234 		rc = pci_enable_msix(bp->pdev, msix_ent, total_vecs);
6235 		if (rc <= 0)
6236 			break;
		total_vecs = rc;
6239 	}
6240 
6241 	if (rc != 0)
6242 		return;
6243 
6244 	msix_vecs = total_vecs;
6245 #ifdef BCM_CNIC
6246 	msix_vecs--;
6247 #endif
6248 	bp->irq_nvecs = msix_vecs;
6249 	bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
6250 	for (i = 0; i < total_vecs; i++) {
6251 		bp->irq_tbl[i].vector = msix_ent[i].vector;
6252 		snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
6253 		bp->irq_tbl[i].handler = bnx2_msi_1shot;
6254 	}
6255 }
6256 
6257 static int
6258 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
6259 {
6260 	int cpus = netif_get_num_default_rss_queues();
6261 	int msix_vecs;
6262 
6263 	if (!bp->num_req_rx_rings)
6264 		msix_vecs = max(cpus + 1, bp->num_req_tx_rings);
6265 	else if (!bp->num_req_tx_rings)
6266 		msix_vecs = max(cpus, bp->num_req_rx_rings);
6267 	else
6268 		msix_vecs = max(bp->num_req_rx_rings, bp->num_req_tx_rings);
6269 
6270 	msix_vecs = min(msix_vecs, RX_MAX_RINGS);
6271 
6272 	bp->irq_tbl[0].handler = bnx2_interrupt;
6273 	strcpy(bp->irq_tbl[0].name, bp->dev->name);
6274 	bp->irq_nvecs = 1;
6275 	bp->irq_tbl[0].vector = bp->pdev->irq;
6276 
6277 	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
6278 		bnx2_enable_msix(bp, msix_vecs);
6279 
6280 	if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
6281 	    !(bp->flags & BNX2_FLAG_USING_MSIX)) {
6282 		if (pci_enable_msi(bp->pdev) == 0) {
6283 			bp->flags |= BNX2_FLAG_USING_MSI;
6284 			if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
6285 				bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
6286 				bp->irq_tbl[0].handler = bnx2_msi_1shot;
6287 			} else
6288 				bp->irq_tbl[0].handler = bnx2_msi;
6289 
6290 			bp->irq_tbl[0].vector = bp->pdev->irq;
6291 		}
6292 	}
6293 
6294 	if (!bp->num_req_tx_rings)
6295 		bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
6296 	else
6297 		bp->num_tx_rings = min(bp->irq_nvecs, bp->num_req_tx_rings);
6298 
6299 	if (!bp->num_req_rx_rings)
6300 		bp->num_rx_rings = bp->irq_nvecs;
6301 	else
6302 		bp->num_rx_rings = min(bp->irq_nvecs, bp->num_req_rx_rings);
6303 
6304 	netif_set_real_num_tx_queues(bp->dev, bp->num_tx_rings);
6305 
6306 	return netif_set_real_num_rx_queues(bp->dev, bp->num_rx_rings);
6307 }
6308 
6309 /* Called with rtnl_lock */
6310 static int
6311 bnx2_open(struct net_device *dev)
6312 {
6313 	struct bnx2 *bp = netdev_priv(dev);
6314 	int rc;
6315 
6316 	rc = bnx2_request_firmware(bp);
6317 	if (rc < 0)
6318 		goto out;
6319 
6320 	netif_carrier_off(dev);
6321 
6322 	bnx2_disable_int(bp);
6323 
6324 	rc = bnx2_setup_int_mode(bp, disable_msi);
6325 	if (rc)
6326 		goto open_err;
6327 	bnx2_init_napi(bp);
6328 	bnx2_napi_enable(bp);
6329 	rc = bnx2_alloc_mem(bp);
6330 	if (rc)
6331 		goto open_err;
6332 
6333 	rc = bnx2_request_irq(bp);
6334 	if (rc)
6335 		goto open_err;
6336 
6337 	rc = bnx2_init_nic(bp, 1);
6338 	if (rc)
6339 		goto open_err;
6340 
6341 	mod_timer(&bp->timer, jiffies + bp->current_interval);
6342 
6343 	atomic_set(&bp->intr_sem, 0);
6344 
6345 	memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));
6346 
6347 	bnx2_enable_int(bp);
6348 
6349 	if (bp->flags & BNX2_FLAG_USING_MSI) {
6350 		/* Test MSI to make sure it is working
6351 		 * If MSI test fails, go back to INTx mode
6352 		 */
6353 		if (bnx2_test_intr(bp) != 0) {
6354 			netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");
6355 
6356 			bnx2_disable_int(bp);
6357 			bnx2_free_irq(bp);
6358 
6359 			bnx2_setup_int_mode(bp, 1);
6360 
6361 			rc = bnx2_init_nic(bp, 0);
6362 
6363 			if (!rc)
6364 				rc = bnx2_request_irq(bp);
6365 
6366 			if (rc) {
6367 				del_timer_sync(&bp->timer);
6368 				goto open_err;
6369 			}
6370 			bnx2_enable_int(bp);
6371 		}
6372 	}
6373 	if (bp->flags & BNX2_FLAG_USING_MSI)
6374 		netdev_info(dev, "using MSI\n");
6375 	else if (bp->flags & BNX2_FLAG_USING_MSIX)
		netdev_info(dev, "using MSI-X\n");
6377 
6378 	netif_tx_start_all_queues(dev);
6379 out:
6380 	return rc;
6381 
6382 open_err:
6383 	bnx2_napi_disable(bp);
6384 	bnx2_free_skbs(bp);
6385 	bnx2_free_irq(bp);
6386 	bnx2_free_mem(bp);
6387 	bnx2_del_napi(bp);
6388 	bnx2_release_firmware(bp);
6389 	goto out;
6390 }
6391 
6392 static void
6393 bnx2_reset_task(struct work_struct *work)
6394 {
6395 	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
6396 	int rc;
6397 	u16 pcicmd;
6398 
6399 	rtnl_lock();
6400 	if (!netif_running(bp->dev)) {
6401 		rtnl_unlock();
6402 		return;
6403 	}
6404 
6405 	bnx2_netif_stop(bp, true);
6406 
6407 	pci_read_config_word(bp->pdev, PCI_COMMAND, &pcicmd);
6408 	if (!(pcicmd & PCI_COMMAND_MEMORY)) {
6409 		/* in case PCI block has reset */
6410 		pci_restore_state(bp->pdev);
6411 		pci_save_state(bp->pdev);
6412 	}
6413 	rc = bnx2_init_nic(bp, 1);
6414 	if (rc) {
6415 		netdev_err(bp->dev, "failed to reset NIC, closing\n");
6416 		bnx2_napi_enable(bp);
6417 		dev_close(bp->dev);
6418 		rtnl_unlock();
6419 		return;
6420 	}
6421 
6422 	atomic_set(&bp->intr_sem, 1);
6423 	bnx2_netif_start(bp, true);
6424 	rtnl_unlock();
6425 }
6426 
6427 #define BNX2_FTQ_ENTRY(ftq) { __stringify(ftq##FTQ_CTL), BNX2_##ftq##FTQ_CTL }
6428 
6429 static void
6430 bnx2_dump_ftq(struct bnx2 *bp)
6431 {
6432 	int i;
6433 	u32 reg, bdidx, cid, valid;
6434 	struct net_device *dev = bp->dev;
6435 	static const struct ftq_reg {
6436 		char *name;
6437 		u32 off;
6438 	} ftq_arr[] = {
6439 		BNX2_FTQ_ENTRY(RV2P_P),
6440 		BNX2_FTQ_ENTRY(RV2P_T),
6441 		BNX2_FTQ_ENTRY(RV2P_M),
6442 		BNX2_FTQ_ENTRY(TBDR_),
6443 		BNX2_FTQ_ENTRY(TDMA_),
6444 		BNX2_FTQ_ENTRY(TXP_),
6445 		BNX2_FTQ_ENTRY(TXP_),
6446 		BNX2_FTQ_ENTRY(TPAT_),
6447 		BNX2_FTQ_ENTRY(RXP_C),
6448 		BNX2_FTQ_ENTRY(RXP_),
6449 		BNX2_FTQ_ENTRY(COM_COMXQ_),
6450 		BNX2_FTQ_ENTRY(COM_COMTQ_),
6451 		BNX2_FTQ_ENTRY(COM_COMQ_),
6452 		BNX2_FTQ_ENTRY(CP_CPQ_),
6453 	};
6454 
6455 	netdev_err(dev, "<--- start FTQ dump --->\n");
6456 	for (i = 0; i < ARRAY_SIZE(ftq_arr); i++)
6457 		netdev_err(dev, "%s %08x\n", ftq_arr[i].name,
6458 			   bnx2_reg_rd_ind(bp, ftq_arr[i].off));
6459 
6460 	netdev_err(dev, "CPU states:\n");
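	/* The program counter (offset 0x1c) is read twice on purpose;
	 * differing values show that the CPU is still making progress.
	 */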
6461 	for (reg = BNX2_TXP_CPU_MODE; reg <= BNX2_CP_CPU_MODE; reg += 0x40000)
6462 		netdev_err(dev, "%06x mode %x state %x evt_mask %x pc %x pc %x instr %x\n",
6463 			   reg, bnx2_reg_rd_ind(bp, reg),
6464 			   bnx2_reg_rd_ind(bp, reg + 4),
6465 			   bnx2_reg_rd_ind(bp, reg + 8),
6466 			   bnx2_reg_rd_ind(bp, reg + 0x1c),
6467 			   bnx2_reg_rd_ind(bp, reg + 0x1c),
6468 			   bnx2_reg_rd_ind(bp, reg + 0x20));
6469 
6470 	netdev_err(dev, "<--- end FTQ dump --->\n");
6471 	netdev_err(dev, "<--- start TBDC dump --->\n");
6472 	netdev_err(dev, "TBDC free cnt: %ld\n",
6473 		   BNX2_RD(bp, BNX2_TBDC_STATUS) & BNX2_TBDC_STATUS_FREE_CNT);
6474 	netdev_err(dev, "LINE     CID  BIDX   CMD  VALIDS\n");
6475 	for (i = 0; i < 0x20; i++) {
6476 		int j = 0;
6477 
6478 		BNX2_WR(bp, BNX2_TBDC_BD_ADDR, i);
6479 		BNX2_WR(bp, BNX2_TBDC_CAM_OPCODE,
6480 			BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_READ);
6481 		BNX2_WR(bp, BNX2_TBDC_COMMAND, BNX2_TBDC_COMMAND_CMD_REG_ARB);
6482 		while ((BNX2_RD(bp, BNX2_TBDC_COMMAND) &
6483 			BNX2_TBDC_COMMAND_CMD_REG_ARB) && j < 100)
6484 			j++;
6485 
6486 		cid = BNX2_RD(bp, BNX2_TBDC_CID);
6487 		bdidx = BNX2_RD(bp, BNX2_TBDC_BIDX);
6488 		valid = BNX2_RD(bp, BNX2_TBDC_CAM_OPCODE);
6489 		netdev_err(dev, "%02x    %06x  %04lx   %02x    [%x]\n",
6490 			   i, cid, bdidx & BNX2_TBDC_BDIDX_BDIDX,
6491 			   bdidx >> 24, (valid >> 8) & 0x0ff);
6492 	}
6493 	netdev_err(dev, "<--- end TBDC dump --->\n");
6494 }
6495 
6496 static void
6497 bnx2_dump_state(struct bnx2 *bp)
6498 {
6499 	struct net_device *dev = bp->dev;
6500 	u32 val1, val2;
6501 
6502 	pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1);
6503 	netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n",
6504 		   atomic_read(&bp->intr_sem), val1);
6505 	pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1);
6506 	pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2);
6507 	netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2);
6508 	netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
6509 		   BNX2_RD(bp, BNX2_EMAC_TX_STATUS),
6510 		   BNX2_RD(bp, BNX2_EMAC_RX_STATUS));
6511 	netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
6512 		   BNX2_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
6513 	netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
6514 		   BNX2_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
6515 	if (bp->flags & BNX2_FLAG_USING_MSIX)
6516 		netdev_err(dev, "DEBUG: PBA[%08x]\n",
6517 			   BNX2_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
6518 }
6519 
6520 static void
6521 bnx2_tx_timeout(struct net_device *dev)
6522 {
6523 	struct bnx2 *bp = netdev_priv(dev);
6524 
6525 	bnx2_dump_ftq(bp);
6526 	bnx2_dump_state(bp);
6527 	bnx2_dump_mcp_state(bp);
6528 
6529 	/* This allows the netif to be shutdown gracefully before resetting */
6530 	schedule_work(&bp->reset_task);
6531 }
6532 
6533 /* Called with netif_tx_lock.
6534  * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
6535  * netif_wake_queue().
6536  */
6537 static netdev_tx_t
6538 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6539 {
6540 	struct bnx2 *bp = netdev_priv(dev);
6541 	dma_addr_t mapping;
6542 	struct bnx2_tx_bd *txbd;
6543 	struct bnx2_sw_tx_bd *tx_buf;
6544 	u32 len, vlan_tag_flags, last_frag, mss;
6545 	u16 prod, ring_prod;
6546 	int i;
6547 	struct bnx2_napi *bnapi;
6548 	struct bnx2_tx_ring_info *txr;
6549 	struct netdev_queue *txq;
6550 
6551 	/*  Determine which tx ring we will be placed on */
6552 	i = skb_get_queue_mapping(skb);
6553 	bnapi = &bp->bnx2_napi[i];
6554 	txr = &bnapi->tx_ring;
6555 	txq = netdev_get_tx_queue(dev, i);
6556 
6557 	if (unlikely(bnx2_tx_avail(bp, txr) <
6558 	    (skb_shinfo(skb)->nr_frags + 1))) {
6559 		netif_tx_stop_queue(txq);
6560 		netdev_err(dev, "BUG! Tx ring full when queue awake!\n");
6561 
6562 		return NETDEV_TX_BUSY;
6563 	}
6564 	len = skb_headlen(skb);
6565 	prod = txr->tx_prod;
6566 	ring_prod = BNX2_TX_RING_IDX(prod);
6567 
6568 	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
6572 
6573 	if (vlan_tx_tag_present(skb)) {
6574 		vlan_tag_flags |=
6575 			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
6576 	}
6577 
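	/* For LSO, fold the IP and TCP option lengths into the BD flags;
	 * IPv6 additionally encodes the transport header offset.
	 */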
	mss = skb_shinfo(skb)->gso_size;
	if (mss) {
6579 		u32 tcp_opt_len;
6580 		struct iphdr *iph;
6581 
6582 		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
6583 
6584 		tcp_opt_len = tcp_optlen(skb);
6585 
6586 		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
6587 			u32 tcp_off = skb_transport_offset(skb) -
6588 				      sizeof(struct ipv6hdr) - ETH_HLEN;
6589 
6590 			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
6591 					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0)) {
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			} else {
6595 				tcp_off >>= 3;
6596 				vlan_tag_flags |= ((tcp_off & 0x3) <<
6597 						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
6598 						  ((tcp_off & 0x10) <<
6599 						   TX_BD_FLAGS_TCP6_OFF4_SHL);
6600 				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
6601 			}
6602 		} else {
6603 			iph = ip_hdr(skb);
6604 			if (tcp_opt_len || (iph->ihl > 5)) {
6605 				vlan_tag_flags |= ((iph->ihl - 5) +
6606 						   (tcp_opt_len >> 2)) << 8;
6607 			}
6608 		}
	}
6611 
	mapping = dma_map_single(&bp->pdev->dev, skb->data, len,
				 DMA_TO_DEVICE);
6613 	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
6614 		dev_kfree_skb(skb);
6615 		return NETDEV_TX_OK;
6616 	}
6617 
6618 	tx_buf = &txr->tx_buf_ring[ring_prod];
6619 	tx_buf->skb = skb;
6620 	dma_unmap_addr_set(tx_buf, mapping, mapping);
6621 
6622 	txbd = &txr->tx_desc_ring[ring_prod];
6623 
6624 	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6625 	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6626 	txbd->tx_bd_mss_nbytes = len | (mss << 16);
6627 	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
6628 
6629 	last_frag = skb_shinfo(skb)->nr_frags;
6630 	tx_buf->nr_frags = last_frag;
6631 	tx_buf->is_gso = skb_is_gso(skb);
6632 
6633 	for (i = 0; i < last_frag; i++) {
6634 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6635 
6636 		prod = BNX2_NEXT_TX_BD(prod);
6637 		ring_prod = BNX2_TX_RING_IDX(prod);
6638 		txbd = &txr->tx_desc_ring[ring_prod];
6639 
6640 		len = skb_frag_size(frag);
6641 		mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, len,
6642 					   DMA_TO_DEVICE);
6643 		if (dma_mapping_error(&bp->pdev->dev, mapping))
6644 			goto dma_error;
6645 		dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
6646 				   mapping);
6647 
6648 		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6649 		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6650 		txbd->tx_bd_mss_nbytes = len | (mss << 16);
6651 		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
	}
6654 	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
6655 
6656 	/* Sync BD data before updating TX mailbox */
6657 	wmb();
6658 
6659 	netdev_tx_sent_queue(txq, skb->len);
6660 
6661 	prod = BNX2_NEXT_TX_BD(prod);
6662 	txr->tx_prod_bseq += skb->len;
6663 
6664 	BNX2_WR16(bp, txr->tx_bidx_addr, prod);
6665 	BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
6666 
6667 	mmiowb();
6668 
6669 	txr->tx_prod = prod;
6670 
6671 	if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
6672 		netif_tx_stop_queue(txq);
6673 
6674 		/* netif_tx_stop_queue() must be done before checking
6675 		 * tx index in bnx2_tx_avail() below, because in
6676 		 * bnx2_tx_int(), we update tx index before checking for
6677 		 * netif_tx_queue_stopped().
6678 		 */
6679 		smp_mb();
6680 		if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
6681 			netif_tx_wake_queue(txq);
6682 	}
6683 
6684 	return NETDEV_TX_OK;
6685 dma_error:
6686 	/* save value of frag that failed */
6687 	last_frag = i;
6688 
6689 	/* start back at beginning and unmap skb */
6690 	prod = txr->tx_prod;
6691 	ring_prod = BNX2_TX_RING_IDX(prod);
6692 	tx_buf = &txr->tx_buf_ring[ring_prod];
6693 	tx_buf->skb = NULL;
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
			 skb_headlen(skb), DMA_TO_DEVICE);
6696 
6697 	/* unmap remaining mapped pages */
6698 	for (i = 0; i < last_frag; i++) {
6699 		prod = BNX2_NEXT_TX_BD(prod);
6700 		ring_prod = BNX2_TX_RING_IDX(prod);
6701 		tx_buf = &txr->tx_buf_ring[ring_prod];
		dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
			       DMA_TO_DEVICE);
6705 	}
6706 
6707 	dev_kfree_skb(skb);
6708 	return NETDEV_TX_OK;
6709 }
6710 
6711 /* Called with rtnl_lock */
6712 static int
6713 bnx2_close(struct net_device *dev)
6714 {
6715 	struct bnx2 *bp = netdev_priv(dev);
6716 
6717 	bnx2_disable_int_sync(bp);
6718 	bnx2_napi_disable(bp);
6719 	netif_tx_disable(dev);
6720 	del_timer_sync(&bp->timer);
6721 	bnx2_shutdown_chip(bp);
6722 	bnx2_free_irq(bp);
6723 	bnx2_free_skbs(bp);
6724 	bnx2_free_mem(bp);
6725 	bnx2_del_napi(bp);
6726 	bp->link_up = 0;
6727 	netif_carrier_off(bp->dev);
6728 	return 0;
6729 }
6730 
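/* Accumulate the current hardware counters into temp_stats_blk before a
 * chip reset erases them; the 64-bit counters are summed with carry
 * from the low 32-bit word into the high word.
 */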
6731 static void
6732 bnx2_save_stats(struct bnx2 *bp)
6733 {
6734 	u32 *hw_stats = (u32 *) bp->stats_blk;
6735 	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
6736 	int i;
6737 
	/* The first 10 counters are 64-bit counters (20 32-bit words) */
	for (i = 0; i < 20; i += 2) {
6740 		u32 hi;
6741 		u64 lo;
6742 
6743 		hi = temp_stats[i] + hw_stats[i];
6744 		lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
6745 		if (lo > 0xffffffff)
6746 			hi++;
6747 		temp_stats[i] = hi;
6748 		temp_stats[i + 1] = lo & 0xffffffff;
6749 	}
6750 
6751 	for ( ; i < sizeof(struct statistics_block) / 4; i++)
6752 		temp_stats[i] += hw_stats[i];
6753 }
6754 
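/* Each 64-bit hardware counter is split into _hi/_lo words; combine
 * them and add the totals accumulated across previous chip resets.
 */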
6755 #define GET_64BIT_NET_STATS64(ctr)		\
6756 	(((u64) (ctr##_hi) << 32) + (u64) (ctr##_lo))
6757 
6758 #define GET_64BIT_NET_STATS(ctr)				\
6759 	GET_64BIT_NET_STATS64(bp->stats_blk->ctr) +		\
6760 	GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)
6761 
6762 #define GET_32BIT_NET_STATS(ctr)				\
6763 	(unsigned long) (bp->stats_blk->ctr +			\
6764 			 bp->temp_stats_blk->ctr)
6765 
6766 static struct rtnl_link_stats64 *
6767 bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
6768 {
6769 	struct bnx2 *bp = netdev_priv(dev);
6770 
6771 	if (bp->stats_blk == NULL)
6772 		return net_stats;
6773 
6774 	net_stats->rx_packets =
6775 		GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
6776 		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
6777 		GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);
6778 
6779 	net_stats->tx_packets =
6780 		GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
6781 		GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
6782 		GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);
6783 
6784 	net_stats->rx_bytes =
6785 		GET_64BIT_NET_STATS(stat_IfHCInOctets);
6786 
6787 	net_stats->tx_bytes =
6788 		GET_64BIT_NET_STATS(stat_IfHCOutOctets);
6789 
6790 	net_stats->multicast =
6791 		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts);
6792 
6793 	net_stats->collisions =
6794 		GET_32BIT_NET_STATS(stat_EtherStatsCollisions);
6795 
6796 	net_stats->rx_length_errors =
6797 		GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
6798 		GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);
6799 
6800 	net_stats->rx_over_errors =
6801 		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6802 		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);
6803 
6804 	net_stats->rx_frame_errors =
6805 		GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);
6806 
6807 	net_stats->rx_crc_errors =
6808 		GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);
6809 
6810 	net_stats->rx_errors = net_stats->rx_length_errors +
6811 		net_stats->rx_over_errors + net_stats->rx_frame_errors +
6812 		net_stats->rx_crc_errors;
6813 
6814 	net_stats->tx_aborted_errors =
6815 		GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
6816 		GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);
6817 
	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0)) {
		net_stats->tx_carrier_errors = 0;
	} else {
		net_stats->tx_carrier_errors =
			GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
	}
6825 
6826 	net_stats->tx_errors =
6827 		GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
6828 		net_stats->tx_aborted_errors +
6829 		net_stats->tx_carrier_errors;
6830 
6831 	net_stats->rx_missed_errors =
6832 		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6833 		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
6834 		GET_32BIT_NET_STATS(stat_FwRxDrop);
6835 
6836 	return net_stats;
6837 }
6838 
6839 /* All ethtool functions called with rtnl_lock */
6840 
6841 static int
6842 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6843 {
6844 	struct bnx2 *bp = netdev_priv(dev);
6845 	int support_serdes = 0, support_copper = 0;
6846 
6847 	cmd->supported = SUPPORTED_Autoneg;
6848 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6849 		support_serdes = 1;
6850 		support_copper = 1;
6851 	} else if (bp->phy_port == PORT_FIBRE)
6852 		support_serdes = 1;
6853 	else
6854 		support_copper = 1;
6855 
6856 	if (support_serdes) {
6857 		cmd->supported |= SUPPORTED_1000baseT_Full |
6858 			SUPPORTED_FIBRE;
6859 		if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6860 			cmd->supported |= SUPPORTED_2500baseX_Full;
	}
6863 	if (support_copper) {
6864 		cmd->supported |= SUPPORTED_10baseT_Half |
6865 			SUPPORTED_10baseT_Full |
6866 			SUPPORTED_100baseT_Half |
6867 			SUPPORTED_100baseT_Full |
6868 			SUPPORTED_1000baseT_Full |
6869 			SUPPORTED_TP;
	}
6872 
6873 	spin_lock_bh(&bp->phy_lock);
6874 	cmd->port = bp->phy_port;
6875 	cmd->advertising = bp->advertising;
6876 
	if (bp->autoneg & AUTONEG_SPEED)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;
6882 
6883 	if (netif_carrier_ok(dev)) {
6884 		ethtool_cmd_speed_set(cmd, bp->line_speed);
6885 		cmd->duplex = bp->duplex;
6886 		if (!(bp->phy_flags & BNX2_PHY_FLAG_SERDES)) {
6887 			if (bp->phy_flags & BNX2_PHY_FLAG_MDIX)
6888 				cmd->eth_tp_mdix = ETH_TP_MDI_X;
6889 			else
6890 				cmd->eth_tp_mdix = ETH_TP_MDI;
6891 		}
6892 	}
6893 	else {
6894 		ethtool_cmd_speed_set(cmd, -1);
6895 		cmd->duplex = -1;
6896 	}
6897 	spin_unlock_bh(&bp->phy_lock);
6898 
6899 	cmd->transceiver = XCVR_INTERNAL;
6900 	cmd->phy_address = bp->phy_addr;
6901 
6902 	return 0;
6903 }
6904 
6905 static int
6906 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6907 {
6908 	struct bnx2 *bp = netdev_priv(dev);
6909 	u8 autoneg = bp->autoneg;
6910 	u8 req_duplex = bp->req_duplex;
6911 	u16 req_line_speed = bp->req_line_speed;
6912 	u32 advertising = bp->advertising;
6913 	int err = -EINVAL;
6914 
6915 	spin_lock_bh(&bp->phy_lock);
6916 
6917 	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
6918 		goto err_out_unlock;
6919 
6920 	if (cmd->port != bp->phy_port &&
6921 	    !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
6922 		goto err_out_unlock;
6923 
6924 	/* If device is down, we can store the settings only if the user
6925 	 * is setting the currently active port.
6926 	 */
6927 	if (!netif_running(dev) && cmd->port != bp->phy_port)
6928 		goto err_out_unlock;
6929 
6930 	if (cmd->autoneg == AUTONEG_ENABLE) {
6931 		autoneg |= AUTONEG_SPEED;
6932 
6933 		advertising = cmd->advertising;
6934 		if (cmd->port == PORT_TP) {
6935 			advertising &= ETHTOOL_ALL_COPPER_SPEED;
6936 			if (!advertising)
6937 				advertising = ETHTOOL_ALL_COPPER_SPEED;
6938 		} else {
6939 			advertising &= ETHTOOL_ALL_FIBRE_SPEED;
6940 			if (!advertising)
6941 				advertising = ETHTOOL_ALL_FIBRE_SPEED;
6942 		}
6943 		advertising |= ADVERTISED_Autoneg;
6944 	}
6945 	else {
6946 		u32 speed = ethtool_cmd_speed(cmd);
6947 		if (cmd->port == PORT_FIBRE) {
6948 			if ((speed != SPEED_1000 &&
6949 			     speed != SPEED_2500) ||
6950 			    (cmd->duplex != DUPLEX_FULL))
6951 				goto err_out_unlock;
6952 
6953 			if (speed == SPEED_2500 &&
6954 			    !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
6955 				goto err_out_unlock;
6956 		} else if (speed == SPEED_1000 || speed == SPEED_2500)
6957 			goto err_out_unlock;
6958 
6959 		autoneg &= ~AUTONEG_SPEED;
6960 		req_line_speed = speed;
6961 		req_duplex = cmd->duplex;
6962 		advertising = 0;
6963 	}
6964 
6965 	bp->autoneg = autoneg;
6966 	bp->advertising = advertising;
6967 	bp->req_line_speed = req_line_speed;
6968 	bp->req_duplex = req_duplex;
6969 
6970 	err = 0;
6971 	/* If device is down, the new settings will be picked up when it is
6972 	 * brought up.
6973 	 */
6974 	if (netif_running(dev))
6975 		err = bnx2_setup_phy(bp, cmd->port);
6976 
6977 err_out_unlock:
6978 	spin_unlock_bh(&bp->phy_lock);
6979 
6980 	return err;
6981 }
6982 
6983 static void
6984 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6985 {
6986 	struct bnx2 *bp = netdev_priv(dev);
6987 
6988 	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
6989 	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
6990 	strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
6991 	strlcpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
6992 }
6993 
6994 #define BNX2_REGDUMP_LEN		(32 * 1024)
6995 
6996 static int
6997 bnx2_get_regs_len(struct net_device *dev)
6998 {
6999 	return BNX2_REGDUMP_LEN;
7000 }
7001 
7002 static void
7003 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
7004 {
7005 	u32 *p = _p, i, offset;
7006 	u8 *orig_p = _p;
7007 	struct bnx2 *bp = netdev_priv(dev);
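	/* Pairs of {start, end} offsets delimiting the readable register
	 * ranges; the unreadable holes in between are skipped and left
	 * zero-filled in the dump.
	 */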
7008 	static const u32 reg_boundaries[] = {
7009 		0x0000, 0x0098, 0x0400, 0x045c,
7010 		0x0800, 0x0880, 0x0c00, 0x0c10,
7011 		0x0c30, 0x0d08, 0x1000, 0x101c,
7012 		0x1040, 0x1048, 0x1080, 0x10a4,
7013 		0x1400, 0x1490, 0x1498, 0x14f0,
7014 		0x1500, 0x155c, 0x1580, 0x15dc,
7015 		0x1600, 0x1658, 0x1680, 0x16d8,
7016 		0x1800, 0x1820, 0x1840, 0x1854,
7017 		0x1880, 0x1894, 0x1900, 0x1984,
7018 		0x1c00, 0x1c0c, 0x1c40, 0x1c54,
7019 		0x1c80, 0x1c94, 0x1d00, 0x1d84,
7020 		0x2000, 0x2030, 0x23c0, 0x2400,
7021 		0x2800, 0x2820, 0x2830, 0x2850,
7022 		0x2b40, 0x2c10, 0x2fc0, 0x3058,
7023 		0x3c00, 0x3c94, 0x4000, 0x4010,
7024 		0x4080, 0x4090, 0x43c0, 0x4458,
7025 		0x4c00, 0x4c18, 0x4c40, 0x4c54,
7026 		0x4fc0, 0x5010, 0x53c0, 0x5444,
7027 		0x5c00, 0x5c18, 0x5c80, 0x5c90,
7028 		0x5fc0, 0x6000, 0x6400, 0x6428,
7029 		0x6800, 0x6848, 0x684c, 0x6860,
7030 		0x6888, 0x6910, 0x8000
7031 	};
7032 
7033 	regs->version = 0;
7034 
7035 	memset(p, 0, BNX2_REGDUMP_LEN);
7036 
7037 	if (!netif_running(bp->dev))
7038 		return;
7039 
7040 	i = 0;
7041 	offset = reg_boundaries[0];
7042 	p += offset;
7043 	while (offset < BNX2_REGDUMP_LEN) {
7044 		*p++ = BNX2_RD(bp, offset);
7045 		offset += 4;
7046 		if (offset == reg_boundaries[i + 1]) {
7047 			offset = reg_boundaries[i + 2];
7048 			p = (u32 *) (orig_p + offset);
7049 			i += 2;
7050 		}
7051 	}
7052 }
7053 
7054 static void
7055 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7056 {
7057 	struct bnx2 *bp = netdev_priv(dev);
7058 
7059 	if (bp->flags & BNX2_FLAG_NO_WOL) {
7060 		wol->supported = 0;
7061 		wol->wolopts = 0;
7062 	}
7063 	else {
7064 		wol->supported = WAKE_MAGIC;
7065 		if (bp->wol)
7066 			wol->wolopts = WAKE_MAGIC;
7067 		else
7068 			wol->wolopts = 0;
7069 	}
7070 	memset(&wol->sopass, 0, sizeof(wol->sopass));
7071 }
7072 
7073 static int
7074 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7075 {
7076 	struct bnx2 *bp = netdev_priv(dev);
7077 
7078 	if (wol->wolopts & ~WAKE_MAGIC)
7079 		return -EINVAL;
7080 
7081 	if (wol->wolopts & WAKE_MAGIC) {
7082 		if (bp->flags & BNX2_FLAG_NO_WOL)
7083 			return -EINVAL;
7084 
7085 		bp->wol = 1;
7086 	}
7087 	else {
7088 		bp->wol = 0;
7089 	}
7090 
7091 	device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
7092 
7093 	return 0;
7094 }
7095 
7096 static int
7097 bnx2_nway_reset(struct net_device *dev)
7098 {
7099 	struct bnx2 *bp = netdev_priv(dev);
7100 	u32 bmcr;
7101 
7102 	if (!netif_running(dev))
7103 		return -EAGAIN;
7104 
	if (!(bp->autoneg & AUTONEG_SPEED))
		return -EINVAL;
7108 
7109 	spin_lock_bh(&bp->phy_lock);
7110 
7111 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
7112 		int rc;
7113 
7114 		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
7115 		spin_unlock_bh(&bp->phy_lock);
7116 		return rc;
7117 	}
7118 
7119 	/* Force a link down visible on the other side */
7120 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
7121 		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
7122 		spin_unlock_bh(&bp->phy_lock);
7123 
7124 		msleep(20);
7125 
7126 		spin_lock_bh(&bp->phy_lock);
7127 
7128 		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
7129 		bp->serdes_an_pending = 1;
7130 		mod_timer(&bp->timer, jiffies + bp->current_interval);
7131 	}
7132 
7133 	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
7134 	bmcr &= ~BMCR_LOOPBACK;
7135 	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
7136 
7137 	spin_unlock_bh(&bp->phy_lock);
7138 
7139 	return 0;
7140 }
7141 
7142 static u32
7143 bnx2_get_link(struct net_device *dev)
7144 {
7145 	struct bnx2 *bp = netdev_priv(dev);
7146 
7147 	return bp->link_up;
7148 }
7149 
7150 static int
7151 bnx2_get_eeprom_len(struct net_device *dev)
7152 {
7153 	struct bnx2 *bp = netdev_priv(dev);
7154 
7155 	if (bp->flash_info == NULL)
7156 		return 0;
7157 
7158 	return (int) bp->flash_size;
7159 }
7160 
7161 static int
7162 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7163 		u8 *eebuf)
7164 {
7165 	struct bnx2 *bp = netdev_priv(dev);
7166 	int rc;
7167 
7168 	/* parameters already validated in ethtool_get_eeprom */
7169 
7170 	rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
7171 
7172 	return rc;
7173 }
7174 
7175 static int
7176 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7177 		u8 *eebuf)
7178 {
7179 	struct bnx2 *bp = netdev_priv(dev);
7180 	int rc;
7181 
7182 	/* parameters already validated in ethtool_set_eeprom */
7183 
7184 	rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
7185 
7186 	return rc;
7187 }
7188 
7189 static int
7190 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7191 {
7192 	struct bnx2 *bp = netdev_priv(dev);
7193 
7194 	memset(coal, 0, sizeof(struct ethtool_coalesce));
7195 
7196 	coal->rx_coalesce_usecs = bp->rx_ticks;
7197 	coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
7198 	coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
7199 	coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
7200 
7201 	coal->tx_coalesce_usecs = bp->tx_ticks;
7202 	coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
7203 	coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
7204 	coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
7205 
7206 	coal->stats_block_coalesce_usecs = bp->stats_ticks;
7207 
7208 	return 0;
7209 }
7210 
7211 static int
7212 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7213 {
7214 	struct bnx2 *bp = netdev_priv(dev);
7215 
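	/* Clamp each value to its hardware field width: tick counters are
	 * 10 bits wide (0x3ff) and frame-count trips are 8 bits (0xff).
	 */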
	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 0x3ff)
		bp->rx_ticks = 0x3ff;

	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
	if (bp->rx_quick_cons_trip > 0xff)
		bp->rx_quick_cons_trip = 0xff;

	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
	if (bp->rx_ticks_int > 0x3ff)
		bp->rx_ticks_int = 0x3ff;

	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
	if (bp->rx_quick_cons_trip_int > 0xff)
		bp->rx_quick_cons_trip_int = 0xff;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3ff)
		bp->tx_ticks = 0x3ff;

	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
	if (bp->tx_quick_cons_trip > 0xff)
		bp->tx_quick_cons_trip = 0xff;

	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
	if (bp->tx_ticks_int > 0x3ff)
		bp->tx_ticks_int = 0x3ff;

	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
	if (bp->tx_quick_cons_trip_int > 0xff)
		bp->tx_quick_cons_trip_int = 0xff;
7241 
7242 	bp->stats_ticks = coal->stats_block_coalesce_usecs;
7243 	if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
7244 		if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
7245 			bp->stats_ticks = USEC_PER_SEC;
7246 	}
7247 	if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
7248 		bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7249 	bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7250 
7251 	if (netif_running(bp->dev)) {
7252 		bnx2_netif_stop(bp, true);
7253 		bnx2_init_nic(bp, 0);
7254 		bnx2_netif_start(bp, true);
7255 	}
7256 
7257 	return 0;
7258 }
7259 
7260 static void
7261 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7262 {
7263 	struct bnx2 *bp = netdev_priv(dev);
7264 
7265 	ering->rx_max_pending = BNX2_MAX_TOTAL_RX_DESC_CNT;
7266 	ering->rx_jumbo_max_pending = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;
7267 
7268 	ering->rx_pending = bp->rx_ring_size;
7269 	ering->rx_jumbo_pending = bp->rx_pg_ring_size;
7270 
7271 	ering->tx_max_pending = BNX2_MAX_TX_DESC_CNT;
7272 	ering->tx_pending = bp->tx_ring_size;
7273 }
7274 
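/* Changing ring sizes requires a full teardown: stop the NIC (saving the
 * statistics a chip reset would erase), free buffers and DMA memory,
 * then reallocate and reinitialize with the new sizes if the device was
 * running.  With reset_irq, the IRQ and NAPI setup is rebuilt as well.
 */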
7275 static int
7276 bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx, bool reset_irq)
7277 {
7278 	if (netif_running(bp->dev)) {
7279 		/* Reset will erase chipset stats; save them */
7280 		bnx2_save_stats(bp);
7281 
7282 		bnx2_netif_stop(bp, true);
7283 		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
7284 		if (reset_irq) {
7285 			bnx2_free_irq(bp);
7286 			bnx2_del_napi(bp);
7287 		} else {
7288 			__bnx2_free_irq(bp);
7289 		}
7290 		bnx2_free_skbs(bp);
7291 		bnx2_free_mem(bp);
7292 	}
7293 
7294 	bnx2_set_rx_ring_size(bp, rx);
7295 	bp->tx_ring_size = tx;
7296 
7297 	if (netif_running(bp->dev)) {
7298 		int rc = 0;
7299 
7300 		if (reset_irq) {
7301 			rc = bnx2_setup_int_mode(bp, disable_msi);
7302 			bnx2_init_napi(bp);
7303 		}
7304 
7305 		if (!rc)
7306 			rc = bnx2_alloc_mem(bp);
7307 
7308 		if (!rc)
7309 			rc = bnx2_request_irq(bp);
7310 
7311 		if (!rc)
7312 			rc = bnx2_init_nic(bp, 0);
7313 
7314 		if (rc) {
7315 			bnx2_napi_enable(bp);
7316 			dev_close(bp->dev);
7317 			return rc;
7318 		}
7319 #ifdef BCM_CNIC
7320 		mutex_lock(&bp->cnic_lock);
7321 		/* Let cnic know about the new status block. */
7322 		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
7323 			bnx2_setup_cnic_irq_info(bp);
7324 		mutex_unlock(&bp->cnic_lock);
7325 #endif
7326 		bnx2_netif_start(bp, true);
7327 	}
7328 	return 0;
7329 }
7330 
7331 static int
7332 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7333 {
7334 	struct bnx2 *bp = netdev_priv(dev);
7335 	int rc;
7336 
	if (ering->rx_pending > BNX2_MAX_TOTAL_RX_DESC_CNT ||
	    ering->tx_pending > BNX2_MAX_TX_DESC_CNT ||
	    ering->tx_pending <= MAX_SKB_FRAGS)
		return -EINVAL;

7343 	rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending,
7344 				   false);
7345 	return rc;
7346 }
7347 
7348 static void
7349 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7350 {
7351 	struct bnx2 *bp = netdev_priv(dev);
7352 
7353 	epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7354 	epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7355 	epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7356 }
7357 
7358 static int
7359 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7360 {
7361 	struct bnx2 *bp = netdev_priv(dev);
7362 
7363 	bp->req_flow_ctrl = 0;
7364 	if (epause->rx_pause)
7365 		bp->req_flow_ctrl |= FLOW_CTRL_RX;
7366 	if (epause->tx_pause)
7367 		bp->req_flow_ctrl |= FLOW_CTRL_TX;
7368 
	if (epause->autoneg)
		bp->autoneg |= AUTONEG_FLOW_CTRL;
	else
		bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7375 
7376 	if (netif_running(dev)) {
7377 		spin_lock_bh(&bp->phy_lock);
7378 		bnx2_setup_phy(bp, bp->phy_port);
7379 		spin_unlock_bh(&bp->phy_lock);
7380 	}
7381 
7382 	return 0;
7383 }
7384 
7385 static struct {
7386 	char string[ETH_GSTRING_LEN];
7387 } bnx2_stats_str_arr[] = {
7388 	{ "rx_bytes" },
7389 	{ "rx_error_bytes" },
7390 	{ "tx_bytes" },
7391 	{ "tx_error_bytes" },
7392 	{ "rx_ucast_packets" },
7393 	{ "rx_mcast_packets" },
7394 	{ "rx_bcast_packets" },
7395 	{ "tx_ucast_packets" },
7396 	{ "tx_mcast_packets" },
7397 	{ "tx_bcast_packets" },
7398 	{ "tx_mac_errors" },
7399 	{ "tx_carrier_errors" },
7400 	{ "rx_crc_errors" },
7401 	{ "rx_align_errors" },
7402 	{ "tx_single_collisions" },
7403 	{ "tx_multi_collisions" },
7404 	{ "tx_deferred" },
7405 	{ "tx_excess_collisions" },
7406 	{ "tx_late_collisions" },
7407 	{ "tx_total_collisions" },
7408 	{ "rx_fragments" },
7409 	{ "rx_jabbers" },
7410 	{ "rx_undersize_packets" },
7411 	{ "rx_oversize_packets" },
7412 	{ "rx_64_byte_packets" },
7413 	{ "rx_65_to_127_byte_packets" },
7414 	{ "rx_128_to_255_byte_packets" },
7415 	{ "rx_256_to_511_byte_packets" },
7416 	{ "rx_512_to_1023_byte_packets" },
7417 	{ "rx_1024_to_1522_byte_packets" },
7418 	{ "rx_1523_to_9022_byte_packets" },
7419 	{ "tx_64_byte_packets" },
7420 	{ "tx_65_to_127_byte_packets" },
7421 	{ "tx_128_to_255_byte_packets" },
7422 	{ "tx_256_to_511_byte_packets" },
7423 	{ "tx_512_to_1023_byte_packets" },
7424 	{ "tx_1024_to_1522_byte_packets" },
7425 	{ "tx_1523_to_9022_byte_packets" },
7426 	{ "rx_xon_frames" },
7427 	{ "rx_xoff_frames" },
7428 	{ "tx_xon_frames" },
7429 	{ "tx_xoff_frames" },
7430 	{ "rx_mac_ctrl_frames" },
7431 	{ "rx_filtered_packets" },
7432 	{ "rx_ftq_discards" },
7433 	{ "rx_discards" },
7434 	{ "rx_fw_discards" },
7435 };
7436 
7437 #define BNX2_NUM_STATS ARRAY_SIZE(bnx2_stats_str_arr)
7438 
7439 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
7440 
7441 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
7442     STATS_OFFSET32(stat_IfHCInOctets_hi),
7443     STATS_OFFSET32(stat_IfHCInBadOctets_hi),
7444     STATS_OFFSET32(stat_IfHCOutOctets_hi),
7445     STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
7446     STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
7447     STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
7448     STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
7449     STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
7450     STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
7451     STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
7452     STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
7453     STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
7454     STATS_OFFSET32(stat_Dot3StatsFCSErrors),
7455     STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
7456     STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
7457     STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
7458     STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
7459     STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
7460     STATS_OFFSET32(stat_Dot3StatsLateCollisions),
7461     STATS_OFFSET32(stat_EtherStatsCollisions),
7462     STATS_OFFSET32(stat_EtherStatsFragments),
7463     STATS_OFFSET32(stat_EtherStatsJabbers),
7464     STATS_OFFSET32(stat_EtherStatsUndersizePkts),
7465     STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
7466     STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
7467     STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
7468     STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
7469     STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
7470     STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
7471     STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
7472     STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
7473     STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
7474     STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
7475     STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
7476     STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
7477     STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
7478     STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
7479     STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
7480     STATS_OFFSET32(stat_XonPauseFramesReceived),
7481     STATS_OFFSET32(stat_XoffPauseFramesReceived),
7482     STATS_OFFSET32(stat_OutXonSent),
7483     STATS_OFFSET32(stat_OutXoffSent),
7484     STATS_OFFSET32(stat_MacControlFramesReceived),
7485     STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
7486     STATS_OFFSET32(stat_IfInFTQDiscards),
7487     STATS_OFFSET32(stat_IfInMBUFDiscards),
7488     STATS_OFFSET32(stat_FwRxDrop),
7489 };
7490 
7491 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
7492  * skipped because of errata.
7493  */
7494 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
7495 	8,0,8,8,8,8,8,8,8,8,
7496 	4,0,4,4,4,4,4,4,4,4,
7497 	4,4,4,4,4,4,4,4,4,4,
7498 	4,4,4,4,4,4,4,4,4,4,
7499 	4,4,4,4,4,4,4,
7500 };
7501 
7502 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
7503 	8,0,8,8,8,8,8,8,8,8,
7504 	4,4,4,4,4,4,4,4,4,4,
7505 	4,4,4,4,4,4,4,4,4,4,
7506 	4,4,4,4,4,4,4,4,4,4,
7507 	4,4,4,4,4,4,4,
7508 };
7509 
7510 #define BNX2_NUM_TESTS 6
7511 
7512 static struct {
7513 	char string[ETH_GSTRING_LEN];
7514 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
7515 	{ "register_test (offline)" },
7516 	{ "memory_test (offline)" },
7517 	{ "loopback_test (offline)" },
7518 	{ "nvram_test (online)" },
7519 	{ "interrupt_test (online)" },
7520 	{ "link_test (online)" },
7521 };
7522 
7523 static int
7524 bnx2_get_sset_count(struct net_device *dev, int sset)
7525 {
7526 	switch (sset) {
7527 	case ETH_SS_TEST:
7528 		return BNX2_NUM_TESTS;
7529 	case ETH_SS_STATS:
7530 		return BNX2_NUM_STATS;
7531 	default:
7532 		return -EOPNOTSUPP;
7533 	}
7534 }
7535 
7536 static void
7537 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
7538 {
7539 	struct bnx2 *bp = netdev_priv(dev);
7540 
7541 	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
7542 	if (etest->flags & ETH_TEST_FL_OFFLINE) {
7543 		int i;
7544 
7545 		bnx2_netif_stop(bp, true);
7546 		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
7547 		bnx2_free_skbs(bp);
7548 
7549 		if (bnx2_test_registers(bp) != 0) {
7550 			buf[0] = 1;
7551 			etest->flags |= ETH_TEST_FL_FAILED;
7552 		}
7553 		if (bnx2_test_memory(bp) != 0) {
7554 			buf[1] = 1;
7555 			etest->flags |= ETH_TEST_FL_FAILED;
7556 		}
		buf[2] = bnx2_test_loopback(bp);
		if (buf[2] != 0)
			etest->flags |= ETH_TEST_FL_FAILED;
7559 
		if (!netif_running(bp->dev)) {
			bnx2_shutdown_chip(bp);
		} else {
7563 			bnx2_init_nic(bp, 1);
7564 			bnx2_netif_start(bp, true);
7565 		}
7566 
7567 		/* wait for link up */
7568 		for (i = 0; i < 7; i++) {
7569 			if (bp->link_up)
7570 				break;
7571 			msleep_interruptible(1000);
7572 		}
7573 	}
7574 
7575 	if (bnx2_test_nvram(bp) != 0) {
7576 		buf[3] = 1;
7577 		etest->flags |= ETH_TEST_FL_FAILED;
7578 	}
7579 	if (bnx2_test_intr(bp) != 0) {
7580 		buf[4] = 1;
7581 		etest->flags |= ETH_TEST_FL_FAILED;
7582 	}
7583 
	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
7589 }
7590 
7591 static void
7592 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7593 {
7594 	switch (stringset) {
7595 	case ETH_SS_STATS:
7596 		memcpy(buf, bnx2_stats_str_arr,
7597 			sizeof(bnx2_stats_str_arr));
7598 		break;
7599 	case ETH_SS_TEST:
7600 		memcpy(buf, bnx2_tests_str_arr,
7601 			sizeof(bnx2_tests_str_arr));
7602 		break;
7603 	}
7604 }
7605 
7606 static void
7607 bnx2_get_ethtool_stats(struct net_device *dev,
7608 		struct ethtool_stats *stats, u64 *buf)
7609 {
7610 	struct bnx2 *bp = netdev_priv(dev);
7611 	int i;
7612 	u32 *hw_stats = (u32 *) bp->stats_blk;
7613 	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
7614 	u8 *stats_len_arr = NULL;
7615 
7616 	if (hw_stats == NULL) {
7617 		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
7618 		return;
7619 	}
7620 
7621 	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
7622 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) ||
7623 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A2) ||
7624 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
7625 		stats_len_arr = bnx2_5706_stats_len_arr;
7626 	else
7627 		stats_len_arr = bnx2_5708_stats_len_arr;
7628 
7629 	for (i = 0; i < BNX2_NUM_STATS; i++) {
7630 		unsigned long offset;
7631 
7632 		if (stats_len_arr[i] == 0) {
7633 			/* skip this counter */
7634 			buf[i] = 0;
7635 			continue;
7636 		}
7637 
7638 		offset = bnx2_stats_offset_arr[i];
7639 		if (stats_len_arr[i] == 4) {
7640 			/* 4-byte counter */
7641 			buf[i] = (u64) *(hw_stats + offset) +
7642 				 *(temp_stats + offset);
7643 			continue;
7644 		}
7645 		/* 8-byte counter */
7646 		buf[i] = (((u64) *(hw_stats + offset)) << 32) +
7647 			 *(hw_stats + offset + 1) +
7648 			 (((u64) *(temp_stats + offset)) << 32) +
7649 			 *(temp_stats + offset + 1);
7650 	}
7651 }
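
/* Illustrative example of the 8-byte case above: the chip exports a
 * 64-bit counter as two consecutive 32-bit words, high word first, so
 * a stats block holding { 0x00000001, 0x00000002 } at 'offset' yields
 *
 *	buf[i] = ((u64) 0x00000001 << 32) + 0x00000002
 *	       = 0x0000000100000002
 *
 * with the same reconstruction added in from the temp_stats_blk copy
 * that preserves counters across chip resets.
 */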
7652 
7653 static int
7654 bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
7655 {
7656 	struct bnx2 *bp = netdev_priv(dev);
7657 
7658 	switch (state) {
7659 	case ETHTOOL_ID_ACTIVE:
7660 		bp->leds_save = BNX2_RD(bp, BNX2_MISC_CFG);
7661 		BNX2_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
7662 		return 1;	/* cycle on/off once per second */
7663 
7664 	case ETHTOOL_ID_ON:
7665 		BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
7666 			BNX2_EMAC_LED_1000MB_OVERRIDE |
7667 			BNX2_EMAC_LED_100MB_OVERRIDE |
7668 			BNX2_EMAC_LED_10MB_OVERRIDE |
7669 			BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
7670 			BNX2_EMAC_LED_TRAFFIC);
7671 		break;
7672 
7673 	case ETHTOOL_ID_OFF:
7674 		BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
7675 		break;
7676 
7677 	case ETHTOOL_ID_INACTIVE:
7678 		BNX2_WR(bp, BNX2_EMAC_LED, 0);
7679 		BNX2_WR(bp, BNX2_MISC_CFG, bp->leds_save);
7680 		break;
7681 	}
7682 
7683 	return 0;
7684 }
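
/* Usage note (illustrative): port identification is typically requested
 * with
 *
 *	ethtool -p eth0 5
 *
 * The ethtool core calls ETHTOOL_ID_ACTIVE once, then alternates
 * ETHTOOL_ID_ON / ETHTOOL_ID_OFF at the once-per-second rate requested
 * by the return value above, and finishes with ETHTOOL_ID_INACTIVE to
 * restore the LED mode saved in bp->leds_save.
 */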
7685 
7686 static netdev_features_t
7687 bnx2_fix_features(struct net_device *dev, netdev_features_t features)
7688 {
7689 	struct bnx2 *bp = netdev_priv(dev);
7690 
7691 	if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
7692 		features |= NETIF_F_HW_VLAN_CTAG_RX;
7693 
7694 	return features;
7695 }
7696 
7697 static int
7698 bnx2_set_features(struct net_device *dev, netdev_features_t features)
7699 {
7700 	struct bnx2 *bp = netdev_priv(dev);
7701 
7702 	/* TSO with VLAN tag won't work with current firmware */
7703 	if (features & NETIF_F_HW_VLAN_CTAG_TX)
7704 		dev->vlan_features |= (dev->hw_features & NETIF_F_ALL_TSO);
7705 	else
7706 		dev->vlan_features &= ~NETIF_F_ALL_TSO;
7707 
7708 	if ((!!(features & NETIF_F_HW_VLAN_CTAG_RX) !=
7709 	    !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) &&
7710 	    netif_running(dev)) {
7711 		bnx2_netif_stop(bp, false);
7712 		dev->features = features;
7713 		bnx2_set_rx_mode(dev);
7714 		bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
7715 		bnx2_netif_start(bp, false);
7716 		return 1;
7717 	}
7718 
7719 	return 0;
7720 }
7721 
7722 static void bnx2_get_channels(struct net_device *dev,
7723 			      struct ethtool_channels *channels)
7724 {
7725 	struct bnx2 *bp = netdev_priv(dev);
7726 	u32 max_rx_rings = 1;
7727 	u32 max_tx_rings = 1;
7728 
7729 	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7730 		max_rx_rings = RX_MAX_RINGS;
7731 		max_tx_rings = TX_MAX_RINGS;
7732 	}
7733 
7734 	channels->max_rx = max_rx_rings;
7735 	channels->max_tx = max_tx_rings;
7736 	channels->max_other = 0;
7737 	channels->max_combined = 0;
7738 	channels->rx_count = bp->num_rx_rings;
7739 	channels->tx_count = bp->num_tx_rings;
7740 	channels->other_count = 0;
7741 	channels->combined_count = 0;
7742 }
7743 
7744 static int bnx2_set_channels(struct net_device *dev,
7745 			      struct ethtool_channels *channels)
7746 {
7747 	struct bnx2 *bp = netdev_priv(dev);
7748 	u32 max_rx_rings = 1;
7749 	u32 max_tx_rings = 1;
7750 	int rc = 0;
7751 
7752 	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7753 		max_rx_rings = RX_MAX_RINGS;
7754 		max_tx_rings = TX_MAX_RINGS;
7755 	}
7756 	if (channels->rx_count > max_rx_rings ||
7757 	    channels->tx_count > max_tx_rings)
7758 		return -EINVAL;
7759 
7760 	bp->num_req_rx_rings = channels->rx_count;
7761 	bp->num_req_tx_rings = channels->tx_count;
7762 
7763 	if (netif_running(dev))
7764 		rc = bnx2_change_ring_size(bp, bp->rx_ring_size,
7765 					   bp->tx_ring_size, true);
7766 
7767 	return rc;
7768 }
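
/* Usage note (illustrative): ring counts can be inspected and changed
 * from userspace with
 *
 *	ethtool -l eth0
 *	ethtool -L eth0 rx 4 tx 4
 *
 * subject to the maxima computed above: RX_MAX_RINGS/TX_MAX_RINGS when
 * MSI-X is usable, otherwise 1.
 */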
7769 
7770 static const struct ethtool_ops bnx2_ethtool_ops = {
7771 	.get_settings		= bnx2_get_settings,
7772 	.set_settings		= bnx2_set_settings,
7773 	.get_drvinfo		= bnx2_get_drvinfo,
7774 	.get_regs_len		= bnx2_get_regs_len,
7775 	.get_regs		= bnx2_get_regs,
7776 	.get_wol		= bnx2_get_wol,
7777 	.set_wol		= bnx2_set_wol,
7778 	.nway_reset		= bnx2_nway_reset,
7779 	.get_link		= bnx2_get_link,
7780 	.get_eeprom_len		= bnx2_get_eeprom_len,
7781 	.get_eeprom		= bnx2_get_eeprom,
7782 	.set_eeprom		= bnx2_set_eeprom,
7783 	.get_coalesce		= bnx2_get_coalesce,
7784 	.set_coalesce		= bnx2_set_coalesce,
7785 	.get_ringparam		= bnx2_get_ringparam,
7786 	.set_ringparam		= bnx2_set_ringparam,
7787 	.get_pauseparam		= bnx2_get_pauseparam,
7788 	.set_pauseparam		= bnx2_set_pauseparam,
7789 	.self_test		= bnx2_self_test,
7790 	.get_strings		= bnx2_get_strings,
7791 	.set_phys_id		= bnx2_set_phys_id,
7792 	.get_ethtool_stats	= bnx2_get_ethtool_stats,
7793 	.get_sset_count		= bnx2_get_sset_count,
7794 	.get_channels		= bnx2_get_channels,
7795 	.set_channels		= bnx2_set_channels,
7796 };
7797 
7798 /* Called with rtnl_lock */
7799 static int
7800 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7801 {
7802 	struct mii_ioctl_data *data = if_mii(ifr);
7803 	struct bnx2 *bp = netdev_priv(dev);
7804 	int err;
7805 
7806 	switch(cmd) {
7807 	case SIOCGMIIPHY:
7808 		data->phy_id = bp->phy_addr;
7809 
7810 		/* fallthru */
7811 	case SIOCGMIIREG: {
7812 		u32 mii_regval;
7813 
7814 		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7815 			return -EOPNOTSUPP;
7816 
7817 		if (!netif_running(dev))
7818 			return -EAGAIN;
7819 
7820 		spin_lock_bh(&bp->phy_lock);
7821 		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
7822 		spin_unlock_bh(&bp->phy_lock);
7823 
7824 		data->val_out = mii_regval;
7825 
7826 		return err;
7827 	}
7828 
7829 	case SIOCSMIIREG:
7830 		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7831 			return -EOPNOTSUPP;
7832 
7833 		if (!netif_running(dev))
7834 			return -EAGAIN;
7835 
7836 		spin_lock_bh(&bp->phy_lock);
7837 		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
7838 		spin_unlock_bh(&bp->phy_lock);
7839 
7840 		return err;
7841 
7842 	default:
7843 		/* do nothing */
7844 		break;
7845 	}
7846 	return -EOPNOTSUPP;
7847 }
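
/* Illustrative sketch (hypothetical userspace counterpart): reading the
 * MII status register through the SIOCGMIIPHY/SIOCGMIIREG path above,
 * given an open AF_INET socket fd:
 *
 *	struct ifreq ifr = {0};
 *	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
 *
 *	strcpy(ifr.ifr_name, "eth0");	// hypothetical interface
 *	ioctl(fd, SIOCGMIIPHY, &ifr);	// fills mii->phy_id
 *	mii->reg_num = MII_BMSR;
 *	ioctl(fd, SIOCGMIIREG, &ifr);	// mii->val_out = BMSR contents
 */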
7848 
7849 /* Called with rtnl_lock */
7850 static int
7851 bnx2_change_mac_addr(struct net_device *dev, void *p)
7852 {
7853 	struct sockaddr *addr = p;
7854 	struct bnx2 *bp = netdev_priv(dev);
7855 
7856 	if (!is_valid_ether_addr(addr->sa_data))
7857 		return -EADDRNOTAVAIL;
7858 
7859 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7860 	if (netif_running(dev))
7861 		bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7862 
7863 	return 0;
7864 }
7865 
7866 /* Called with rtnl_lock */
7867 static int
7868 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7869 {
7870 	struct bnx2 *bp = netdev_priv(dev);
7871 
7872 	if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7873 		((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7874 		return -EINVAL;
7875 
7876 	dev->mtu = new_mtu;
7877 	return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size,
7878 				     false);
7879 }
7880 
7881 #ifdef CONFIG_NET_POLL_CONTROLLER
7882 static void
7883 poll_bnx2(struct net_device *dev)
7884 {
7885 	struct bnx2 *bp = netdev_priv(dev);
7886 	int i;
7887 
7888 	for (i = 0; i < bp->irq_nvecs; i++) {
7889 		struct bnx2_irq *irq = &bp->irq_tbl[i];
7890 
7891 		disable_irq(irq->vector);
7892 		irq->handler(irq->vector, &bp->bnx2_napi[i]);
7893 		enable_irq(irq->vector);
7894 	}
7895 }
7896 #endif
7897 
7898 static void
7899 bnx2_get_5709_media(struct bnx2 *bp)
7900 {
7901 	u32 val = BNX2_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7902 	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7903 	u32 strap;
7904 
7905 	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7906 		return;
7907 	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7908 		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7909 		return;
7910 	}
7911 
7912 	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7913 		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7914 	else
7915 		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7916 
7917 	if (bp->func == 0) {
7918 		switch (strap) {
7919 		case 0x4:
7920 		case 0x5:
7921 		case 0x6:
7922 			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7923 			return;
7924 		}
7925 	} else {
7926 		switch (strap) {
7927 		case 0x1:
7928 		case 0x2:
7929 		case 0x4:
7930 			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7931 			return;
7932 		}
7933 	}
7934 }
7935 
7936 static void
7937 bnx2_get_pci_speed(struct bnx2 *bp)
7938 {
7939 	u32 reg;
7940 
7941 	reg = BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS);
7942 	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
7943 		u32 clkreg;
7944 
7945 		bp->flags |= BNX2_FLAG_PCIX;
7946 
7947 		clkreg = BNX2_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
7948 
7949 		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
7950 		switch (clkreg) {
7951 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
7952 			bp->bus_speed_mhz = 133;
7953 			break;
7954 
7955 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
7956 			bp->bus_speed_mhz = 100;
7957 			break;
7958 
7959 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
7960 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
7961 			bp->bus_speed_mhz = 66;
7962 			break;
7963 
7964 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
7965 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
7966 			bp->bus_speed_mhz = 50;
7967 			break;
7968 
7969 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
7970 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
7971 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
7972 			bp->bus_speed_mhz = 33;
7973 			break;
7974 		}
7975 	} else {
7976 		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
7977 			bp->bus_speed_mhz = 66;
7978 		else
7979 			bp->bus_speed_mhz = 33;
7980 	}
7981 
7982 	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
7983 		bp->flags |= BNX2_FLAG_PCI_32BIT;
7984 }
7987 
7988 static void
7989 bnx2_read_vpd_fw_ver(struct bnx2 *bp)
7990 {
7991 	int rc, i, j;
7992 	u8 *data;
7993 	unsigned int block_end, rosize, len;
7994 
7995 #define BNX2_VPD_NVRAM_OFFSET	0x300
7996 #define BNX2_VPD_LEN		128
7997 #define BNX2_MAX_VER_SLEN	30
7998 
7999 	data = kmalloc(256, GFP_KERNEL);
8000 	if (!data)
8001 		return;
8002 
8003 	rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
8004 			     BNX2_VPD_LEN);
8005 	if (rc)
8006 		goto vpd_done;
8007 
8008 	for (i = 0; i < BNX2_VPD_LEN; i += 4) {
8009 		data[i] = data[i + BNX2_VPD_LEN + 3];
8010 		data[i + 1] = data[i + BNX2_VPD_LEN + 2];
8011 		data[i + 2] = data[i + BNX2_VPD_LEN + 1];
8012 		data[i + 3] = data[i + BNX2_VPD_LEN];
8013 	}
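	/* (Illustrative note: the loop above is equivalent to a swab32()
	 * on each 32-bit word of the image just read, i.e. NVRAM bytes
	 * {a,b,c,d} land in data[] as {d,c,b,a}; the VPD block is stored
	 * byte-swapped in NVRAM.)
	 */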
8014 
8015 	i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
8016 	if (i < 0)
8017 		goto vpd_done;
8018 
8019 	rosize = pci_vpd_lrdt_size(&data[i]);
8020 	i += PCI_VPD_LRDT_TAG_SIZE;
8021 	block_end = i + rosize;
8022 
8023 	if (block_end > BNX2_VPD_LEN)
8024 		goto vpd_done;
8025 
8026 	j = pci_vpd_find_info_keyword(data, i, rosize,
8027 				      PCI_VPD_RO_KEYWORD_MFR_ID);
8028 	if (j < 0)
8029 		goto vpd_done;
8030 
8031 	len = pci_vpd_info_field_size(&data[j]);
8032 
8033 	j += PCI_VPD_INFO_FLD_HDR_SIZE;
8034 	if (j + len > block_end || len != 4 ||
8035 	    memcmp(&data[j], "1028", 4))
8036 		goto vpd_done;
8037 
8038 	j = pci_vpd_find_info_keyword(data, i, rosize,
8039 				      PCI_VPD_RO_KEYWORD_VENDOR0);
8040 	if (j < 0)
8041 		goto vpd_done;
8042 
8043 	len = pci_vpd_info_field_size(&data[j]);
8044 
8045 	j += PCI_VPD_INFO_FLD_HDR_SIZE;
8046 	if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
8047 		goto vpd_done;
8048 
8049 	memcpy(bp->fw_version, &data[j], len);
8050 	bp->fw_version[len] = ' ';
8051 
8052 vpd_done:
8053 	kfree(data);
8054 }
8055 
8056 static int
8057 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
8058 {
8059 	struct bnx2 *bp;
8060 	int rc, i, j;
8061 	u32 reg;
8062 	u64 dma_mask, persist_dma_mask;
8063 	int err;
8064 
8065 	SET_NETDEV_DEV(dev, &pdev->dev);
8066 	bp = netdev_priv(dev);
8067 
8068 	bp->flags = 0;
8069 	bp->phy_flags = 0;
8070 
8071 	bp->temp_stats_blk =
8072 		kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
8073 
8074 	if (bp->temp_stats_blk == NULL) {
8075 		rc = -ENOMEM;
8076 		goto err_out;
8077 	}
8078 
8079 	/* enable device (incl. PCI PM wakeup), and bus-mastering */
8080 	rc = pci_enable_device(pdev);
8081 	if (rc) {
8082 		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
8083 		goto err_out;
8084 	}
8085 
8086 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
8087 		dev_err(&pdev->dev,
8088 			"Cannot find PCI device base address, aborting\n");
8089 		rc = -ENODEV;
8090 		goto err_out_disable;
8091 	}
8092 
8093 	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
8094 	if (rc) {
8095 		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
8096 		goto err_out_disable;
8097 	}
8098 
8099 	pci_set_master(pdev);
8100 
8101 	bp->pm_cap = pdev->pm_cap;
8102 	if (bp->pm_cap == 0) {
8103 		dev_err(&pdev->dev,
8104 			"Cannot find power management capability, aborting\n");
8105 		rc = -EIO;
8106 		goto err_out_release;
8107 	}
8108 
8109 	bp->dev = dev;
8110 	bp->pdev = pdev;
8111 
8112 	spin_lock_init(&bp->phy_lock);
8113 	spin_lock_init(&bp->indirect_lock);
8114 #ifdef BCM_CNIC
8115 	mutex_init(&bp->cnic_lock);
8116 #endif
8117 	INIT_WORK(&bp->reset_task, bnx2_reset_task);
8118 
8119 	bp->regview = pci_iomap(pdev, 0, MB_GET_CID_ADDR(TX_TSS_CID +
8120 							 TX_MAX_TSS_RINGS + 1));
8121 	if (!bp->regview) {
8122 		dev_err(&pdev->dev, "Cannot map register space, aborting\n");
8123 		rc = -ENOMEM;
8124 		goto err_out_release;
8125 	}
8126 
8127 	/* Configure byte swap and enable write to the reg_window registers.
8128 	 * Rely on the CPU to do target byte swapping on big endian systems;
8129 	 * the chip's target access swapping will not swap all accesses.
8130 	 */
8131 	BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG,
8132 		BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
8133 		BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
8134 
8135 	bp->chip_id = BNX2_RD(bp, BNX2_MISC_ID);
8136 
8137 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
8138 		if (!pci_is_pcie(pdev)) {
8139 			dev_err(&pdev->dev, "Not PCIE, aborting\n");
8140 			rc = -EIO;
8141 			goto err_out_unmap;
8142 		}
8143 		bp->flags |= BNX2_FLAG_PCIE;
8144 		if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
8145 			bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
8146 
8147 		/* AER (Advanced Error Reporting) hooks */
8148 		err = pci_enable_pcie_error_reporting(pdev);
8149 		if (!err)
8150 			bp->flags |= BNX2_FLAG_AER_ENABLED;
8151 
8152 	} else {
8153 		bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
8154 		if (bp->pcix_cap == 0) {
8155 			dev_err(&pdev->dev,
8156 				"Cannot find PCIX capability, aborting\n");
8157 			rc = -EIO;
8158 			goto err_out_unmap;
8159 		}
8160 		bp->flags |= BNX2_FLAG_BROKEN_STATS;
8161 	}
8162 
8163 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
8164 	    BNX2_CHIP_REV(bp) != BNX2_CHIP_REV_Ax) {
8165 		if (pdev->msix_cap)
8166 			bp->flags |= BNX2_FLAG_MSIX_CAP;
8167 	}
8168 
8169 	if (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0 &&
8170 	    BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A1) {
8171 		if (pdev->msi_cap)
8172 			bp->flags |= BNX2_FLAG_MSI_CAP;
8173 	}
8174 
8175 	/* 5708 cannot support DMA addresses > 40-bit.  */
8176 	if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
8177 		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
8178 	else
8179 		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
8180 
8181 	/* Configure DMA attributes. */
8182 	if (pci_set_dma_mask(pdev, dma_mask) == 0) {
8183 		dev->features |= NETIF_F_HIGHDMA;
8184 		rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
8185 		if (rc) {
8186 			dev_err(&pdev->dev,
8187 				"pci_set_consistent_dma_mask failed, aborting\n");
8188 			goto err_out_unmap;
8189 		}
8190 	} else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
8191 		dev_err(&pdev->dev, "System does not support DMA, aborting\n");
8192 		goto err_out_unmap;
8193 	}
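	/* (Illustrative equivalence only: on current kernels the mask setup
	 * above would typically be a single
	 *
	 *	dma_set_mask_and_coherent(&pdev->dev, dma_mask);
	 *
	 * with the same DMA_BIT_MASK(32) fallback on failure.)
	 */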
8194 
8195 	if (!(bp->flags & BNX2_FLAG_PCIE))
8196 		bnx2_get_pci_speed(bp);
8197 
8198 	/* 5706A0 may falsely detect SERR and PERR. */
8199 	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
8200 		reg = BNX2_RD(bp, PCI_COMMAND);
8201 		reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
8202 		BNX2_WR(bp, PCI_COMMAND, reg);
8203 	} else if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) &&
8204 		!(bp->flags & BNX2_FLAG_PCIX)) {
8205 		dev_err(&pdev->dev,
8206 			"5706 A1 can only be used in a PCIX bus, aborting\n");
8207 		rc = -EPERM;
8208 		goto err_out_unmap;
8209 	}
8210 
8211 	bnx2_init_nvram(bp);
8212 
8213 	reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
8214 
8215 	if (bnx2_reg_rd_ind(bp, BNX2_MCP_TOE_ID) & BNX2_MCP_TOE_ID_FUNCTION_ID)
8216 		bp->func = 1;
8217 
8218 	if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
8219 	    BNX2_SHM_HDR_SIGNATURE_SIG) {
8220 		u32 off = bp->func << 2;
8221 
8222 		bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
8223 	} else
8224 		bp->shmem_base = HOST_VIEW_SHMEM_BASE;
8225 
8226 	/* Get the permanent MAC address.  First we need to make sure the
8227 	 * firmware is actually running.
8228 	 */
8229 	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
8230 
8231 	if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
8232 	    BNX2_DEV_INFO_SIGNATURE_MAGIC) {
8233 		dev_err(&pdev->dev, "Firmware not running, aborting\n");
8234 		rc = -ENODEV;
8235 		goto err_out_unmap;
8236 	}
8237 
8238 	bnx2_read_vpd_fw_ver(bp);
8239 
8240 	j = strlen(bp->fw_version);
8241 	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
8242 	for (i = 0; i < 3 && j < 24; i++) {
8243 		u8 num, k, skip0;
8244 
8245 		if (i == 0) {
8246 			bp->fw_version[j++] = 'b';
8247 			bp->fw_version[j++] = 'c';
8248 			bp->fw_version[j++] = ' ';
8249 		}
8250 		num = (u8) (reg >> (24 - (i * 8)));
8251 		for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
8252 			if (num >= k || !skip0 || k == 1) {
8253 				bp->fw_version[j++] = (num / k) + '0';
8254 				skip0 = 0;
8255 			}
8256 		}
8257 		if (i != 2)
8258 			bp->fw_version[j++] = '.';
8259 	}
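	/* e.g. a BNX2_DEV_INFO_BC_REV value of 0x06020100 appends
	 * "bc 6.2.1" here, with leading zeros suppressed within each
	 * field by the skip0 logic above.
	 */
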
8260 	reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
8261 	if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
8262 		bp->wol = 1;
8263 
8264 	if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
8265 		bp->flags |= BNX2_FLAG_ASF_ENABLE;
8266 
8267 		for (i = 0; i < 30; i++) {
8268 			reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8269 			if (reg & BNX2_CONDITION_MFW_RUN_MASK)
8270 				break;
8271 			msleep(10);
8272 		}
8273 	}
8274 	reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8275 	reg &= BNX2_CONDITION_MFW_RUN_MASK;
8276 	if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
8277 	    reg != BNX2_CONDITION_MFW_RUN_NONE) {
8278 		u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
8279 
8280 		if (j < 32)
8281 			bp->fw_version[j++] = ' ';
8282 		for (i = 0; i < 3 && j < 28; i++) {
8283 			reg = bnx2_reg_rd_ind(bp, addr + i * 4);
8284 			reg = be32_to_cpu(reg);
8285 			memcpy(&bp->fw_version[j], &reg, 4);
8286 			j += 4;
8287 		}
8288 	}
8289 
8290 	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
8291 	bp->mac_addr[0] = (u8) (reg >> 8);
8292 	bp->mac_addr[1] = (u8) reg;
8293 
8294 	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
8295 	bp->mac_addr[2] = (u8) (reg >> 24);
8296 	bp->mac_addr[3] = (u8) (reg >> 16);
8297 	bp->mac_addr[4] = (u8) (reg >> 8);
8298 	bp->mac_addr[5] = (u8) reg;
8299 
8300 	bp->tx_ring_size = BNX2_MAX_TX_DESC_CNT;
8301 	bnx2_set_rx_ring_size(bp, 255);
8302 
8303 	bp->tx_quick_cons_trip_int = 2;
8304 	bp->tx_quick_cons_trip = 20;
8305 	bp->tx_ticks_int = 18;
8306 	bp->tx_ticks = 80;
8307 
8308 	bp->rx_quick_cons_trip_int = 2;
8309 	bp->rx_quick_cons_trip = 12;
8310 	bp->rx_ticks_int = 18;
8311 	bp->rx_ticks = 18;
8312 
8313 	bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
8314 
8315 	bp->current_interval = BNX2_TIMER_INTERVAL;
8316 
8317 	bp->phy_addr = 1;
8318 
8319 	/* Disable WOL support if we are running on a SERDES chip. */
8320 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
8321 		bnx2_get_5709_media(bp);
8322 	else if (BNX2_CHIP_BOND(bp) & BNX2_CHIP_BOND_SERDES_BIT)
8323 		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
8324 
8325 	bp->phy_port = PORT_TP;
8326 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
8327 		bp->phy_port = PORT_FIBRE;
8328 		reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
8329 		if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
8330 			bp->flags |= BNX2_FLAG_NO_WOL;
8331 			bp->wol = 0;
8332 		}
8333 		if (BNX2_CHIP(bp) == BNX2_CHIP_5706) {
8334 			/* Don't do parallel detect on this board because of
8335 			 * some board problems.  The link will not go down
8336 			 * if we do parallel detect.
8337 			 */
8338 			if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
8339 			    pdev->subsystem_device == 0x310c)
8340 				bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
8341 		} else {
8342 			bp->phy_addr = 2;
8343 			if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
8344 				bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
8345 		}
8346 	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5706 ||
8347 		   BNX2_CHIP(bp) == BNX2_CHIP_5708)
8348 		bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
8349 	else if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
8350 		 (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax ||
8351 		  BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Bx))
8352 		bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
8353 
8354 	bnx2_init_fw_cap(bp);
8355 
8356 	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
8357 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
8358 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1) ||
8359 	    !(BNX2_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
8360 		bp->flags |= BNX2_FLAG_NO_WOL;
8361 		bp->wol = 0;
8362 	}
8363 
8364 	if (bp->flags & BNX2_FLAG_NO_WOL)
8365 		device_set_wakeup_capable(&bp->pdev->dev, false);
8366 	else
8367 		device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
8368 
8369 	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
8370 		bp->tx_quick_cons_trip_int =
8371 			bp->tx_quick_cons_trip;
8372 		bp->tx_ticks_int = bp->tx_ticks;
8373 		bp->rx_quick_cons_trip_int =
8374 			bp->rx_quick_cons_trip;
8375 		bp->rx_ticks_int = bp->rx_ticks;
8376 		bp->comp_prod_trip_int = bp->comp_prod_trip;
8377 		bp->com_ticks_int = bp->com_ticks;
8378 		bp->cmd_ticks_int = bp->cmd_ticks;
8379 	}
8380 
8381 	/* Disable MSI on 5706 if AMD 8132 bridge is found.
8382 	 *
8383 	 * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
8384 	 * with byte enables disabled on the unused 32-bit word.  This is legal
8385 	 * but causes problems on the AMD 8132 which will eventually stop
8386 	 * responding after a while.
8387 	 *
8388 	 * AMD believes this incompatibility is unique to the 5706, and
8389 	 * prefers to locally disable MSI rather than globally disabling it.
8390 	 */
8391 	if (BNX2_CHIP(bp) == BNX2_CHIP_5706 && disable_msi == 0) {
8392 		struct pci_dev *amd_8132 = NULL;
8393 
8394 		while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
8395 						  PCI_DEVICE_ID_AMD_8132_BRIDGE,
8396 						  amd_8132))) {
8397 			if (amd_8132->revision >= 0x10 &&
8398 			    amd_8132->revision <= 0x13) {
8399 				disable_msi = 1;
8400 				pci_dev_put(amd_8132);
8401 				break;
8402 			}
8403 		}
8405 	}
8406 
8407 	bnx2_set_default_link(bp);
8408 	bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
8409 
8410 	init_timer(&bp->timer);
8411 	bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
8412 	bp->timer.data = (unsigned long) bp;
8413 	bp->timer.function = bnx2_timer;
8414 
8415 #ifdef BCM_CNIC
8416 	if (bnx2_shmem_rd(bp, BNX2_ISCSI_INITIATOR) & BNX2_ISCSI_INITIATOR_EN)
8417 		bp->cnic_eth_dev.max_iscsi_conn =
8418 			(bnx2_shmem_rd(bp, BNX2_ISCSI_MAX_CONN) &
8419 			 BNX2_ISCSI_MAX_CONN_MASK) >> BNX2_ISCSI_MAX_CONN_SHIFT;
8420 	bp->cnic_probe = bnx2_cnic_probe;
8421 #endif
8422 	pci_save_state(pdev);
8423 
8424 	return 0;
8425 
8426 err_out_unmap:
8427 	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
8428 		pci_disable_pcie_error_reporting(pdev);
8429 		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
8430 	}
8431 
8432 	pci_iounmap(pdev, bp->regview);
8433 	bp->regview = NULL;
8434 
8435 err_out_release:
8436 	pci_release_regions(pdev);
8437 
8438 err_out_disable:
8439 	pci_disable_device(pdev);
8440 
8441 err_out:
8442 	return rc;
8443 }
8444 
8445 static char *
8446 bnx2_bus_string(struct bnx2 *bp, char *str)
8447 {
8448 	char *s = str;
8449 
8450 	if (bp->flags & BNX2_FLAG_PCIE) {
8451 		s += sprintf(s, "PCI Express");
8452 	} else {
8453 		s += sprintf(s, "PCI");
8454 		if (bp->flags & BNX2_FLAG_PCIX)
8455 			s += sprintf(s, "-X");
8456 		if (bp->flags & BNX2_FLAG_PCI_32BIT)
8457 			s += sprintf(s, " 32-bit");
8458 		else
8459 			s += sprintf(s, " 64-bit");
8460 		s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
8461 	}
8462 	return str;
8463 }
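
/* Example strings produced above (illustrative): "PCI Express",
 * "PCI-X 64-bit 133MHz", "PCI 32-bit 66MHz".  Callers pass a buffer of
 * at least 40 bytes, as bnx2_init_one() does below.
 */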
8464 
8465 static void
8466 bnx2_del_napi(struct bnx2 *bp)
8467 {
8468 	int i;
8469 
8470 	for (i = 0; i < bp->irq_nvecs; i++)
8471 		netif_napi_del(&bp->bnx2_napi[i].napi);
8472 }
8473 
8474 static void
8475 bnx2_init_napi(struct bnx2 *bp)
8476 {
8477 	int i;
8478 
8479 	for (i = 0; i < bp->irq_nvecs; i++) {
8480 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8481 		int (*poll)(struct napi_struct *, int);
8482 
8483 		if (i == 0)
8484 			poll = bnx2_poll;
8485 		else
8486 			poll = bnx2_poll_msix;
8487 
8488 		netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
8489 		bnapi->bp = bp;
8490 	}
8491 }
8492 
8493 static const struct net_device_ops bnx2_netdev_ops = {
8494 	.ndo_open		= bnx2_open,
8495 	.ndo_start_xmit		= bnx2_start_xmit,
8496 	.ndo_stop		= bnx2_close,
8497 	.ndo_get_stats64	= bnx2_get_stats64,
8498 	.ndo_set_rx_mode	= bnx2_set_rx_mode,
8499 	.ndo_do_ioctl		= bnx2_ioctl,
8500 	.ndo_validate_addr	= eth_validate_addr,
8501 	.ndo_set_mac_address	= bnx2_change_mac_addr,
8502 	.ndo_change_mtu		= bnx2_change_mtu,
8503 	.ndo_fix_features	= bnx2_fix_features,
8504 	.ndo_set_features	= bnx2_set_features,
8505 	.ndo_tx_timeout		= bnx2_tx_timeout,
8506 #ifdef CONFIG_NET_POLL_CONTROLLER
8507 	.ndo_poll_controller	= poll_bnx2,
8508 #endif
8509 };
8510 
8511 static int
8512 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8513 {
8514 	static int version_printed = 0;
8515 	struct net_device *dev;
8516 	struct bnx2 *bp;
8517 	int rc;
8518 	char str[40];
8519 
8520 	if (version_printed++ == 0)
8521 		pr_info("%s", version);
8522 
8523 	/* dev zeroed in alloc_etherdev_mq() */
8524 	dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
8525 	if (!dev)
8526 		return -ENOMEM;
8527 
8528 	rc = bnx2_init_board(pdev, dev);
8529 	if (rc < 0)
8530 		goto err_free;
8531 
8532 	dev->netdev_ops = &bnx2_netdev_ops;
8533 	dev->watchdog_timeo = TX_TIMEOUT;
8534 	dev->ethtool_ops = &bnx2_ethtool_ops;
8535 
8536 	bp = netdev_priv(dev);
8537 
8538 	pci_set_drvdata(pdev, dev);
8539 
8540 	memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN);
8541 
8542 	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
8543 		NETIF_F_TSO | NETIF_F_TSO_ECN |
8544 		NETIF_F_RXHASH | NETIF_F_RXCSUM;
8545 
8546 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
8547 		dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
8548 
8549 	dev->vlan_features = dev->hw_features;
8550 	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
8551 	dev->features |= dev->hw_features;
8552 	dev->priv_flags |= IFF_UNICAST_FLT;
8553 
8554 	if ((rc = register_netdev(dev))) {
8555 		dev_err(&pdev->dev, "Cannot register net device\n");
8556 		goto error;
8557 	}
8558 
8559 	netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, "
8560 		    "node addr %pM\n", board_info[ent->driver_data].name,
8561 		    ((BNX2_CHIP_ID(bp) & 0xf000) >> 12) + 'A',
8562 		    ((BNX2_CHIP_ID(bp) & 0x0ff0) >> 4),
8563 		    bnx2_bus_string(bp, str), (long)pci_resource_start(pdev, 0),
8564 		    pdev->irq, dev->dev_addr);
8565 
8566 	return 0;
8567 
8568 error:
8569 	pci_iounmap(pdev, bp->regview);
8570 	pci_release_regions(pdev);
8571 	pci_disable_device(pdev);
8572 err_free:
8573 	free_netdev(dev);
8574 	return rc;
8575 }
8576 
8577 static void
8578 bnx2_remove_one(struct pci_dev *pdev)
8579 {
8580 	struct net_device *dev = pci_get_drvdata(pdev);
8581 	struct bnx2 *bp = netdev_priv(dev);
8582 
8583 	unregister_netdev(dev);
8584 
8585 	del_timer_sync(&bp->timer);
8586 	cancel_work_sync(&bp->reset_task);
8587 
8588 	pci_iounmap(bp->pdev, bp->regview);
8589 
8590 	kfree(bp->temp_stats_blk);
8591 
8592 	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
8593 		pci_disable_pcie_error_reporting(pdev);
8594 		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
8595 	}
8596 
8597 	bnx2_release_firmware(bp);
8598 
8599 	free_netdev(dev);
8600 
8601 	pci_release_regions(pdev);
8602 	pci_disable_device(pdev);
8603 }
8604 
8605 static int
8606 bnx2_suspend(struct device *device)
8607 {
8608 	struct pci_dev *pdev = to_pci_dev(device);
8609 	struct net_device *dev = pci_get_drvdata(pdev);
8610 	struct bnx2 *bp = netdev_priv(dev);
8611 
8612 	if (netif_running(dev)) {
8613 		cancel_work_sync(&bp->reset_task);
8614 		bnx2_netif_stop(bp, true);
8615 		netif_device_detach(dev);
8616 		del_timer_sync(&bp->timer);
8617 		bnx2_shutdown_chip(bp);
8618 		__bnx2_free_irq(bp);
8619 		bnx2_free_skbs(bp);
8620 	}
8621 	bnx2_setup_wol(bp);
8622 	return 0;
8623 }
8624 
8625 static int
8626 bnx2_resume(struct device *device)
8627 {
8628 	struct pci_dev *pdev = to_pci_dev(device);
8629 	struct net_device *dev = pci_get_drvdata(pdev);
8630 	struct bnx2 *bp = netdev_priv(dev);
8631 
8632 	if (!netif_running(dev))
8633 		return 0;
8634 
8635 	bnx2_set_power_state(bp, PCI_D0);
8636 	netif_device_attach(dev);
8637 	bnx2_request_irq(bp);
8638 	bnx2_init_nic(bp, 1);
8639 	bnx2_netif_start(bp, true);
8640 	return 0;
8641 }
8642 
8643 #ifdef CONFIG_PM_SLEEP
8644 static SIMPLE_DEV_PM_OPS(bnx2_pm_ops, bnx2_suspend, bnx2_resume);
8645 #define BNX2_PM_OPS (&bnx2_pm_ops)
8646 
8647 #else
8648 
8649 #define BNX2_PM_OPS NULL
8650 #endif /* CONFIG_PM_SLEEP */
8651 
8652 /**
8653  * bnx2_io_error_detected - called when PCI error is detected
8654  * @pdev: Pointer to PCI device
8655  * @state: The current pci connection state
8656  *
8657  * This function is called after a PCI bus error affecting
8658  * this device has been detected.
8659  */
8660 static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
8661 					       pci_channel_state_t state)
8662 {
8663 	struct net_device *dev = pci_get_drvdata(pdev);
8664 	struct bnx2 *bp = netdev_priv(dev);
8665 
8666 	rtnl_lock();
8667 	netif_device_detach(dev);
8668 
8669 	if (state == pci_channel_io_perm_failure) {
8670 		rtnl_unlock();
8671 		return PCI_ERS_RESULT_DISCONNECT;
8672 	}
8673 
8674 	if (netif_running(dev)) {
8675 		bnx2_netif_stop(bp, true);
8676 		del_timer_sync(&bp->timer);
8677 		bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
8678 	}
8679 
8680 	pci_disable_device(pdev);
8681 	rtnl_unlock();
8682 
8683 	/* Request a slot reset. */
8684 	return PCI_ERS_RESULT_NEED_RESET;
8685 }
8686 
8687 /**
8688  * bnx2_io_slot_reset - called after the pci bus has been reset.
8689  * @pdev: Pointer to PCI device
8690  *
8691  * Restart the card from scratch, as if from a cold-boot.
8692  */
8693 static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
8694 {
8695 	struct net_device *dev = pci_get_drvdata(pdev);
8696 	struct bnx2 *bp = netdev_priv(dev);
8697 	pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
8698 	int err = 0;
8699 
8700 	rtnl_lock();
8701 	if (pci_enable_device(pdev)) {
8702 		dev_err(&pdev->dev,
8703 			"Cannot re-enable PCI device after reset\n");
8704 	} else {
8705 		pci_set_master(pdev);
8706 		pci_restore_state(pdev);
8707 		pci_save_state(pdev);
8708 
8709 		if (netif_running(dev))
8710 			err = bnx2_init_nic(bp, 1);
8711 
8712 		if (!err)
8713 			result = PCI_ERS_RESULT_RECOVERED;
8714 	}
8715 
8716 	if (result != PCI_ERS_RESULT_RECOVERED && netif_running(dev)) {
8717 		bnx2_napi_enable(bp);
8718 		dev_close(dev);
8719 	}
8720 	rtnl_unlock();
8721 
8722 	if (!(bp->flags & BNX2_FLAG_AER_ENABLED))
8723 		return result;
8724 
8725 	err = pci_cleanup_aer_uncorrect_error_status(pdev);
8726 	if (err) {
8727 		dev_err(&pdev->dev,
8728 			"pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
8729 			 err); /* non-fatal, continue */
8730 	}
8731 
8732 	return result;
8733 }
8734 
8735 /**
8736  * bnx2_io_resume - called when traffic can start flowing again.
8737  * @pdev: Pointer to PCI device
8738  *
8739  * This callback is called when the error recovery driver tells us that
8740  * it's OK to resume normal operation.
8741  */
8742 static void bnx2_io_resume(struct pci_dev *pdev)
8743 {
8744 	struct net_device *dev = pci_get_drvdata(pdev);
8745 	struct bnx2 *bp = netdev_priv(dev);
8746 
8747 	rtnl_lock();
8748 	if (netif_running(dev))
8749 		bnx2_netif_start(bp, true);
8750 
8751 	netif_device_attach(dev);
8752 	rtnl_unlock();
8753 }
8754 
8755 static void bnx2_shutdown(struct pci_dev *pdev)
8756 {
8757 	struct net_device *dev = pci_get_drvdata(pdev);
8758 	struct bnx2 *bp;
8759 
8760 	if (!dev)
8761 		return;
8762 
8763 	bp = netdev_priv(dev);
8764 	if (!bp)
8765 		return;
8766 
8767 	rtnl_lock();
8768 	if (netif_running(dev))
8769 		dev_close(bp->dev);
8770 
8771 	if (system_state == SYSTEM_POWER_OFF)
8772 		bnx2_set_power_state(bp, PCI_D3hot);
8773 
8774 	rtnl_unlock();
8775 }
8776 
8777 static const struct pci_error_handlers bnx2_err_handler = {
8778 	.error_detected	= bnx2_io_error_detected,
8779 	.slot_reset	= bnx2_io_slot_reset,
8780 	.resume		= bnx2_io_resume,
8781 };
8782 
8783 static struct pci_driver bnx2_pci_driver = {
8784 	.name		= DRV_MODULE_NAME,
8785 	.id_table	= bnx2_pci_tbl,
8786 	.probe		= bnx2_init_one,
8787 	.remove		= bnx2_remove_one,
8788 	.driver.pm	= BNX2_PM_OPS,
8789 	.err_handler	= &bnx2_err_handler,
8790 	.shutdown	= bnx2_shutdown,
8791 };
8792 
8793 module_pci_driver(bnx2_pci_driver);
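
/* module_pci_driver() above expands to the standard module_init() /
 * module_exit() pair, calling pci_register_driver(&bnx2_pci_driver) on
 * load and pci_unregister_driver() on unload.
 */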
8794