1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004-2011 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11 
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13 
14 #include <linux/module.h>
15 #include <linux/moduleparam.h>
16 
17 #include <linux/stringify.h>
18 #include <linux/kernel.h>
19 #include <linux/timer.h>
20 #include <linux/errno.h>
21 #include <linux/ioport.h>
22 #include <linux/slab.h>
23 #include <linux/vmalloc.h>
24 #include <linux/interrupt.h>
25 #include <linux/pci.h>
26 #include <linux/init.h>
27 #include <linux/netdevice.h>
28 #include <linux/etherdevice.h>
29 #include <linux/skbuff.h>
30 #include <linux/dma-mapping.h>
31 #include <linux/bitops.h>
32 #include <asm/io.h>
33 #include <asm/irq.h>
34 #include <linux/delay.h>
35 #include <asm/byteorder.h>
36 #include <asm/page.h>
37 #include <linux/time.h>
38 #include <linux/ethtool.h>
39 #include <linux/mii.h>
40 #include <linux/if.h>
41 #include <linux/if_vlan.h>
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/firmware.h>
50 #include <linux/log2.h>
51 #include <linux/aer.h>
52 
53 #if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
54 #define BCM_CNIC 1
55 #include "cnic_if.h"
56 #endif
57 #include "bnx2.h"
58 #include "bnx2_fw.h"
59 
60 #define DRV_MODULE_NAME		"bnx2"
61 #define DRV_MODULE_VERSION	"2.2.3"
62 #define DRV_MODULE_RELDATE	"June 27, 2012"
63 #define FW_MIPS_FILE_06		"bnx2/bnx2-mips-06-6.2.3.fw"
64 #define FW_RV2P_FILE_06		"bnx2/bnx2-rv2p-06-6.0.15.fw"
65 #define FW_MIPS_FILE_09		"bnx2/bnx2-mips-09-6.2.1b.fw"
66 #define FW_RV2P_FILE_09_Ax	"bnx2/bnx2-rv2p-09ax-6.0.17.fw"
67 #define FW_RV2P_FILE_09		"bnx2/bnx2-rv2p-09-6.0.17.fw"
68 
69 #define RUN_AT(x) (jiffies + (x))
70 
71 /* Time in jiffies before concluding the transmitter is hung. */
72 #define TX_TIMEOUT  (5*HZ)
73 
74 static char version[] =
75 	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
76 
77 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
78 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
79 MODULE_LICENSE("GPL");
80 MODULE_VERSION(DRV_MODULE_VERSION);
81 MODULE_FIRMWARE(FW_MIPS_FILE_06);
82 MODULE_FIRMWARE(FW_RV2P_FILE_06);
83 MODULE_FIRMWARE(FW_MIPS_FILE_09);
84 MODULE_FIRMWARE(FW_RV2P_FILE_09);
85 MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);
86 
87 static int disable_msi = 0;
88 
89 module_param(disable_msi, int, 0);
90 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
91 
92 typedef enum {
93 	BCM5706 = 0,
94 	NC370T,
95 	NC370I,
96 	BCM5706S,
97 	NC370F,
98 	BCM5708,
99 	BCM5708S,
100 	BCM5709,
101 	BCM5709S,
102 	BCM5716,
103 	BCM5716S,
104 } board_t;
105 
106 /* indexed by board_t, above */
107 static struct {
108 	char *name;
109 } board_info[] = {
110 	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
111 	{ "HP NC370T Multifunction Gigabit Server Adapter" },
112 	{ "HP NC370i Multifunction Gigabit Server Adapter" },
113 	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
114 	{ "HP NC370F Multifunction Gigabit Server Adapter" },
115 	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
116 	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
117 	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
118 	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
119 	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
120 	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
121 	};
122 
123 static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
124 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
125 	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
126 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
127 	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
128 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
129 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
130 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
131 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
132 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
133 	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
134 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
135 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
136 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
137 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
138 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
139 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
140 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
141 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
142 	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
143 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
144 	{ PCI_VENDOR_ID_BROADCOM, 0x163c,
145 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
146 	{ 0, }
147 };
148 
149 static const struct flash_spec flash_table[] =
150 {
151 #define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
152 #define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
153 	/* Slow EEPROM */
154 	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
155 	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
156 	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
157 	 "EEPROM - slow"},
158 	/* Expansion entry 0001 */
159 	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
160 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
161 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
162 	 "Entry 0001"},
163 	/* Saifun SA25F010 (non-buffered flash) */
164 	/* strap, cfg1, & write1 need updates */
165 	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
166 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
167 	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
168 	 "Non-buffered flash (128kB)"},
169 	/* Saifun SA25F020 (non-buffered flash) */
170 	/* strap, cfg1, & write1 need updates */
171 	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
172 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
173 	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
174 	 "Non-buffered flash (256kB)"},
175 	/* Expansion entry 0100 */
176 	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
177 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
178 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
179 	 "Entry 0100"},
180 	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
181 	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
182 	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
183 	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
184 	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
185 	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
186 	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
187 	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
188 	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
189 	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
190 	/* Saifun SA25F005 (non-buffered flash) */
191 	/* strap, cfg1, & write1 need updates */
192 	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
193 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
194 	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
195 	 "Non-buffered flash (64kB)"},
196 	/* Fast EEPROM */
197 	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
198 	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
199 	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
200 	 "EEPROM - fast"},
201 	/* Expansion entry 1001 */
202 	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
203 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
204 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
205 	 "Entry 1001"},
206 	/* Expansion entry 1010 */
207 	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
208 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
209 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
210 	 "Entry 1010"},
211 	/* ATMEL AT45DB011B (buffered flash) */
212 	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
213 	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
214 	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
215 	 "Buffered flash (128kB)"},
216 	/* Expansion entry 1100 */
217 	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
218 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
219 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
220 	 "Entry 1100"},
221 	/* Expansion entry 1101 */
222 	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
223 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
224 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
225 	 "Entry 1101"},
226 	/* Ateml Expansion entry 1110 */
227 	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
228 	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
229 	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
230 	 "Entry 1110 (Atmel)"},
231 	/* ATMEL AT45DB021B (buffered flash) */
232 	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
233 	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
234 	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
235 	 "Buffered flash (256kB)"},
236 };
237 
238 static const struct flash_spec flash_5709 = {
239 	.flags		= BNX2_NV_BUFFERED,
240 	.page_bits	= BCM5709_FLASH_PAGE_BITS,
241 	.page_size	= BCM5709_FLASH_PAGE_SIZE,
242 	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
243 	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
244 	.name		= "5709 Buffered flash (256kB)",
245 };
246 
247 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
248 
249 static void bnx2_init_napi(struct bnx2 *bp);
250 static void bnx2_del_napi(struct bnx2 *bp);
251 
252 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
253 {
254 	u32 diff;
255 
256 	/* Tell compiler to fetch tx_prod and tx_cons from memory. */
257 	barrier();
258 
259 	/* The ring uses 256 indices for 255 entries, one of them
260 	 * needs to be skipped.
261 	 */
262 	diff = txr->tx_prod - txr->tx_cons;
263 	if (unlikely(diff >= BNX2_TX_DESC_CNT)) {
264 		diff &= 0xffff;
265 		if (diff == BNX2_TX_DESC_CNT)
266 			diff = BNX2_MAX_TX_DESC_CNT;
267 	}
268 	return bp->tx_ring_size - diff;
269 }
270 
271 static u32
272 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
273 {
274 	u32 val;
275 
276 	spin_lock_bh(&bp->indirect_lock);
277 	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
278 	val = BNX2_RD(bp, BNX2_PCICFG_REG_WINDOW);
279 	spin_unlock_bh(&bp->indirect_lock);
280 	return val;
281 }
282 
283 static void
284 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
285 {
286 	spin_lock_bh(&bp->indirect_lock);
287 	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
288 	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
289 	spin_unlock_bh(&bp->indirect_lock);
290 }
291 
292 static void
293 bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
294 {
295 	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
296 }
297 
298 static u32
299 bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
300 {
301 	return bnx2_reg_rd_ind(bp, bp->shmem_base + offset);
302 }
303 
304 static void
305 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
306 {
307 	offset += cid_addr;
308 	spin_lock_bh(&bp->indirect_lock);
309 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
310 		int i;
311 
312 		BNX2_WR(bp, BNX2_CTX_CTX_DATA, val);
313 		BNX2_WR(bp, BNX2_CTX_CTX_CTRL,
314 			offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
315 		for (i = 0; i < 5; i++) {
316 			val = BNX2_RD(bp, BNX2_CTX_CTX_CTRL);
317 			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
318 				break;
319 			udelay(5);
320 		}
321 	} else {
322 		BNX2_WR(bp, BNX2_CTX_DATA_ADR, offset);
323 		BNX2_WR(bp, BNX2_CTX_DATA, val);
324 	}
325 	spin_unlock_bh(&bp->indirect_lock);
326 }
327 
328 #ifdef BCM_CNIC
329 static int
330 bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
331 {
332 	struct bnx2 *bp = netdev_priv(dev);
333 	struct drv_ctl_io *io = &info->data.io;
334 
335 	switch (info->cmd) {
336 	case DRV_CTL_IO_WR_CMD:
337 		bnx2_reg_wr_ind(bp, io->offset, io->data);
338 		break;
339 	case DRV_CTL_IO_RD_CMD:
340 		io->data = bnx2_reg_rd_ind(bp, io->offset);
341 		break;
342 	case DRV_CTL_CTX_WR_CMD:
343 		bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
344 		break;
345 	default:
346 		return -EINVAL;
347 	}
348 	return 0;
349 }
350 
351 static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
352 {
353 	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
354 	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
355 	int sb_id;
356 
357 	if (bp->flags & BNX2_FLAG_USING_MSIX) {
358 		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
359 		bnapi->cnic_present = 0;
360 		sb_id = bp->irq_nvecs;
361 		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
362 	} else {
363 		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
364 		bnapi->cnic_tag = bnapi->last_status_idx;
365 		bnapi->cnic_present = 1;
366 		sb_id = 0;
367 		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
368 	}
369 
370 	cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
371 	cp->irq_arr[0].status_blk = (void *)
372 		((unsigned long) bnapi->status_blk.msi +
373 		(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
374 	cp->irq_arr[0].status_blk_num = sb_id;
375 	cp->num_irq = 1;
376 }
377 
378 static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
379 			      void *data)
380 {
381 	struct bnx2 *bp = netdev_priv(dev);
382 	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
383 
384 	if (ops == NULL)
385 		return -EINVAL;
386 
387 	if (cp->drv_state & CNIC_DRV_STATE_REGD)
388 		return -EBUSY;
389 
390 	if (!bnx2_reg_rd_ind(bp, BNX2_FW_MAX_ISCSI_CONN))
391 		return -ENODEV;
392 
393 	bp->cnic_data = data;
394 	rcu_assign_pointer(bp->cnic_ops, ops);
395 
396 	cp->num_irq = 0;
397 	cp->drv_state = CNIC_DRV_STATE_REGD;
398 
399 	bnx2_setup_cnic_irq_info(bp);
400 
401 	return 0;
402 }
403 
404 static int bnx2_unregister_cnic(struct net_device *dev)
405 {
406 	struct bnx2 *bp = netdev_priv(dev);
407 	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
408 	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
409 
410 	mutex_lock(&bp->cnic_lock);
411 	cp->drv_state = 0;
412 	bnapi->cnic_present = 0;
413 	RCU_INIT_POINTER(bp->cnic_ops, NULL);
414 	mutex_unlock(&bp->cnic_lock);
415 	synchronize_rcu();
416 	return 0;
417 }
418 
419 static struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
420 {
421 	struct bnx2 *bp = netdev_priv(dev);
422 	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
423 
424 	if (!cp->max_iscsi_conn)
425 		return NULL;
426 
427 	cp->drv_owner = THIS_MODULE;
428 	cp->chip_id = bp->chip_id;
429 	cp->pdev = bp->pdev;
430 	cp->io_base = bp->regview;
431 	cp->drv_ctl = bnx2_drv_ctl;
432 	cp->drv_register_cnic = bnx2_register_cnic;
433 	cp->drv_unregister_cnic = bnx2_unregister_cnic;
434 
435 	return cp;
436 }
437 
438 static void
439 bnx2_cnic_stop(struct bnx2 *bp)
440 {
441 	struct cnic_ops *c_ops;
442 	struct cnic_ctl_info info;
443 
444 	mutex_lock(&bp->cnic_lock);
445 	c_ops = rcu_dereference_protected(bp->cnic_ops,
446 					  lockdep_is_held(&bp->cnic_lock));
447 	if (c_ops) {
448 		info.cmd = CNIC_CTL_STOP_CMD;
449 		c_ops->cnic_ctl(bp->cnic_data, &info);
450 	}
451 	mutex_unlock(&bp->cnic_lock);
452 }
453 
454 static void
455 bnx2_cnic_start(struct bnx2 *bp)
456 {
457 	struct cnic_ops *c_ops;
458 	struct cnic_ctl_info info;
459 
460 	mutex_lock(&bp->cnic_lock);
461 	c_ops = rcu_dereference_protected(bp->cnic_ops,
462 					  lockdep_is_held(&bp->cnic_lock));
463 	if (c_ops) {
464 		if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
465 			struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
466 
467 			bnapi->cnic_tag = bnapi->last_status_idx;
468 		}
469 		info.cmd = CNIC_CTL_START_CMD;
470 		c_ops->cnic_ctl(bp->cnic_data, &info);
471 	}
472 	mutex_unlock(&bp->cnic_lock);
473 }
474 
475 #else
476 
477 static void
478 bnx2_cnic_stop(struct bnx2 *bp)
479 {
480 }
481 
482 static void
483 bnx2_cnic_start(struct bnx2 *bp)
484 {
485 }
486 
487 #endif
488 
489 static int
490 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
491 {
492 	u32 val1;
493 	int i, ret;
494 
495 	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
496 		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
497 		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
498 
499 		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
500 		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
501 
502 		udelay(40);
503 	}
504 
505 	val1 = (bp->phy_addr << 21) | (reg << 16) |
506 		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
507 		BNX2_EMAC_MDIO_COMM_START_BUSY;
508 	BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
509 
510 	for (i = 0; i < 50; i++) {
511 		udelay(10);
512 
513 		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
514 		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
515 			udelay(5);
516 
517 			val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
518 			val1 &= BNX2_EMAC_MDIO_COMM_DATA;
519 
520 			break;
521 		}
522 	}
523 
524 	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
525 		*val = 0x0;
526 		ret = -EBUSY;
527 	}
528 	else {
529 		*val = val1;
530 		ret = 0;
531 	}
532 
533 	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
534 		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
535 		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
536 
537 		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
538 		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
539 
540 		udelay(40);
541 	}
542 
543 	return ret;
544 }
545 
546 static int
547 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
548 {
549 	u32 val1;
550 	int i, ret;
551 
552 	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
553 		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
554 		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
555 
556 		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
557 		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
558 
559 		udelay(40);
560 	}
561 
562 	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
563 		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
564 		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
565 	BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
566 
567 	for (i = 0; i < 50; i++) {
568 		udelay(10);
569 
570 		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
571 		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
572 			udelay(5);
573 			break;
574 		}
575 	}
576 
577 	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
578         	ret = -EBUSY;
579 	else
580 		ret = 0;
581 
582 	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
583 		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
584 		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
585 
586 		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
587 		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
588 
589 		udelay(40);
590 	}
591 
592 	return ret;
593 }
594 
595 static void
596 bnx2_disable_int(struct bnx2 *bp)
597 {
598 	int i;
599 	struct bnx2_napi *bnapi;
600 
601 	for (i = 0; i < bp->irq_nvecs; i++) {
602 		bnapi = &bp->bnx2_napi[i];
603 		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
604 		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
605 	}
606 	BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
607 }
608 
609 static void
610 bnx2_enable_int(struct bnx2 *bp)
611 {
612 	int i;
613 	struct bnx2_napi *bnapi;
614 
615 	for (i = 0; i < bp->irq_nvecs; i++) {
616 		bnapi = &bp->bnx2_napi[i];
617 
618 		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
619 			BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
620 			BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
621 			bnapi->last_status_idx);
622 
623 		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
624 			BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
625 			bnapi->last_status_idx);
626 	}
627 	BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
628 }
629 
630 static void
631 bnx2_disable_int_sync(struct bnx2 *bp)
632 {
633 	int i;
634 
635 	atomic_inc(&bp->intr_sem);
636 	if (!netif_running(bp->dev))
637 		return;
638 
639 	bnx2_disable_int(bp);
640 	for (i = 0; i < bp->irq_nvecs; i++)
641 		synchronize_irq(bp->irq_tbl[i].vector);
642 }
643 
644 static void
645 bnx2_napi_disable(struct bnx2 *bp)
646 {
647 	int i;
648 
649 	for (i = 0; i < bp->irq_nvecs; i++)
650 		napi_disable(&bp->bnx2_napi[i].napi);
651 }
652 
653 static void
654 bnx2_napi_enable(struct bnx2 *bp)
655 {
656 	int i;
657 
658 	for (i = 0; i < bp->irq_nvecs; i++)
659 		napi_enable(&bp->bnx2_napi[i].napi);
660 }
661 
662 static void
663 bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
664 {
665 	if (stop_cnic)
666 		bnx2_cnic_stop(bp);
667 	if (netif_running(bp->dev)) {
668 		bnx2_napi_disable(bp);
669 		netif_tx_disable(bp->dev);
670 	}
671 	bnx2_disable_int_sync(bp);
672 	netif_carrier_off(bp->dev);	/* prevent tx timeout */
673 }
674 
675 static void
676 bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
677 {
678 	if (atomic_dec_and_test(&bp->intr_sem)) {
679 		if (netif_running(bp->dev)) {
680 			netif_tx_wake_all_queues(bp->dev);
681 			spin_lock_bh(&bp->phy_lock);
682 			if (bp->link_up)
683 				netif_carrier_on(bp->dev);
684 			spin_unlock_bh(&bp->phy_lock);
685 			bnx2_napi_enable(bp);
686 			bnx2_enable_int(bp);
687 			if (start_cnic)
688 				bnx2_cnic_start(bp);
689 		}
690 	}
691 }
692 
693 static void
694 bnx2_free_tx_mem(struct bnx2 *bp)
695 {
696 	int i;
697 
698 	for (i = 0; i < bp->num_tx_rings; i++) {
699 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
700 		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
701 
702 		if (txr->tx_desc_ring) {
703 			dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
704 					  txr->tx_desc_ring,
705 					  txr->tx_desc_mapping);
706 			txr->tx_desc_ring = NULL;
707 		}
708 		kfree(txr->tx_buf_ring);
709 		txr->tx_buf_ring = NULL;
710 	}
711 }
712 
713 static void
714 bnx2_free_rx_mem(struct bnx2 *bp)
715 {
716 	int i;
717 
718 	for (i = 0; i < bp->num_rx_rings; i++) {
719 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
720 		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
721 		int j;
722 
723 		for (j = 0; j < bp->rx_max_ring; j++) {
724 			if (rxr->rx_desc_ring[j])
725 				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
726 						  rxr->rx_desc_ring[j],
727 						  rxr->rx_desc_mapping[j]);
728 			rxr->rx_desc_ring[j] = NULL;
729 		}
730 		vfree(rxr->rx_buf_ring);
731 		rxr->rx_buf_ring = NULL;
732 
733 		for (j = 0; j < bp->rx_max_pg_ring; j++) {
734 			if (rxr->rx_pg_desc_ring[j])
735 				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
736 						  rxr->rx_pg_desc_ring[j],
737 						  rxr->rx_pg_desc_mapping[j]);
738 			rxr->rx_pg_desc_ring[j] = NULL;
739 		}
740 		vfree(rxr->rx_pg_ring);
741 		rxr->rx_pg_ring = NULL;
742 	}
743 }
744 
745 static int
746 bnx2_alloc_tx_mem(struct bnx2 *bp)
747 {
748 	int i;
749 
750 	for (i = 0; i < bp->num_tx_rings; i++) {
751 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
752 		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
753 
754 		txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
755 		if (txr->tx_buf_ring == NULL)
756 			return -ENOMEM;
757 
758 		txr->tx_desc_ring =
759 			dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
760 					   &txr->tx_desc_mapping, GFP_KERNEL);
761 		if (txr->tx_desc_ring == NULL)
762 			return -ENOMEM;
763 	}
764 	return 0;
765 }
766 
767 static int
768 bnx2_alloc_rx_mem(struct bnx2 *bp)
769 {
770 	int i;
771 
772 	for (i = 0; i < bp->num_rx_rings; i++) {
773 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
774 		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
775 		int j;
776 
777 		rxr->rx_buf_ring =
778 			vzalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
779 		if (rxr->rx_buf_ring == NULL)
780 			return -ENOMEM;
781 
782 		for (j = 0; j < bp->rx_max_ring; j++) {
783 			rxr->rx_desc_ring[j] =
784 				dma_alloc_coherent(&bp->pdev->dev,
785 						   RXBD_RING_SIZE,
786 						   &rxr->rx_desc_mapping[j],
787 						   GFP_KERNEL);
788 			if (rxr->rx_desc_ring[j] == NULL)
789 				return -ENOMEM;
790 
791 		}
792 
793 		if (bp->rx_pg_ring_size) {
794 			rxr->rx_pg_ring = vzalloc(SW_RXPG_RING_SIZE *
795 						  bp->rx_max_pg_ring);
796 			if (rxr->rx_pg_ring == NULL)
797 				return -ENOMEM;
798 
799 		}
800 
801 		for (j = 0; j < bp->rx_max_pg_ring; j++) {
802 			rxr->rx_pg_desc_ring[j] =
803 				dma_alloc_coherent(&bp->pdev->dev,
804 						   RXBD_RING_SIZE,
805 						   &rxr->rx_pg_desc_mapping[j],
806 						   GFP_KERNEL);
807 			if (rxr->rx_pg_desc_ring[j] == NULL)
808 				return -ENOMEM;
809 
810 		}
811 	}
812 	return 0;
813 }
814 
815 static void
816 bnx2_free_mem(struct bnx2 *bp)
817 {
818 	int i;
819 	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
820 
821 	bnx2_free_tx_mem(bp);
822 	bnx2_free_rx_mem(bp);
823 
824 	for (i = 0; i < bp->ctx_pages; i++) {
825 		if (bp->ctx_blk[i]) {
826 			dma_free_coherent(&bp->pdev->dev, BNX2_PAGE_SIZE,
827 					  bp->ctx_blk[i],
828 					  bp->ctx_blk_mapping[i]);
829 			bp->ctx_blk[i] = NULL;
830 		}
831 	}
832 	if (bnapi->status_blk.msi) {
833 		dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
834 				  bnapi->status_blk.msi,
835 				  bp->status_blk_mapping);
836 		bnapi->status_blk.msi = NULL;
837 		bp->stats_blk = NULL;
838 	}
839 }
840 
841 static int
842 bnx2_alloc_mem(struct bnx2 *bp)
843 {
844 	int i, status_blk_size, err;
845 	struct bnx2_napi *bnapi;
846 	void *status_blk;
847 
848 	/* Combine status and statistics blocks into one allocation. */
849 	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
850 	if (bp->flags & BNX2_FLAG_MSIX_CAP)
851 		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
852 						 BNX2_SBLK_MSIX_ALIGN_SIZE);
853 	bp->status_stats_size = status_blk_size +
854 				sizeof(struct statistics_block);
855 
856 	status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
857 					&bp->status_blk_mapping,
858 					GFP_KERNEL | __GFP_ZERO);
859 	if (status_blk == NULL)
860 		goto alloc_mem_err;
861 
862 	bnapi = &bp->bnx2_napi[0];
863 	bnapi->status_blk.msi = status_blk;
864 	bnapi->hw_tx_cons_ptr =
865 		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
866 	bnapi->hw_rx_cons_ptr =
867 		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
868 	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
869 		for (i = 1; i < bp->irq_nvecs; i++) {
870 			struct status_block_msix *sblk;
871 
872 			bnapi = &bp->bnx2_napi[i];
873 
874 			sblk = (status_blk + BNX2_SBLK_MSIX_ALIGN_SIZE * i);
875 			bnapi->status_blk.msix = sblk;
876 			bnapi->hw_tx_cons_ptr =
877 				&sblk->status_tx_quick_consumer_index;
878 			bnapi->hw_rx_cons_ptr =
879 				&sblk->status_rx_quick_consumer_index;
880 			bnapi->int_num = i << 24;
881 		}
882 	}
883 
884 	bp->stats_blk = status_blk + status_blk_size;
885 
886 	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
887 
888 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
889 		bp->ctx_pages = 0x2000 / BNX2_PAGE_SIZE;
890 		if (bp->ctx_pages == 0)
891 			bp->ctx_pages = 1;
892 		for (i = 0; i < bp->ctx_pages; i++) {
893 			bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
894 						BNX2_PAGE_SIZE,
895 						&bp->ctx_blk_mapping[i],
896 						GFP_KERNEL);
897 			if (bp->ctx_blk[i] == NULL)
898 				goto alloc_mem_err;
899 		}
900 	}
901 
902 	err = bnx2_alloc_rx_mem(bp);
903 	if (err)
904 		goto alloc_mem_err;
905 
906 	err = bnx2_alloc_tx_mem(bp);
907 	if (err)
908 		goto alloc_mem_err;
909 
910 	return 0;
911 
912 alloc_mem_err:
913 	bnx2_free_mem(bp);
914 	return -ENOMEM;
915 }
916 
917 static void
918 bnx2_report_fw_link(struct bnx2 *bp)
919 {
920 	u32 fw_link_status = 0;
921 
922 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
923 		return;
924 
925 	if (bp->link_up) {
926 		u32 bmsr;
927 
928 		switch (bp->line_speed) {
929 		case SPEED_10:
930 			if (bp->duplex == DUPLEX_HALF)
931 				fw_link_status = BNX2_LINK_STATUS_10HALF;
932 			else
933 				fw_link_status = BNX2_LINK_STATUS_10FULL;
934 			break;
935 		case SPEED_100:
936 			if (bp->duplex == DUPLEX_HALF)
937 				fw_link_status = BNX2_LINK_STATUS_100HALF;
938 			else
939 				fw_link_status = BNX2_LINK_STATUS_100FULL;
940 			break;
941 		case SPEED_1000:
942 			if (bp->duplex == DUPLEX_HALF)
943 				fw_link_status = BNX2_LINK_STATUS_1000HALF;
944 			else
945 				fw_link_status = BNX2_LINK_STATUS_1000FULL;
946 			break;
947 		case SPEED_2500:
948 			if (bp->duplex == DUPLEX_HALF)
949 				fw_link_status = BNX2_LINK_STATUS_2500HALF;
950 			else
951 				fw_link_status = BNX2_LINK_STATUS_2500FULL;
952 			break;
953 		}
954 
955 		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
956 
957 		if (bp->autoneg) {
958 			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
959 
960 			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
961 			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
962 
963 			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
964 			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
965 				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
966 			else
967 				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
968 		}
969 	}
970 	else
971 		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
972 
973 	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
974 }
975 
976 static char *
977 bnx2_xceiver_str(struct bnx2 *bp)
978 {
979 	return (bp->phy_port == PORT_FIBRE) ? "SerDes" :
980 		((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
981 		 "Copper");
982 }
983 
984 static void
985 bnx2_report_link(struct bnx2 *bp)
986 {
987 	if (bp->link_up) {
988 		netif_carrier_on(bp->dev);
989 		netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
990 			    bnx2_xceiver_str(bp),
991 			    bp->line_speed,
992 			    bp->duplex == DUPLEX_FULL ? "full" : "half");
993 
994 		if (bp->flow_ctrl) {
995 			if (bp->flow_ctrl & FLOW_CTRL_RX) {
996 				pr_cont(", receive ");
997 				if (bp->flow_ctrl & FLOW_CTRL_TX)
998 					pr_cont("& transmit ");
999 			}
1000 			else {
1001 				pr_cont(", transmit ");
1002 			}
1003 			pr_cont("flow control ON");
1004 		}
1005 		pr_cont("\n");
1006 	} else {
1007 		netif_carrier_off(bp->dev);
1008 		netdev_err(bp->dev, "NIC %s Link is Down\n",
1009 			   bnx2_xceiver_str(bp));
1010 	}
1011 
1012 	bnx2_report_fw_link(bp);
1013 }
1014 
1015 static void
1016 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
1017 {
1018 	u32 local_adv, remote_adv;
1019 
1020 	bp->flow_ctrl = 0;
1021 	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
1022 		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
1023 
1024 		if (bp->duplex == DUPLEX_FULL) {
1025 			bp->flow_ctrl = bp->req_flow_ctrl;
1026 		}
1027 		return;
1028 	}
1029 
1030 	if (bp->duplex != DUPLEX_FULL) {
1031 		return;
1032 	}
1033 
1034 	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1035 	    (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
1036 		u32 val;
1037 
1038 		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1039 		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
1040 			bp->flow_ctrl |= FLOW_CTRL_TX;
1041 		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
1042 			bp->flow_ctrl |= FLOW_CTRL_RX;
1043 		return;
1044 	}
1045 
1046 	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1047 	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1048 
1049 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1050 		u32 new_local_adv = 0;
1051 		u32 new_remote_adv = 0;
1052 
1053 		if (local_adv & ADVERTISE_1000XPAUSE)
1054 			new_local_adv |= ADVERTISE_PAUSE_CAP;
1055 		if (local_adv & ADVERTISE_1000XPSE_ASYM)
1056 			new_local_adv |= ADVERTISE_PAUSE_ASYM;
1057 		if (remote_adv & ADVERTISE_1000XPAUSE)
1058 			new_remote_adv |= ADVERTISE_PAUSE_CAP;
1059 		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
1060 			new_remote_adv |= ADVERTISE_PAUSE_ASYM;
1061 
1062 		local_adv = new_local_adv;
1063 		remote_adv = new_remote_adv;
1064 	}
1065 
1066 	/* See Table 28B-3 of 802.3ab-1999 spec. */
1067 	if (local_adv & ADVERTISE_PAUSE_CAP) {
1068 		if(local_adv & ADVERTISE_PAUSE_ASYM) {
1069 	                if (remote_adv & ADVERTISE_PAUSE_CAP) {
1070 				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
1071 			}
1072 			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
1073 				bp->flow_ctrl = FLOW_CTRL_RX;
1074 			}
1075 		}
1076 		else {
1077 			if (remote_adv & ADVERTISE_PAUSE_CAP) {
1078 				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
1079 			}
1080 		}
1081 	}
1082 	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1083 		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
1084 			(remote_adv & ADVERTISE_PAUSE_ASYM)) {
1085 
1086 			bp->flow_ctrl = FLOW_CTRL_TX;
1087 		}
1088 	}
1089 }
1090 
1091 static int
1092 bnx2_5709s_linkup(struct bnx2 *bp)
1093 {
1094 	u32 val, speed;
1095 
1096 	bp->link_up = 1;
1097 
1098 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
1099 	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
1100 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1101 
1102 	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
1103 		bp->line_speed = bp->req_line_speed;
1104 		bp->duplex = bp->req_duplex;
1105 		return 0;
1106 	}
1107 	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
1108 	switch (speed) {
1109 		case MII_BNX2_GP_TOP_AN_SPEED_10:
1110 			bp->line_speed = SPEED_10;
1111 			break;
1112 		case MII_BNX2_GP_TOP_AN_SPEED_100:
1113 			bp->line_speed = SPEED_100;
1114 			break;
1115 		case MII_BNX2_GP_TOP_AN_SPEED_1G:
1116 		case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
1117 			bp->line_speed = SPEED_1000;
1118 			break;
1119 		case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
1120 			bp->line_speed = SPEED_2500;
1121 			break;
1122 	}
1123 	if (val & MII_BNX2_GP_TOP_AN_FD)
1124 		bp->duplex = DUPLEX_FULL;
1125 	else
1126 		bp->duplex = DUPLEX_HALF;
1127 	return 0;
1128 }
1129 
1130 static int
1131 bnx2_5708s_linkup(struct bnx2 *bp)
1132 {
1133 	u32 val;
1134 
1135 	bp->link_up = 1;
1136 	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1137 	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
1138 		case BCM5708S_1000X_STAT1_SPEED_10:
1139 			bp->line_speed = SPEED_10;
1140 			break;
1141 		case BCM5708S_1000X_STAT1_SPEED_100:
1142 			bp->line_speed = SPEED_100;
1143 			break;
1144 		case BCM5708S_1000X_STAT1_SPEED_1G:
1145 			bp->line_speed = SPEED_1000;
1146 			break;
1147 		case BCM5708S_1000X_STAT1_SPEED_2G5:
1148 			bp->line_speed = SPEED_2500;
1149 			break;
1150 	}
1151 	if (val & BCM5708S_1000X_STAT1_FD)
1152 		bp->duplex = DUPLEX_FULL;
1153 	else
1154 		bp->duplex = DUPLEX_HALF;
1155 
1156 	return 0;
1157 }
1158 
1159 static int
1160 bnx2_5706s_linkup(struct bnx2 *bp)
1161 {
1162 	u32 bmcr, local_adv, remote_adv, common;
1163 
1164 	bp->link_up = 1;
1165 	bp->line_speed = SPEED_1000;
1166 
1167 	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1168 	if (bmcr & BMCR_FULLDPLX) {
1169 		bp->duplex = DUPLEX_FULL;
1170 	}
1171 	else {
1172 		bp->duplex = DUPLEX_HALF;
1173 	}
1174 
1175 	if (!(bmcr & BMCR_ANENABLE)) {
1176 		return 0;
1177 	}
1178 
1179 	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1180 	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1181 
1182 	common = local_adv & remote_adv;
1183 	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
1184 
1185 		if (common & ADVERTISE_1000XFULL) {
1186 			bp->duplex = DUPLEX_FULL;
1187 		}
1188 		else {
1189 			bp->duplex = DUPLEX_HALF;
1190 		}
1191 	}
1192 
1193 	return 0;
1194 }
1195 
1196 static int
1197 bnx2_copper_linkup(struct bnx2 *bp)
1198 {
1199 	u32 bmcr;
1200 
1201 	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1202 	if (bmcr & BMCR_ANENABLE) {
1203 		u32 local_adv, remote_adv, common;
1204 
1205 		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
1206 		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
1207 
1208 		common = local_adv & (remote_adv >> 2);
1209 		if (common & ADVERTISE_1000FULL) {
1210 			bp->line_speed = SPEED_1000;
1211 			bp->duplex = DUPLEX_FULL;
1212 		}
1213 		else if (common & ADVERTISE_1000HALF) {
1214 			bp->line_speed = SPEED_1000;
1215 			bp->duplex = DUPLEX_HALF;
1216 		}
1217 		else {
1218 			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1219 			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1220 
1221 			common = local_adv & remote_adv;
1222 			if (common & ADVERTISE_100FULL) {
1223 				bp->line_speed = SPEED_100;
1224 				bp->duplex = DUPLEX_FULL;
1225 			}
1226 			else if (common & ADVERTISE_100HALF) {
1227 				bp->line_speed = SPEED_100;
1228 				bp->duplex = DUPLEX_HALF;
1229 			}
1230 			else if (common & ADVERTISE_10FULL) {
1231 				bp->line_speed = SPEED_10;
1232 				bp->duplex = DUPLEX_FULL;
1233 			}
1234 			else if (common & ADVERTISE_10HALF) {
1235 				bp->line_speed = SPEED_10;
1236 				bp->duplex = DUPLEX_HALF;
1237 			}
1238 			else {
1239 				bp->line_speed = 0;
1240 				bp->link_up = 0;
1241 			}
1242 		}
1243 	}
1244 	else {
1245 		if (bmcr & BMCR_SPEED100) {
1246 			bp->line_speed = SPEED_100;
1247 		}
1248 		else {
1249 			bp->line_speed = SPEED_10;
1250 		}
1251 		if (bmcr & BMCR_FULLDPLX) {
1252 			bp->duplex = DUPLEX_FULL;
1253 		}
1254 		else {
1255 			bp->duplex = DUPLEX_HALF;
1256 		}
1257 	}
1258 
1259 	return 0;
1260 }
1261 
1262 static void
1263 bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
1264 {
1265 	u32 val, rx_cid_addr = GET_CID_ADDR(cid);
1266 
1267 	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
1268 	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
1269 	val |= 0x02 << 8;
1270 
1271 	if (bp->flow_ctrl & FLOW_CTRL_TX)
1272 		val |= BNX2_L2CTX_FLOW_CTRL_ENABLE;
1273 
1274 	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
1275 }
1276 
1277 static void
1278 bnx2_init_all_rx_contexts(struct bnx2 *bp)
1279 {
1280 	int i;
1281 	u32 cid;
1282 
1283 	for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1284 		if (i == 1)
1285 			cid = RX_RSS_CID;
1286 		bnx2_init_rx_context(bp, cid);
1287 	}
1288 }
1289 
1290 static void
1291 bnx2_set_mac_link(struct bnx2 *bp)
1292 {
1293 	u32 val;
1294 
1295 	BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
1296 	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
1297 		(bp->duplex == DUPLEX_HALF)) {
1298 		BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
1299 	}
1300 
1301 	/* Configure the EMAC mode register. */
1302 	val = BNX2_RD(bp, BNX2_EMAC_MODE);
1303 
1304 	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1305 		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1306 		BNX2_EMAC_MODE_25G_MODE);
1307 
1308 	if (bp->link_up) {
1309 		switch (bp->line_speed) {
1310 			case SPEED_10:
1311 				if (BNX2_CHIP(bp) != BNX2_CHIP_5706) {
1312 					val |= BNX2_EMAC_MODE_PORT_MII_10M;
1313 					break;
1314 				}
1315 				/* fall through */
1316 			case SPEED_100:
1317 				val |= BNX2_EMAC_MODE_PORT_MII;
1318 				break;
1319 			case SPEED_2500:
1320 				val |= BNX2_EMAC_MODE_25G_MODE;
1321 				/* fall through */
1322 			case SPEED_1000:
1323 				val |= BNX2_EMAC_MODE_PORT_GMII;
1324 				break;
1325 		}
1326 	}
1327 	else {
1328 		val |= BNX2_EMAC_MODE_PORT_GMII;
1329 	}
1330 
1331 	/* Set the MAC to operate in the appropriate duplex mode. */
1332 	if (bp->duplex == DUPLEX_HALF)
1333 		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
1334 	BNX2_WR(bp, BNX2_EMAC_MODE, val);
1335 
1336 	/* Enable/disable rx PAUSE. */
1337 	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
1338 
1339 	if (bp->flow_ctrl & FLOW_CTRL_RX)
1340 		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
1341 	BNX2_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
1342 
1343 	/* Enable/disable tx PAUSE. */
1344 	val = BNX2_RD(bp, BNX2_EMAC_TX_MODE);
1345 	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
1346 
1347 	if (bp->flow_ctrl & FLOW_CTRL_TX)
1348 		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
1349 	BNX2_WR(bp, BNX2_EMAC_TX_MODE, val);
1350 
1351 	/* Acknowledge the interrupt. */
1352 	BNX2_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
1353 
1354 	bnx2_init_all_rx_contexts(bp);
1355 }
1356 
1357 static void
1358 bnx2_enable_bmsr1(struct bnx2 *bp)
1359 {
1360 	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1361 	    (BNX2_CHIP(bp) == BNX2_CHIP_5709))
1362 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1363 			       MII_BNX2_BLK_ADDR_GP_STATUS);
1364 }
1365 
1366 static void
1367 bnx2_disable_bmsr1(struct bnx2 *bp)
1368 {
1369 	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1370 	    (BNX2_CHIP(bp) == BNX2_CHIP_5709))
1371 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1372 			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1373 }
1374 
1375 static int
1376 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1377 {
1378 	u32 up1;
1379 	int ret = 1;
1380 
1381 	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1382 		return 0;
1383 
1384 	if (bp->autoneg & AUTONEG_SPEED)
1385 		bp->advertising |= ADVERTISED_2500baseX_Full;
1386 
1387 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1388 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1389 
1390 	bnx2_read_phy(bp, bp->mii_up1, &up1);
1391 	if (!(up1 & BCM5708S_UP1_2G5)) {
1392 		up1 |= BCM5708S_UP1_2G5;
1393 		bnx2_write_phy(bp, bp->mii_up1, up1);
1394 		ret = 0;
1395 	}
1396 
1397 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1398 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1399 			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1400 
1401 	return ret;
1402 }
1403 
1404 static int
1405 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1406 {
1407 	u32 up1;
1408 	int ret = 0;
1409 
1410 	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1411 		return 0;
1412 
1413 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1414 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1415 
1416 	bnx2_read_phy(bp, bp->mii_up1, &up1);
1417 	if (up1 & BCM5708S_UP1_2G5) {
1418 		up1 &= ~BCM5708S_UP1_2G5;
1419 		bnx2_write_phy(bp, bp->mii_up1, up1);
1420 		ret = 1;
1421 	}
1422 
1423 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1424 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1425 			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1426 
1427 	return ret;
1428 }
1429 
1430 static void
1431 bnx2_enable_forced_2g5(struct bnx2 *bp)
1432 {
1433 	u32 uninitialized_var(bmcr);
1434 	int err;
1435 
1436 	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1437 		return;
1438 
1439 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
1440 		u32 val;
1441 
1442 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1443 			       MII_BNX2_BLK_ADDR_SERDES_DIG);
1444 		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
1445 			val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1446 			val |= MII_BNX2_SD_MISC1_FORCE |
1447 				MII_BNX2_SD_MISC1_FORCE_2_5G;
1448 			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1449 		}
1450 
1451 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1452 			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1453 		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1454 
1455 	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
1456 		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1457 		if (!err)
1458 			bmcr |= BCM5708S_BMCR_FORCE_2500;
1459 	} else {
1460 		return;
1461 	}
1462 
1463 	if (err)
1464 		return;
1465 
1466 	if (bp->autoneg & AUTONEG_SPEED) {
1467 		bmcr &= ~BMCR_ANENABLE;
1468 		if (bp->req_duplex == DUPLEX_FULL)
1469 			bmcr |= BMCR_FULLDPLX;
1470 	}
1471 	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1472 }
1473 
1474 static void
1475 bnx2_disable_forced_2g5(struct bnx2 *bp)
1476 {
1477 	u32 uninitialized_var(bmcr);
1478 	int err;
1479 
1480 	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1481 		return;
1482 
1483 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
1484 		u32 val;
1485 
1486 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1487 			       MII_BNX2_BLK_ADDR_SERDES_DIG);
1488 		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
1489 			val &= ~MII_BNX2_SD_MISC1_FORCE;
1490 			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1491 		}
1492 
1493 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1494 			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1495 		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1496 
1497 	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
1498 		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1499 		if (!err)
1500 			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1501 	} else {
1502 		return;
1503 	}
1504 
1505 	if (err)
1506 		return;
1507 
1508 	if (bp->autoneg & AUTONEG_SPEED)
1509 		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1510 	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1511 }
1512 
1513 static void
1514 bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1515 {
1516 	u32 val;
1517 
1518 	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1519 	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1520 	if (start)
1521 		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1522 	else
1523 		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1524 }
1525 
1526 static int
1527 bnx2_set_link(struct bnx2 *bp)
1528 {
1529 	u32 bmsr;
1530 	u8 link_up;
1531 
1532 	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
1533 		bp->link_up = 1;
1534 		return 0;
1535 	}
1536 
1537 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1538 		return 0;
1539 
1540 	link_up = bp->link_up;
1541 
1542 	bnx2_enable_bmsr1(bp);
1543 	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1544 	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1545 	bnx2_disable_bmsr1(bp);
1546 
1547 	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1548 	    (BNX2_CHIP(bp) == BNX2_CHIP_5706)) {
1549 		u32 val, an_dbg;
1550 
1551 		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
1552 			bnx2_5706s_force_link_dn(bp, 0);
1553 			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
1554 		}
1555 		val = BNX2_RD(bp, BNX2_EMAC_STATUS);
1556 
1557 		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
1558 		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
1559 		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
1560 
1561 		if ((val & BNX2_EMAC_STATUS_LINK) &&
1562 		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
1563 			bmsr |= BMSR_LSTATUS;
1564 		else
1565 			bmsr &= ~BMSR_LSTATUS;
1566 	}
1567 
1568 	if (bmsr & BMSR_LSTATUS) {
1569 		bp->link_up = 1;
1570 
1571 		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1572 			if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
1573 				bnx2_5706s_linkup(bp);
1574 			else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
1575 				bnx2_5708s_linkup(bp);
1576 			else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1577 				bnx2_5709s_linkup(bp);
1578 		}
1579 		else {
1580 			bnx2_copper_linkup(bp);
1581 		}
1582 		bnx2_resolve_flow_ctrl(bp);
1583 	}
1584 	else {
1585 		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1586 		    (bp->autoneg & AUTONEG_SPEED))
1587 			bnx2_disable_forced_2g5(bp);
1588 
1589 		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
1590 			u32 bmcr;
1591 
1592 			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1593 			bmcr |= BMCR_ANENABLE;
1594 			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1595 
1596 			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
1597 		}
1598 		bp->link_up = 0;
1599 	}
1600 
1601 	if (bp->link_up != link_up) {
1602 		bnx2_report_link(bp);
1603 	}
1604 
1605 	bnx2_set_mac_link(bp);
1606 
1607 	return 0;
1608 }
1609 
1610 static int
1611 bnx2_reset_phy(struct bnx2 *bp)
1612 {
1613 	int i;
1614 	u32 reg;
1615 
1616         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1617 
1618 #define PHY_RESET_MAX_WAIT 100
1619 	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1620 		udelay(10);
1621 
1622 		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1623 		if (!(reg & BMCR_RESET)) {
1624 			udelay(20);
1625 			break;
1626 		}
1627 	}
1628 	if (i == PHY_RESET_MAX_WAIT) {
1629 		return -EBUSY;
1630 	}
1631 	return 0;
1632 }
1633 
1634 static u32
1635 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1636 {
1637 	u32 adv = 0;
1638 
1639 	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1640 		(FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1641 
1642 		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1643 			adv = ADVERTISE_1000XPAUSE;
1644 		}
1645 		else {
1646 			adv = ADVERTISE_PAUSE_CAP;
1647 		}
1648 	}
1649 	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1650 		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1651 			adv = ADVERTISE_1000XPSE_ASYM;
1652 		}
1653 		else {
1654 			adv = ADVERTISE_PAUSE_ASYM;
1655 		}
1656 	}
1657 	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1658 		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1659 			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1660 		}
1661 		else {
1662 			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1663 		}
1664 	}
1665 	return adv;
1666 }
1667 
1668 static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1669 
1670 static int
1671 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1672 __releases(&bp->phy_lock)
1673 __acquires(&bp->phy_lock)
1674 {
1675 	u32 speed_arg = 0, pause_adv;
1676 
1677 	pause_adv = bnx2_phy_get_pause_adv(bp);
1678 
1679 	if (bp->autoneg & AUTONEG_SPEED) {
1680 		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1681 		if (bp->advertising & ADVERTISED_10baseT_Half)
1682 			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1683 		if (bp->advertising & ADVERTISED_10baseT_Full)
1684 			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1685 		if (bp->advertising & ADVERTISED_100baseT_Half)
1686 			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1687 		if (bp->advertising & ADVERTISED_100baseT_Full)
1688 			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1689 		if (bp->advertising & ADVERTISED_1000baseT_Full)
1690 			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1691 		if (bp->advertising & ADVERTISED_2500baseX_Full)
1692 			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1693 	} else {
1694 		if (bp->req_line_speed == SPEED_2500)
1695 			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1696 		else if (bp->req_line_speed == SPEED_1000)
1697 			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1698 		else if (bp->req_line_speed == SPEED_100) {
1699 			if (bp->req_duplex == DUPLEX_FULL)
1700 				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1701 			else
1702 				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1703 		} else if (bp->req_line_speed == SPEED_10) {
1704 			if (bp->req_duplex == DUPLEX_FULL)
1705 				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1706 			else
1707 				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1708 		}
1709 	}
1710 
1711 	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1712 		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1713 	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
1714 		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1715 
1716 	if (port == PORT_TP)
1717 		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1718 			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1719 
1720 	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);
1721 
1722 	spin_unlock_bh(&bp->phy_lock);
1723 	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
1724 	spin_lock_bh(&bp->phy_lock);
1725 
1726 	return 0;
1727 }
1728 
1729 static int
1730 bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
1731 __releases(&bp->phy_lock)
1732 __acquires(&bp->phy_lock)
1733 {
1734 	u32 adv, bmcr;
1735 	u32 new_adv = 0;
1736 
1737 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1738 		return bnx2_setup_remote_phy(bp, port);
1739 
1740 	if (!(bp->autoneg & AUTONEG_SPEED)) {
1741 		u32 new_bmcr;
1742 		int force_link_down = 0;
1743 
1744 		if (bp->req_line_speed == SPEED_2500) {
1745 			if (!bnx2_test_and_enable_2g5(bp))
1746 				force_link_down = 1;
1747 		} else if (bp->req_line_speed == SPEED_1000) {
1748 			if (bnx2_test_and_disable_2g5(bp))
1749 				force_link_down = 1;
1750 		}
1751 		bnx2_read_phy(bp, bp->mii_adv, &adv);
1752 		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1753 
1754 		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1755 		new_bmcr = bmcr & ~BMCR_ANENABLE;
1756 		new_bmcr |= BMCR_SPEED1000;
1757 
1758 		if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
1759 			if (bp->req_line_speed == SPEED_2500)
1760 				bnx2_enable_forced_2g5(bp);
1761 			else if (bp->req_line_speed == SPEED_1000) {
1762 				bnx2_disable_forced_2g5(bp);
1763 				new_bmcr &= ~0x2000;
1764 			}
1765 
1766 		} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
1767 			if (bp->req_line_speed == SPEED_2500)
1768 				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1769 			else
1770 				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
1771 		}
1772 
1773 		if (bp->req_duplex == DUPLEX_FULL) {
1774 			adv |= ADVERTISE_1000XFULL;
1775 			new_bmcr |= BMCR_FULLDPLX;
1776 		}
1777 		else {
1778 			adv |= ADVERTISE_1000XHALF;
1779 			new_bmcr &= ~BMCR_FULLDPLX;
1780 		}
1781 		if ((new_bmcr != bmcr) || (force_link_down)) {
1782 			/* Force a link down visible on the other side */
1783 			if (bp->link_up) {
1784 				bnx2_write_phy(bp, bp->mii_adv, adv &
1785 					       ~(ADVERTISE_1000XFULL |
1786 						 ADVERTISE_1000XHALF));
1787 				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
1788 					BMCR_ANRESTART | BMCR_ANENABLE);
1789 
1790 				bp->link_up = 0;
1791 				netif_carrier_off(bp->dev);
1792 				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1793 				bnx2_report_link(bp);
1794 			}
1795 			bnx2_write_phy(bp, bp->mii_adv, adv);
1796 			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1797 		} else {
1798 			bnx2_resolve_flow_ctrl(bp);
1799 			bnx2_set_mac_link(bp);
1800 		}
1801 		return 0;
1802 	}
1803 
1804 	bnx2_test_and_enable_2g5(bp);
1805 
1806 	if (bp->advertising & ADVERTISED_1000baseT_Full)
1807 		new_adv |= ADVERTISE_1000XFULL;
1808 
1809 	new_adv |= bnx2_phy_get_pause_adv(bp);
1810 
1811 	bnx2_read_phy(bp, bp->mii_adv, &adv);
1812 	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1813 
1814 	bp->serdes_an_pending = 0;
1815 	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1816 		/* Force a link down visible on the other side */
1817 		if (bp->link_up) {
1818 			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1819 			spin_unlock_bh(&bp->phy_lock);
1820 			msleep(20);
1821 			spin_lock_bh(&bp->phy_lock);
1822 		}
1823 
1824 		bnx2_write_phy(bp, bp->mii_adv, new_adv);
1825 		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
1826 			BMCR_ANENABLE);
1827 		/* Speed up link-up time when the link partner
1828 		 * does not autonegotiate which is very common
1829 		 * in blade servers. Some blade servers use
1830 		 * IPMI for kerboard input and it's important
1831 		 * to minimize link disruptions. Autoneg. involves
1832 		 * exchanging base pages plus 3 next pages and
1833 		 * normally completes in about 120 msec.
1834 		 */
1835 		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
1836 		bp->serdes_an_pending = 1;
1837 		mod_timer(&bp->timer, jiffies + bp->current_interval);
1838 	} else {
1839 		bnx2_resolve_flow_ctrl(bp);
1840 		bnx2_set_mac_link(bp);
1841 	}
1842 
1843 	return 0;
1844 }
1845 
1846 #define ETHTOOL_ALL_FIBRE_SPEED						\
1847 	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
1848 		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1849 		(ADVERTISED_1000baseT_Full)
1850 
1851 #define ETHTOOL_ALL_COPPER_SPEED					\
1852 	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
1853 	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
1854 	ADVERTISED_1000baseT_Full)
1855 
1856 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1857 	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1858 
1859 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1860 
1861 static void
1862 bnx2_set_default_remote_link(struct bnx2 *bp)
1863 {
1864 	u32 link;
1865 
1866 	if (bp->phy_port == PORT_TP)
1867 		link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1868 	else
1869 		link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1870 
1871 	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1872 		bp->req_line_speed = 0;
1873 		bp->autoneg |= AUTONEG_SPEED;
1874 		bp->advertising = ADVERTISED_Autoneg;
1875 		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1876 			bp->advertising |= ADVERTISED_10baseT_Half;
1877 		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1878 			bp->advertising |= ADVERTISED_10baseT_Full;
1879 		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1880 			bp->advertising |= ADVERTISED_100baseT_Half;
1881 		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1882 			bp->advertising |= ADVERTISED_100baseT_Full;
1883 		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1884 			bp->advertising |= ADVERTISED_1000baseT_Full;
1885 		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1886 			bp->advertising |= ADVERTISED_2500baseX_Full;
1887 	} else {
1888 		bp->autoneg = 0;
1889 		bp->advertising = 0;
1890 		bp->req_duplex = DUPLEX_FULL;
1891 		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1892 			bp->req_line_speed = SPEED_10;
1893 			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1894 				bp->req_duplex = DUPLEX_HALF;
1895 		}
1896 		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1897 			bp->req_line_speed = SPEED_100;
1898 			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1899 				bp->req_duplex = DUPLEX_HALF;
1900 		}
1901 		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1902 			bp->req_line_speed = SPEED_1000;
1903 		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1904 			bp->req_line_speed = SPEED_2500;
1905 	}
1906 }
1907 
1908 static void
1909 bnx2_set_default_link(struct bnx2 *bp)
1910 {
1911 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1912 		bnx2_set_default_remote_link(bp);
1913 		return;
1914 	}
1915 
1916 	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1917 	bp->req_line_speed = 0;
1918 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1919 		u32 reg;
1920 
1921 		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1922 
1923 		reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1924 		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1925 		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1926 			bp->autoneg = 0;
1927 			bp->req_line_speed = bp->line_speed = SPEED_1000;
1928 			bp->req_duplex = DUPLEX_FULL;
1929 		}
1930 	} else
1931 		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1932 }
1933 
1934 static void
1935 bnx2_send_heart_beat(struct bnx2 *bp)
1936 {
1937 	u32 msg;
1938 	u32 addr;
1939 
1940 	spin_lock(&bp->indirect_lock);
1941 	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1942 	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1943 	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1944 	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1945 	spin_unlock(&bp->indirect_lock);
1946 }
1947 
1948 static void
1949 bnx2_remote_phy_event(struct bnx2 *bp)
1950 {
1951 	u32 msg;
1952 	u8 link_up = bp->link_up;
1953 	u8 old_port;
1954 
1955 	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
1956 
1957 	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
1958 		bnx2_send_heart_beat(bp);
1959 
1960 	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
1961 
1962 	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
1963 		bp->link_up = 0;
1964 	else {
1965 		u32 speed;
1966 
1967 		bp->link_up = 1;
1968 		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
1969 		bp->duplex = DUPLEX_FULL;
1970 		switch (speed) {
1971 			case BNX2_LINK_STATUS_10HALF:
1972 				bp->duplex = DUPLEX_HALF;
1973 				/* fall through */
1974 			case BNX2_LINK_STATUS_10FULL:
1975 				bp->line_speed = SPEED_10;
1976 				break;
1977 			case BNX2_LINK_STATUS_100HALF:
1978 				bp->duplex = DUPLEX_HALF;
1979 				/* fall through */
1980 			case BNX2_LINK_STATUS_100BASE_T4:
1981 			case BNX2_LINK_STATUS_100FULL:
1982 				bp->line_speed = SPEED_100;
1983 				break;
1984 			case BNX2_LINK_STATUS_1000HALF:
1985 				bp->duplex = DUPLEX_HALF;
1986 				/* fall through */
1987 			case BNX2_LINK_STATUS_1000FULL:
1988 				bp->line_speed = SPEED_1000;
1989 				break;
1990 			case BNX2_LINK_STATUS_2500HALF:
1991 				bp->duplex = DUPLEX_HALF;
1992 				/* fall through */
1993 			case BNX2_LINK_STATUS_2500FULL:
1994 				bp->line_speed = SPEED_2500;
1995 				break;
1996 			default:
1997 				bp->line_speed = 0;
1998 				break;
1999 		}
2000 
2001 		bp->flow_ctrl = 0;
2002 		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
2003 		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
2004 			if (bp->duplex == DUPLEX_FULL)
2005 				bp->flow_ctrl = bp->req_flow_ctrl;
2006 		} else {
2007 			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
2008 				bp->flow_ctrl |= FLOW_CTRL_TX;
2009 			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
2010 				bp->flow_ctrl |= FLOW_CTRL_RX;
2011 		}
2012 
2013 		old_port = bp->phy_port;
2014 		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
2015 			bp->phy_port = PORT_FIBRE;
2016 		else
2017 			bp->phy_port = PORT_TP;
2018 
2019 		if (old_port != bp->phy_port)
2020 			bnx2_set_default_link(bp);
2021 
2022 	}
2023 	if (bp->link_up != link_up)
2024 		bnx2_report_link(bp);
2025 
2026 	bnx2_set_mac_link(bp);
2027 }
2028 
2029 static int
2030 bnx2_set_remote_link(struct bnx2 *bp)
2031 {
2032 	u32 evt_code;
2033 
2034 	evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2035 	switch (evt_code) {
2036 		case BNX2_FW_EVT_CODE_LINK_EVENT:
2037 			bnx2_remote_phy_event(bp);
2038 			break;
2039 		case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2040 		default:
2041 			bnx2_send_heart_beat(bp);
2042 			break;
2043 	}
2044 	return 0;
2045 }
2046 
2047 static int
2048 bnx2_setup_copper_phy(struct bnx2 *bp)
2049 __releases(&bp->phy_lock)
2050 __acquires(&bp->phy_lock)
2051 {
2052 	u32 bmcr;
2053 	u32 new_bmcr;
2054 
2055 	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
2056 
2057 	if (bp->autoneg & AUTONEG_SPEED) {
2058 		u32 adv_reg, adv1000_reg;
2059 		u32 new_adv = 0;
2060 		u32 new_adv1000 = 0;
2061 
2062 		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
2063 		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
2064 			ADVERTISE_PAUSE_ASYM);
2065 
2066 		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
2067 		adv1000_reg &= PHY_ALL_1000_SPEED;
2068 
2069 		new_adv = ethtool_adv_to_mii_adv_t(bp->advertising);
2070 		new_adv |= ADVERTISE_CSMA;
2071 		new_adv |= bnx2_phy_get_pause_adv(bp);
2072 
2073 		new_adv1000 |= ethtool_adv_to_mii_ctrl1000_t(bp->advertising);
2074 
2075 		if ((adv1000_reg != new_adv1000) ||
2076 			(adv_reg != new_adv) ||
2077 			((bmcr & BMCR_ANENABLE) == 0)) {
2078 
2079 			bnx2_write_phy(bp, bp->mii_adv, new_adv);
2080 			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000);
2081 			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
2082 				BMCR_ANENABLE);
2083 		}
2084 		else if (bp->link_up) {
2085 			/* Flow ctrl may have changed from auto to forced */
2086 			/* or vice-versa. */
2087 
2088 			bnx2_resolve_flow_ctrl(bp);
2089 			bnx2_set_mac_link(bp);
2090 		}
2091 		return 0;
2092 	}
2093 
2094 	new_bmcr = 0;
2095 	if (bp->req_line_speed == SPEED_100) {
2096 		new_bmcr |= BMCR_SPEED100;
2097 	}
2098 	if (bp->req_duplex == DUPLEX_FULL) {
2099 		new_bmcr |= BMCR_FULLDPLX;
2100 	}
2101 	if (new_bmcr != bmcr) {
2102 		u32 bmsr;
2103 
2104 		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2105 		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2106 
2107 		if (bmsr & BMSR_LSTATUS) {
2108 			/* Force link down */
2109 			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
2110 			spin_unlock_bh(&bp->phy_lock);
2111 			msleep(50);
2112 			spin_lock_bh(&bp->phy_lock);
2113 
2114 			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2115 			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2116 		}
2117 
2118 		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
2119 
2120 		/* Normally, the new speed is setup after the link has
2121 		 * gone down and up again. In some cases, link will not go
2122 		 * down so we need to set up the new speed here.
2123 		 */
2124 		if (bmsr & BMSR_LSTATUS) {
2125 			bp->line_speed = bp->req_line_speed;
2126 			bp->duplex = bp->req_duplex;
2127 			bnx2_resolve_flow_ctrl(bp);
2128 			bnx2_set_mac_link(bp);
2129 		}
2130 	} else {
2131 		bnx2_resolve_flow_ctrl(bp);
2132 		bnx2_set_mac_link(bp);
2133 	}
2134 	return 0;
2135 }
2136 
2137 static int
2138 bnx2_setup_phy(struct bnx2 *bp, u8 port)
2139 __releases(&bp->phy_lock)
2140 __acquires(&bp->phy_lock)
2141 {
2142 	if (bp->loopback == MAC_LOOPBACK)
2143 		return 0;
2144 
2145 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2146 		return bnx2_setup_serdes_phy(bp, port);
2147 	}
2148 	else {
2149 		return bnx2_setup_copper_phy(bp);
2150 	}
2151 }
2152 
2153 static int
2154 bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
2155 {
2156 	u32 val;
2157 
2158 	bp->mii_bmcr = MII_BMCR + 0x10;
2159 	bp->mii_bmsr = MII_BMSR + 0x10;
2160 	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
2161 	bp->mii_adv = MII_ADVERTISE + 0x10;
2162 	bp->mii_lpa = MII_LPA + 0x10;
2163 	bp->mii_up1 = MII_BNX2_OVER1G_UP1;
2164 
2165 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
2166 	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
2167 
2168 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2169 	if (reset_phy)
2170 		bnx2_reset_phy(bp);
2171 
2172 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
2173 
2174 	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
2175 	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
2176 	val |= MII_BNX2_SD_1000XCTL1_FIBER;
2177 	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
2178 
2179 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
2180 	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
2181 	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
2182 		val |= BCM5708S_UP1_2G5;
2183 	else
2184 		val &= ~BCM5708S_UP1_2G5;
2185 	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
2186 
2187 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
2188 	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
2189 	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
2190 	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
2191 
2192 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
2193 
2194 	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
2195 	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
2196 	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
2197 
2198 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2199 
2200 	return 0;
2201 }
2202 
2203 static int
2204 bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
2205 {
2206 	u32 val;
2207 
2208 	if (reset_phy)
2209 		bnx2_reset_phy(bp);
2210 
2211 	bp->mii_up1 = BCM5708S_UP1;
2212 
2213 	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
2214 	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
2215 	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2216 
2217 	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
2218 	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
2219 	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
2220 
2221 	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
2222 	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
2223 	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
2224 
2225 	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
2226 		bnx2_read_phy(bp, BCM5708S_UP1, &val);
2227 		val |= BCM5708S_UP1_2G5;
2228 		bnx2_write_phy(bp, BCM5708S_UP1, val);
2229 	}
2230 
2231 	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
2232 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
2233 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1)) {
2234 		/* increase tx signal amplitude */
2235 		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2236 			       BCM5708S_BLK_ADDR_TX_MISC);
2237 		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
2238 		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
2239 		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
2240 		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2241 	}
2242 
2243 	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
2244 	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
2245 
2246 	if (val) {
2247 		u32 is_backplane;
2248 
2249 		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
2250 		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
2251 			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2252 				       BCM5708S_BLK_ADDR_TX_MISC);
2253 			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
2254 			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2255 				       BCM5708S_BLK_ADDR_DIG);
2256 		}
2257 	}
2258 	return 0;
2259 }
2260 
2261 static int
2262 bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
2263 {
2264 	if (reset_phy)
2265 		bnx2_reset_phy(bp);
2266 
2267 	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
2268 
2269 	if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
2270 		BNX2_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
2271 
2272 	if (bp->dev->mtu > 1500) {
2273 		u32 val;
2274 
2275 		/* Set extended packet length bit */
2276 		bnx2_write_phy(bp, 0x18, 0x7);
2277 		bnx2_read_phy(bp, 0x18, &val);
2278 		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
2279 
2280 		bnx2_write_phy(bp, 0x1c, 0x6c00);
2281 		bnx2_read_phy(bp, 0x1c, &val);
2282 		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
2283 	}
2284 	else {
2285 		u32 val;
2286 
2287 		bnx2_write_phy(bp, 0x18, 0x7);
2288 		bnx2_read_phy(bp, 0x18, &val);
2289 		bnx2_write_phy(bp, 0x18, val & ~0x4007);
2290 
2291 		bnx2_write_phy(bp, 0x1c, 0x6c00);
2292 		bnx2_read_phy(bp, 0x1c, &val);
2293 		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
2294 	}
2295 
2296 	return 0;
2297 }
2298 
2299 static int
2300 bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
2301 {
2302 	u32 val;
2303 
2304 	if (reset_phy)
2305 		bnx2_reset_phy(bp);
2306 
2307 	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
2308 		bnx2_write_phy(bp, 0x18, 0x0c00);
2309 		bnx2_write_phy(bp, 0x17, 0x000a);
2310 		bnx2_write_phy(bp, 0x15, 0x310b);
2311 		bnx2_write_phy(bp, 0x17, 0x201f);
2312 		bnx2_write_phy(bp, 0x15, 0x9506);
2313 		bnx2_write_phy(bp, 0x17, 0x401f);
2314 		bnx2_write_phy(bp, 0x15, 0x14e2);
2315 		bnx2_write_phy(bp, 0x18, 0x0400);
2316 	}
2317 
2318 	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
2319 		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
2320 			       MII_BNX2_DSP_EXPAND_REG | 0x8);
2321 		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
2322 		val &= ~(1 << 8);
2323 		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
2324 	}
2325 
2326 	if (bp->dev->mtu > 1500) {
2327 		/* Set extended packet length bit */
2328 		bnx2_write_phy(bp, 0x18, 0x7);
2329 		bnx2_read_phy(bp, 0x18, &val);
2330 		bnx2_write_phy(bp, 0x18, val | 0x4000);
2331 
2332 		bnx2_read_phy(bp, 0x10, &val);
2333 		bnx2_write_phy(bp, 0x10, val | 0x1);
2334 	}
2335 	else {
2336 		bnx2_write_phy(bp, 0x18, 0x7);
2337 		bnx2_read_phy(bp, 0x18, &val);
2338 		bnx2_write_phy(bp, 0x18, val & ~0x4007);
2339 
2340 		bnx2_read_phy(bp, 0x10, &val);
2341 		bnx2_write_phy(bp, 0x10, val & ~0x1);
2342 	}
2343 
2344 	/* ethernet@wirespeed */
2345 	bnx2_write_phy(bp, 0x18, 0x7007);
2346 	bnx2_read_phy(bp, 0x18, &val);
2347 	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
2348 	return 0;
2349 }
2350 
2351 
2352 static int
2353 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2354 __releases(&bp->phy_lock)
2355 __acquires(&bp->phy_lock)
2356 {
2357 	u32 val;
2358 	int rc = 0;
2359 
2360 	bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2361 	bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2362 
2363 	bp->mii_bmcr = MII_BMCR;
2364 	bp->mii_bmsr = MII_BMSR;
2365 	bp->mii_bmsr1 = MII_BMSR;
2366 	bp->mii_adv = MII_ADVERTISE;
2367 	bp->mii_lpa = MII_LPA;
2368 
2369 	BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2370 
2371 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2372 		goto setup_phy;
2373 
2374 	bnx2_read_phy(bp, MII_PHYSID1, &val);
2375 	bp->phy_id = val << 16;
2376 	bnx2_read_phy(bp, MII_PHYSID2, &val);
2377 	bp->phy_id |= val & 0xffff;
2378 
2379 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2380 		if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
2381 			rc = bnx2_init_5706s_phy(bp, reset_phy);
2382 		else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
2383 			rc = bnx2_init_5708s_phy(bp, reset_phy);
2384 		else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
2385 			rc = bnx2_init_5709s_phy(bp, reset_phy);
2386 	}
2387 	else {
2388 		rc = bnx2_init_copper_phy(bp, reset_phy);
2389 	}
2390 
2391 setup_phy:
2392 	if (!rc)
2393 		rc = bnx2_setup_phy(bp, bp->phy_port);
2394 
2395 	return rc;
2396 }
2397 
2398 static int
2399 bnx2_set_mac_loopback(struct bnx2 *bp)
2400 {
2401 	u32 mac_mode;
2402 
2403 	mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
2404 	mac_mode &= ~BNX2_EMAC_MODE_PORT;
2405 	mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2406 	BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2407 	bp->link_up = 1;
2408 	return 0;
2409 }
2410 
2411 static int bnx2_test_link(struct bnx2 *);
2412 
2413 static int
2414 bnx2_set_phy_loopback(struct bnx2 *bp)
2415 {
2416 	u32 mac_mode;
2417 	int rc, i;
2418 
2419 	spin_lock_bh(&bp->phy_lock);
2420 	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2421 			    BMCR_SPEED1000);
2422 	spin_unlock_bh(&bp->phy_lock);
2423 	if (rc)
2424 		return rc;
2425 
2426 	for (i = 0; i < 10; i++) {
2427 		if (bnx2_test_link(bp) == 0)
2428 			break;
2429 		msleep(100);
2430 	}
2431 
2432 	mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
2433 	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2434 		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2435 		      BNX2_EMAC_MODE_25G_MODE);
2436 
2437 	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2438 	BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2439 	bp->link_up = 1;
2440 	return 0;
2441 }
2442 
2443 static void
2444 bnx2_dump_mcp_state(struct bnx2 *bp)
2445 {
2446 	struct net_device *dev = bp->dev;
2447 	u32 mcp_p0, mcp_p1;
2448 
2449 	netdev_err(dev, "<--- start MCP states dump --->\n");
2450 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
2451 		mcp_p0 = BNX2_MCP_STATE_P0;
2452 		mcp_p1 = BNX2_MCP_STATE_P1;
2453 	} else {
2454 		mcp_p0 = BNX2_MCP_STATE_P0_5708;
2455 		mcp_p1 = BNX2_MCP_STATE_P1_5708;
2456 	}
2457 	netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
2458 		   bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
2459 	netdev_err(dev, "DEBUG: MCP mode[%08x] state[%08x] evt_mask[%08x]\n",
2460 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_MODE),
2461 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_STATE),
2462 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_EVENT_MASK));
2463 	netdev_err(dev, "DEBUG: pc[%08x] pc[%08x] instr[%08x]\n",
2464 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
2465 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
2466 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_INSTRUCTION));
2467 	netdev_err(dev, "DEBUG: shmem states:\n");
2468 	netdev_err(dev, "DEBUG: drv_mb[%08x] fw_mb[%08x] link_status[%08x]",
2469 		   bnx2_shmem_rd(bp, BNX2_DRV_MB),
2470 		   bnx2_shmem_rd(bp, BNX2_FW_MB),
2471 		   bnx2_shmem_rd(bp, BNX2_LINK_STATUS));
2472 	pr_cont(" drv_pulse_mb[%08x]\n", bnx2_shmem_rd(bp, BNX2_DRV_PULSE_MB));
2473 	netdev_err(dev, "DEBUG: dev_info_signature[%08x] reset_type[%08x]",
2474 		   bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE),
2475 		   bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE));
2476 	pr_cont(" condition[%08x]\n",
2477 		bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION));
2478 	DP_SHMEM_LINE(bp, BNX2_BC_RESET_TYPE);
2479 	DP_SHMEM_LINE(bp, 0x3cc);
2480 	DP_SHMEM_LINE(bp, 0x3dc);
2481 	DP_SHMEM_LINE(bp, 0x3ec);
2482 	netdev_err(dev, "DEBUG: 0x3fc[%08x]\n", bnx2_shmem_rd(bp, 0x3fc));
2483 	netdev_err(dev, "<--- end MCP states dump --->\n");
2484 }
2485 
2486 static int
2487 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
2488 {
2489 	int i;
2490 	u32 val;
2491 
2492 	bp->fw_wr_seq++;
2493 	msg_data |= bp->fw_wr_seq;
2494 
2495 	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2496 
2497 	if (!ack)
2498 		return 0;
2499 
2500 	/* wait for an acknowledgement. */
2501 	for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
2502 		msleep(10);
2503 
2504 		val = bnx2_shmem_rd(bp, BNX2_FW_MB);
2505 
2506 		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2507 			break;
2508 	}
2509 	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2510 		return 0;
2511 
2512 	/* If we timed out, inform the firmware that this is the case. */
2513 	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2514 		msg_data &= ~BNX2_DRV_MSG_CODE;
2515 		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2516 
2517 		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2518 		if (!silent) {
2519 			pr_err("fw sync timeout, reset code = %x\n", msg_data);
2520 			bnx2_dump_mcp_state(bp);
2521 		}
2522 
2523 		return -EBUSY;
2524 	}
2525 
2526 	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2527 		return -EIO;
2528 
2529 	return 0;
2530 }
2531 
2532 static int
2533 bnx2_init_5709_context(struct bnx2 *bp)
2534 {
2535 	int i, ret = 0;
2536 	u32 val;
2537 
2538 	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2539 	val |= (BNX2_PAGE_BITS - 8) << 16;
2540 	BNX2_WR(bp, BNX2_CTX_COMMAND, val);
2541 	for (i = 0; i < 10; i++) {
2542 		val = BNX2_RD(bp, BNX2_CTX_COMMAND);
2543 		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2544 			break;
2545 		udelay(2);
2546 	}
2547 	if (val & BNX2_CTX_COMMAND_MEM_INIT)
2548 		return -EBUSY;
2549 
2550 	for (i = 0; i < bp->ctx_pages; i++) {
2551 		int j;
2552 
2553 		if (bp->ctx_blk[i])
2554 			memset(bp->ctx_blk[i], 0, BNX2_PAGE_SIZE);
2555 		else
2556 			return -ENOMEM;
2557 
2558 		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2559 			(bp->ctx_blk_mapping[i] & 0xffffffff) |
2560 			BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2561 		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2562 			(u64) bp->ctx_blk_mapping[i] >> 32);
2563 		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2564 			BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2565 		for (j = 0; j < 10; j++) {
2566 
2567 			val = BNX2_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2568 			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2569 				break;
2570 			udelay(5);
2571 		}
2572 		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2573 			ret = -EBUSY;
2574 			break;
2575 		}
2576 	}
2577 	return ret;
2578 }
2579 
2580 static void
2581 bnx2_init_context(struct bnx2 *bp)
2582 {
2583 	u32 vcid;
2584 
2585 	vcid = 96;
2586 	while (vcid) {
2587 		u32 vcid_addr, pcid_addr, offset;
2588 		int i;
2589 
2590 		vcid--;
2591 
2592 		if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
2593 			u32 new_vcid;
2594 
2595 			vcid_addr = GET_PCID_ADDR(vcid);
2596 			if (vcid & 0x8) {
2597 				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2598 			}
2599 			else {
2600 				new_vcid = vcid;
2601 			}
2602 			pcid_addr = GET_PCID_ADDR(new_vcid);
2603 		}
2604 		else {
2605 	    		vcid_addr = GET_CID_ADDR(vcid);
2606 			pcid_addr = vcid_addr;
2607 		}
2608 
2609 		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2610 			vcid_addr += (i << PHY_CTX_SHIFT);
2611 			pcid_addr += (i << PHY_CTX_SHIFT);
2612 
2613 			BNX2_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2614 			BNX2_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2615 
2616 			/* Zero out the context. */
2617 			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2618 				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
2619 		}
2620 	}
2621 }
2622 
2623 static int
2624 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2625 {
2626 	u16 *good_mbuf;
2627 	u32 good_mbuf_cnt;
2628 	u32 val;
2629 
2630 	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2631 	if (good_mbuf == NULL)
2632 		return -ENOMEM;
2633 
2634 	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2635 		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2636 
2637 	good_mbuf_cnt = 0;
2638 
2639 	/* Allocate a bunch of mbufs and save the good ones in an array. */
2640 	val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2641 	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2642 		bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2643 				BNX2_RBUF_COMMAND_ALLOC_REQ);
2644 
2645 		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2646 
2647 		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2648 
2649 		/* The addresses with Bit 9 set are bad memory blocks. */
2650 		if (!(val & (1 << 9))) {
2651 			good_mbuf[good_mbuf_cnt] = (u16) val;
2652 			good_mbuf_cnt++;
2653 		}
2654 
2655 		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2656 	}
2657 
2658 	/* Free the good ones back to the mbuf pool thus discarding
2659 	 * all the bad ones. */
2660 	while (good_mbuf_cnt) {
2661 		good_mbuf_cnt--;
2662 
2663 		val = good_mbuf[good_mbuf_cnt];
2664 		val = (val << 9) | val | 1;
2665 
2666 		bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2667 	}
2668 	kfree(good_mbuf);
2669 	return 0;
2670 }
2671 
2672 static void
2673 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2674 {
2675 	u32 val;
2676 
2677 	val = (mac_addr[0] << 8) | mac_addr[1];
2678 
2679 	BNX2_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2680 
2681 	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2682 		(mac_addr[4] << 8) | mac_addr[5];
2683 
2684 	BNX2_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2685 }
2686 
2687 static inline int
2688 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2689 {
2690 	dma_addr_t mapping;
2691 	struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2692 	struct bnx2_rx_bd *rxbd =
2693 		&rxr->rx_pg_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
2694 	struct page *page = alloc_page(gfp);
2695 
2696 	if (!page)
2697 		return -ENOMEM;
2698 	mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
2699 			       PCI_DMA_FROMDEVICE);
2700 	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2701 		__free_page(page);
2702 		return -EIO;
2703 	}
2704 
2705 	rx_pg->page = page;
2706 	dma_unmap_addr_set(rx_pg, mapping, mapping);
2707 	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2708 	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2709 	return 0;
2710 }
2711 
2712 static void
2713 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2714 {
2715 	struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2716 	struct page *page = rx_pg->page;
2717 
2718 	if (!page)
2719 		return;
2720 
2721 	dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
2722 		       PAGE_SIZE, PCI_DMA_FROMDEVICE);
2723 
2724 	__free_page(page);
2725 	rx_pg->page = NULL;
2726 }
2727 
2728 static inline int
2729 bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2730 {
2731 	u8 *data;
2732 	struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2733 	dma_addr_t mapping;
2734 	struct bnx2_rx_bd *rxbd =
2735 		&rxr->rx_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
2736 
2737 	data = kmalloc(bp->rx_buf_size, gfp);
2738 	if (!data)
2739 		return -ENOMEM;
2740 
2741 	mapping = dma_map_single(&bp->pdev->dev,
2742 				 get_l2_fhdr(data),
2743 				 bp->rx_buf_use_size,
2744 				 PCI_DMA_FROMDEVICE);
2745 	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2746 		kfree(data);
2747 		return -EIO;
2748 	}
2749 
2750 	rx_buf->data = data;
2751 	dma_unmap_addr_set(rx_buf, mapping, mapping);
2752 
2753 	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2754 	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2755 
2756 	rxr->rx_prod_bseq += bp->rx_buf_use_size;
2757 
2758 	return 0;
2759 }
2760 
2761 static int
2762 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2763 {
2764 	struct status_block *sblk = bnapi->status_blk.msi;
2765 	u32 new_link_state, old_link_state;
2766 	int is_set = 1;
2767 
2768 	new_link_state = sblk->status_attn_bits & event;
2769 	old_link_state = sblk->status_attn_bits_ack & event;
2770 	if (new_link_state != old_link_state) {
2771 		if (new_link_state)
2772 			BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2773 		else
2774 			BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2775 	} else
2776 		is_set = 0;
2777 
2778 	return is_set;
2779 }
2780 
2781 static void
2782 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2783 {
2784 	spin_lock(&bp->phy_lock);
2785 
2786 	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2787 		bnx2_set_link(bp);
2788 	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2789 		bnx2_set_remote_link(bp);
2790 
2791 	spin_unlock(&bp->phy_lock);
2792 
2793 }
2794 
2795 static inline u16
2796 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2797 {
2798 	u16 cons;
2799 
2800 	/* Tell compiler that status block fields can change. */
2801 	barrier();
2802 	cons = *bnapi->hw_tx_cons_ptr;
2803 	barrier();
2804 	if (unlikely((cons & BNX2_MAX_TX_DESC_CNT) == BNX2_MAX_TX_DESC_CNT))
2805 		cons++;
2806 	return cons;
2807 }
2808 
2809 static int
2810 bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2811 {
2812 	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
2813 	u16 hw_cons, sw_cons, sw_ring_cons;
2814 	int tx_pkt = 0, index;
2815 	unsigned int tx_bytes = 0;
2816 	struct netdev_queue *txq;
2817 
2818 	index = (bnapi - bp->bnx2_napi);
2819 	txq = netdev_get_tx_queue(bp->dev, index);
2820 
2821 	hw_cons = bnx2_get_hw_tx_cons(bnapi);
2822 	sw_cons = txr->tx_cons;
2823 
2824 	while (sw_cons != hw_cons) {
2825 		struct bnx2_sw_tx_bd *tx_buf;
2826 		struct sk_buff *skb;
2827 		int i, last;
2828 
2829 		sw_ring_cons = BNX2_TX_RING_IDX(sw_cons);
2830 
2831 		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
2832 		skb = tx_buf->skb;
2833 
2834 		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
2835 		prefetch(&skb->end);
2836 
2837 		/* partial BD completions possible with TSO packets */
2838 		if (tx_buf->is_gso) {
2839 			u16 last_idx, last_ring_idx;
2840 
2841 			last_idx = sw_cons + tx_buf->nr_frags + 1;
2842 			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
2843 			if (unlikely(last_ring_idx >= BNX2_MAX_TX_DESC_CNT)) {
2844 				last_idx++;
2845 			}
2846 			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2847 				break;
2848 			}
2849 		}
2850 
2851 		dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
2852 			skb_headlen(skb), PCI_DMA_TODEVICE);
2853 
2854 		tx_buf->skb = NULL;
2855 		last = tx_buf->nr_frags;
2856 
2857 		for (i = 0; i < last; i++) {
2858 			struct bnx2_sw_tx_bd *tx_buf;
2859 
2860 			sw_cons = BNX2_NEXT_TX_BD(sw_cons);
2861 
2862 			tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(sw_cons)];
2863 			dma_unmap_page(&bp->pdev->dev,
2864 				dma_unmap_addr(tx_buf, mapping),
2865 				skb_frag_size(&skb_shinfo(skb)->frags[i]),
2866 				PCI_DMA_TODEVICE);
2867 		}
2868 
2869 		sw_cons = BNX2_NEXT_TX_BD(sw_cons);
2870 
2871 		tx_bytes += skb->len;
2872 		dev_kfree_skb(skb);
2873 		tx_pkt++;
2874 		if (tx_pkt == budget)
2875 			break;
2876 
2877 		if (hw_cons == sw_cons)
2878 			hw_cons = bnx2_get_hw_tx_cons(bnapi);
2879 	}
2880 
2881 	netdev_tx_completed_queue(txq, tx_pkt, tx_bytes);
2882 	txr->hw_tx_cons = hw_cons;
2883 	txr->tx_cons = sw_cons;
2884 
2885 	/* Need to make the tx_cons update visible to bnx2_start_xmit()
2886 	 * before checking for netif_tx_queue_stopped().  Without the
2887 	 * memory barrier, there is a small possibility that bnx2_start_xmit()
2888 	 * will miss it and cause the queue to be stopped forever.
2889 	 */
2890 	smp_mb();
2891 
2892 	if (unlikely(netif_tx_queue_stopped(txq)) &&
2893 		     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
2894 		__netif_tx_lock(txq, smp_processor_id());
2895 		if ((netif_tx_queue_stopped(txq)) &&
2896 		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
2897 			netif_tx_wake_queue(txq);
2898 		__netif_tx_unlock(txq);
2899 	}
2900 
2901 	return tx_pkt;
2902 }
2903 
2904 static void
2905 bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2906 			struct sk_buff *skb, int count)
2907 {
2908 	struct bnx2_sw_pg *cons_rx_pg, *prod_rx_pg;
2909 	struct bnx2_rx_bd *cons_bd, *prod_bd;
2910 	int i;
2911 	u16 hw_prod, prod;
2912 	u16 cons = rxr->rx_pg_cons;
2913 
2914 	cons_rx_pg = &rxr->rx_pg_ring[cons];
2915 
2916 	/* The caller was unable to allocate a new page to replace the
2917 	 * last one in the frags array, so we need to recycle that page
2918 	 * and then free the skb.
2919 	 */
2920 	if (skb) {
2921 		struct page *page;
2922 		struct skb_shared_info *shinfo;
2923 
2924 		shinfo = skb_shinfo(skb);
2925 		shinfo->nr_frags--;
2926 		page = skb_frag_page(&shinfo->frags[shinfo->nr_frags]);
2927 		__skb_frag_set_page(&shinfo->frags[shinfo->nr_frags], NULL);
2928 
2929 		cons_rx_pg->page = page;
2930 		dev_kfree_skb(skb);
2931 	}
2932 
2933 	hw_prod = rxr->rx_pg_prod;
2934 
2935 	for (i = 0; i < count; i++) {
2936 		prod = BNX2_RX_PG_RING_IDX(hw_prod);
2937 
2938 		prod_rx_pg = &rxr->rx_pg_ring[prod];
2939 		cons_rx_pg = &rxr->rx_pg_ring[cons];
2940 		cons_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(cons)]
2941 						[BNX2_RX_IDX(cons)];
2942 		prod_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(prod)]
2943 						[BNX2_RX_IDX(prod)];
2944 
2945 		if (prod != cons) {
2946 			prod_rx_pg->page = cons_rx_pg->page;
2947 			cons_rx_pg->page = NULL;
2948 			dma_unmap_addr_set(prod_rx_pg, mapping,
2949 				dma_unmap_addr(cons_rx_pg, mapping));
2950 
2951 			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2952 			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2953 
2954 		}
2955 		cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(cons));
2956 		hw_prod = BNX2_NEXT_RX_BD(hw_prod);
2957 	}
2958 	rxr->rx_pg_prod = hw_prod;
2959 	rxr->rx_pg_cons = cons;
2960 }
2961 
2962 static inline void
2963 bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2964 		   u8 *data, u16 cons, u16 prod)
2965 {
2966 	struct bnx2_sw_bd *cons_rx_buf, *prod_rx_buf;
2967 	struct bnx2_rx_bd *cons_bd, *prod_bd;
2968 
2969 	cons_rx_buf = &rxr->rx_buf_ring[cons];
2970 	prod_rx_buf = &rxr->rx_buf_ring[prod];
2971 
2972 	dma_sync_single_for_device(&bp->pdev->dev,
2973 		dma_unmap_addr(cons_rx_buf, mapping),
2974 		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2975 
2976 	rxr->rx_prod_bseq += bp->rx_buf_use_size;
2977 
2978 	prod_rx_buf->data = data;
2979 
2980 	if (cons == prod)
2981 		return;
2982 
2983 	dma_unmap_addr_set(prod_rx_buf, mapping,
2984 			dma_unmap_addr(cons_rx_buf, mapping));
2985 
2986 	cons_bd = &rxr->rx_desc_ring[BNX2_RX_RING(cons)][BNX2_RX_IDX(cons)];
2987 	prod_bd = &rxr->rx_desc_ring[BNX2_RX_RING(prod)][BNX2_RX_IDX(prod)];
2988 	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2989 	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2990 }
2991 
2992 static struct sk_buff *
2993 bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u8 *data,
2994 	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
2995 	    u32 ring_idx)
2996 {
2997 	int err;
2998 	u16 prod = ring_idx & 0xffff;
2999 	struct sk_buff *skb;
3000 
3001 	err = bnx2_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
3002 	if (unlikely(err)) {
3003 		bnx2_reuse_rx_data(bp, rxr, data, (u16) (ring_idx >> 16), prod);
3004 error:
3005 		if (hdr_len) {
3006 			unsigned int raw_len = len + 4;
3007 			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
3008 
3009 			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3010 		}
3011 		return NULL;
3012 	}
3013 
3014 	dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
3015 			 PCI_DMA_FROMDEVICE);
3016 	skb = build_skb(data, 0);
3017 	if (!skb) {
3018 		kfree(data);
3019 		goto error;
3020 	}
3021 	skb_reserve(skb, ((u8 *)get_l2_fhdr(data) - data) + BNX2_RX_OFFSET);
3022 	if (hdr_len == 0) {
3023 		skb_put(skb, len);
3024 		return skb;
3025 	} else {
3026 		unsigned int i, frag_len, frag_size, pages;
3027 		struct bnx2_sw_pg *rx_pg;
3028 		u16 pg_cons = rxr->rx_pg_cons;
3029 		u16 pg_prod = rxr->rx_pg_prod;
3030 
3031 		frag_size = len + 4 - hdr_len;
3032 		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
3033 		skb_put(skb, hdr_len);
3034 
3035 		for (i = 0; i < pages; i++) {
3036 			dma_addr_t mapping_old;
3037 
3038 			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
3039 			if (unlikely(frag_len <= 4)) {
3040 				unsigned int tail = 4 - frag_len;
3041 
3042 				rxr->rx_pg_cons = pg_cons;
3043 				rxr->rx_pg_prod = pg_prod;
3044 				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
3045 							pages - i);
3046 				skb->len -= tail;
3047 				if (i == 0) {
3048 					skb->tail -= tail;
3049 				} else {
3050 					skb_frag_t *frag =
3051 						&skb_shinfo(skb)->frags[i - 1];
3052 					skb_frag_size_sub(frag, tail);
3053 					skb->data_len -= tail;
3054 				}
3055 				return skb;
3056 			}
3057 			rx_pg = &rxr->rx_pg_ring[pg_cons];
3058 
3059 			/* Don't unmap yet.  If we're unable to allocate a new
3060 			 * page, we need to recycle the page and the DMA addr.
3061 			 */
3062 			mapping_old = dma_unmap_addr(rx_pg, mapping);
3063 			if (i == pages - 1)
3064 				frag_len -= 4;
3065 
3066 			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
3067 			rx_pg->page = NULL;
3068 
3069 			err = bnx2_alloc_rx_page(bp, rxr,
3070 						 BNX2_RX_PG_RING_IDX(pg_prod),
3071 						 GFP_ATOMIC);
3072 			if (unlikely(err)) {
3073 				rxr->rx_pg_cons = pg_cons;
3074 				rxr->rx_pg_prod = pg_prod;
3075 				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
3076 							pages - i);
3077 				return NULL;
3078 			}
3079 
3080 			dma_unmap_page(&bp->pdev->dev, mapping_old,
3081 				       PAGE_SIZE, PCI_DMA_FROMDEVICE);
3082 
3083 			frag_size -= frag_len;
3084 			skb->data_len += frag_len;
3085 			skb->truesize += PAGE_SIZE;
3086 			skb->len += frag_len;
3087 
3088 			pg_prod = BNX2_NEXT_RX_BD(pg_prod);
3089 			pg_cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(pg_cons));
3090 		}
3091 		rxr->rx_pg_prod = pg_prod;
3092 		rxr->rx_pg_cons = pg_cons;
3093 	}
3094 	return skb;
3095 }
3096 
3097 static inline u16
3098 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
3099 {
3100 	u16 cons;
3101 
3102 	/* Tell compiler that status block fields can change. */
3103 	barrier();
3104 	cons = *bnapi->hw_rx_cons_ptr;
3105 	barrier();
3106 	if (unlikely((cons & BNX2_MAX_RX_DESC_CNT) == BNX2_MAX_RX_DESC_CNT))
3107 		cons++;
3108 	return cons;
3109 }
3110 
3111 static int
3112 bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3113 {
3114 	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3115 	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
3116 	struct l2_fhdr *rx_hdr;
3117 	int rx_pkt = 0, pg_ring_used = 0;
3118 
3119 	hw_cons = bnx2_get_hw_rx_cons(bnapi);
3120 	sw_cons = rxr->rx_cons;
3121 	sw_prod = rxr->rx_prod;
3122 
3123 	/* Memory barrier necessary as speculative reads of the rx
3124 	 * buffer can be ahead of the index in the status block
3125 	 */
3126 	rmb();
3127 	while (sw_cons != hw_cons) {
3128 		unsigned int len, hdr_len;
3129 		u32 status;
3130 		struct bnx2_sw_bd *rx_buf, *next_rx_buf;
3131 		struct sk_buff *skb;
3132 		dma_addr_t dma_addr;
3133 		u8 *data;
3134 		u16 next_ring_idx;
3135 
3136 		sw_ring_cons = BNX2_RX_RING_IDX(sw_cons);
3137 		sw_ring_prod = BNX2_RX_RING_IDX(sw_prod);
3138 
3139 		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
3140 		data = rx_buf->data;
3141 		rx_buf->data = NULL;
3142 
3143 		rx_hdr = get_l2_fhdr(data);
3144 		prefetch(rx_hdr);
3145 
3146 		dma_addr = dma_unmap_addr(rx_buf, mapping);
3147 
3148 		dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
3149 			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
3150 			PCI_DMA_FROMDEVICE);
3151 
3152 		next_ring_idx = BNX2_RX_RING_IDX(BNX2_NEXT_RX_BD(sw_cons));
3153 		next_rx_buf = &rxr->rx_buf_ring[next_ring_idx];
3154 		prefetch(get_l2_fhdr(next_rx_buf->data));
3155 
3156 		len = rx_hdr->l2_fhdr_pkt_len;
3157 		status = rx_hdr->l2_fhdr_status;
3158 
3159 		hdr_len = 0;
3160 		if (status & L2_FHDR_STATUS_SPLIT) {
3161 			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
3162 			pg_ring_used = 1;
3163 		} else if (len > bp->rx_jumbo_thresh) {
3164 			hdr_len = bp->rx_jumbo_thresh;
3165 			pg_ring_used = 1;
3166 		}
3167 
3168 		if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
3169 				       L2_FHDR_ERRORS_PHY_DECODE |
3170 				       L2_FHDR_ERRORS_ALIGNMENT |
3171 				       L2_FHDR_ERRORS_TOO_SHORT |
3172 				       L2_FHDR_ERRORS_GIANT_FRAME))) {
3173 
3174 			bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
3175 					  sw_ring_prod);
3176 			if (pg_ring_used) {
3177 				int pages;
3178 
3179 				pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;
3180 
3181 				bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3182 			}
3183 			goto next_rx;
3184 		}
3185 
3186 		len -= 4;
3187 
3188 		if (len <= bp->rx_copy_thresh) {
3189 			skb = netdev_alloc_skb(bp->dev, len + 6);
3190 			if (skb == NULL) {
3191 				bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
3192 						  sw_ring_prod);
3193 				goto next_rx;
3194 			}
3195 
3196 			/* aligned copy */
3197 			memcpy(skb->data,
3198 			       (u8 *)rx_hdr + BNX2_RX_OFFSET - 6,
3199 			       len + 6);
3200 			skb_reserve(skb, 6);
3201 			skb_put(skb, len);
3202 
3203 			bnx2_reuse_rx_data(bp, rxr, data,
3204 				sw_ring_cons, sw_ring_prod);
3205 
3206 		} else {
3207 			skb = bnx2_rx_skb(bp, rxr, data, len, hdr_len, dma_addr,
3208 					  (sw_ring_cons << 16) | sw_ring_prod);
3209 			if (!skb)
3210 				goto next_rx;
3211 		}
3212 		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
3213 		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
3214 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rx_hdr->l2_fhdr_vlan_tag);
3215 
3216 		skb->protocol = eth_type_trans(skb, bp->dev);
3217 
3218 		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
3219 			(ntohs(skb->protocol) != 0x8100)) {
3220 
3221 			dev_kfree_skb(skb);
3222 			goto next_rx;
3223 
3224 		}
3225 
3226 		skb_checksum_none_assert(skb);
3227 		if ((bp->dev->features & NETIF_F_RXCSUM) &&
3228 			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
3229 			L2_FHDR_STATUS_UDP_DATAGRAM))) {
3230 
3231 			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
3232 					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
3233 				skb->ip_summed = CHECKSUM_UNNECESSARY;
3234 		}
3235 		if ((bp->dev->features & NETIF_F_RXHASH) &&
3236 		    ((status & L2_FHDR_STATUS_USE_RXHASH) ==
3237 		     L2_FHDR_STATUS_USE_RXHASH))
3238 			skb->rxhash = rx_hdr->l2_fhdr_hash;
3239 
3240 		skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
3241 		napi_gro_receive(&bnapi->napi, skb);
3242 		rx_pkt++;
3243 
3244 next_rx:
3245 		sw_cons = BNX2_NEXT_RX_BD(sw_cons);
3246 		sw_prod = BNX2_NEXT_RX_BD(sw_prod);
3247 
3248 		if ((rx_pkt == budget))
3249 			break;
3250 
3251 		/* Refresh hw_cons to see if there is new work */
3252 		if (sw_cons == hw_cons) {
3253 			hw_cons = bnx2_get_hw_rx_cons(bnapi);
3254 			rmb();
3255 		}
3256 	}
3257 	rxr->rx_cons = sw_cons;
3258 	rxr->rx_prod = sw_prod;
3259 
3260 	if (pg_ring_used)
3261 		BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
3262 
3263 	BNX2_WR16(bp, rxr->rx_bidx_addr, sw_prod);
3264 
3265 	BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
3266 
3267 	mmiowb();
3268 
3269 	return rx_pkt;
3270 
3271 }
3272 
3273 /* MSI ISR - The only difference between this and the INTx ISR
3274  * is that the MSI interrupt is always serviced.
3275  */
3276 static irqreturn_t
3277 bnx2_msi(int irq, void *dev_instance)
3278 {
3279 	struct bnx2_napi *bnapi = dev_instance;
3280 	struct bnx2 *bp = bnapi->bp;
3281 
3282 	prefetch(bnapi->status_blk.msi);
3283 	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3284 		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3285 		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3286 
3287 	/* Return here if interrupt is disabled. */
3288 	if (unlikely(atomic_read(&bp->intr_sem) != 0))
3289 		return IRQ_HANDLED;
3290 
3291 	napi_schedule(&bnapi->napi);
3292 
3293 	return IRQ_HANDLED;
3294 }
3295 
3296 static irqreturn_t
3297 bnx2_msi_1shot(int irq, void *dev_instance)
3298 {
3299 	struct bnx2_napi *bnapi = dev_instance;
3300 	struct bnx2 *bp = bnapi->bp;
3301 
3302 	prefetch(bnapi->status_blk.msi);
3303 
3304 	/* Return here if interrupt is disabled. */
3305 	if (unlikely(atomic_read(&bp->intr_sem) != 0))
3306 		return IRQ_HANDLED;
3307 
3308 	napi_schedule(&bnapi->napi);
3309 
3310 	return IRQ_HANDLED;
3311 }
3312 
3313 static irqreturn_t
3314 bnx2_interrupt(int irq, void *dev_instance)
3315 {
3316 	struct bnx2_napi *bnapi = dev_instance;
3317 	struct bnx2 *bp = bnapi->bp;
3318 	struct status_block *sblk = bnapi->status_blk.msi;
3319 
3320 	/* When using INTx, it is possible for the interrupt to arrive
3321 	 * at the CPU before the status block posted prior to the
3322 	 * interrupt. Reading a register will flush the status block.
3323 	 * When using MSI, the MSI message will always complete after
3324 	 * the status block write.
3325 	 */
3326 	if ((sblk->status_idx == bnapi->last_status_idx) &&
3327 	    (BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS) &
3328 	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
3329 		return IRQ_NONE;
3330 
3331 	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3332 		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3333 		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3334 
3335 	/* Read back to deassert IRQ immediately to avoid too many
3336 	 * spurious interrupts.
3337 	 */
3338 	BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
3339 
3340 	/* Return here if interrupt is shared and is disabled. */
3341 	if (unlikely(atomic_read(&bp->intr_sem) != 0))
3342 		return IRQ_HANDLED;
3343 
3344 	if (napi_schedule_prep(&bnapi->napi)) {
3345 		bnapi->last_status_idx = sblk->status_idx;
3346 		__napi_schedule(&bnapi->napi);
3347 	}
3348 
3349 	return IRQ_HANDLED;
3350 }
3351 
3352 static inline int
3353 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3354 {
3355 	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3356 	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3357 
3358 	if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3359 	    (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3360 		return 1;
3361 	return 0;
3362 }
3363 
3364 #define STATUS_ATTN_EVENTS	(STATUS_ATTN_BITS_LINK_STATE | \
3365 				 STATUS_ATTN_BITS_TIMER_ABORT)
3366 
3367 static inline int
3368 bnx2_has_work(struct bnx2_napi *bnapi)
3369 {
3370 	struct status_block *sblk = bnapi->status_blk.msi;
3371 
3372 	if (bnx2_has_fast_work(bnapi))
3373 		return 1;
3374 
3375 #ifdef BCM_CNIC
3376 	if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3377 		return 1;
3378 #endif
3379 
3380 	if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3381 	    (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3382 		return 1;
3383 
3384 	return 0;
3385 }
3386 
3387 static void
3388 bnx2_chk_missed_msi(struct bnx2 *bp)
3389 {
3390 	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
3391 	u32 msi_ctrl;
3392 
3393 	if (bnx2_has_work(bnapi)) {
3394 		msi_ctrl = BNX2_RD(bp, BNX2_PCICFG_MSI_CONTROL);
3395 		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
3396 			return;
3397 
3398 		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
3399 			BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
3400 				~BNX2_PCICFG_MSI_CONTROL_ENABLE);
3401 			BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
3402 			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
3403 		}
3404 	}
3405 
3406 	bp->idle_chk_status_idx = bnapi->last_status_idx;
3407 }
3408 
3409 #ifdef BCM_CNIC
3410 static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
3411 {
3412 	struct cnic_ops *c_ops;
3413 
3414 	if (!bnapi->cnic_present)
3415 		return;
3416 
3417 	rcu_read_lock();
3418 	c_ops = rcu_dereference(bp->cnic_ops);
3419 	if (c_ops)
3420 		bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
3421 						      bnapi->status_blk.msi);
3422 	rcu_read_unlock();
3423 }
3424 #endif
3425 
3426 static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
3427 {
3428 	struct status_block *sblk = bnapi->status_blk.msi;
3429 	u32 status_attn_bits = sblk->status_attn_bits;
3430 	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
3431 
3432 	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
3433 	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
3434 
3435 		bnx2_phy_int(bp, bnapi);
3436 
3437 		/* This is needed to take care of transient status
3438 		 * during link changes.
3439 		 */
3440 		BNX2_WR(bp, BNX2_HC_COMMAND,
3441 			bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3442 		BNX2_RD(bp, BNX2_HC_COMMAND);
3443 	}
3444 }
3445 
3446 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3447 			  int work_done, int budget)
3448 {
3449 	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3450 	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3451 
3452 	if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3453 		bnx2_tx_int(bp, bnapi, 0);
3454 
3455 	if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3456 		work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3457 
3458 	return work_done;
3459 }
3460 
3461 static int bnx2_poll_msix(struct napi_struct *napi, int budget)
3462 {
3463 	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3464 	struct bnx2 *bp = bnapi->bp;
3465 	int work_done = 0;
3466 	struct status_block_msix *sblk = bnapi->status_blk.msix;
3467 
3468 	while (1) {
3469 		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3470 		if (unlikely(work_done >= budget))
3471 			break;
3472 
3473 		bnapi->last_status_idx = sblk->status_idx;
3474 		/* status idx must be read before checking for more work. */
3475 		rmb();
3476 		if (likely(!bnx2_has_fast_work(bnapi))) {
3477 
3478 			napi_complete(napi);
3479 			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
3480 				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3481 				bnapi->last_status_idx);
3482 			break;
3483 		}
3484 	}
3485 	return work_done;
3486 }
3487 
3488 static int bnx2_poll(struct napi_struct *napi, int budget)
3489 {
3490 	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3491 	struct bnx2 *bp = bnapi->bp;
3492 	int work_done = 0;
3493 	struct status_block *sblk = bnapi->status_blk.msi;
3494 
3495 	while (1) {
3496 		bnx2_poll_link(bp, bnapi);
3497 
3498 		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3499 
3500 #ifdef BCM_CNIC
3501 		bnx2_poll_cnic(bp, bnapi);
3502 #endif
3503 
3504 		/* bnapi->last_status_idx is used below to tell the hw how
3505 		 * much work has been processed, so we must read it before
3506 		 * checking for more work.
3507 		 */
3508 		bnapi->last_status_idx = sblk->status_idx;
3509 
3510 		if (unlikely(work_done >= budget))
3511 			break;
3512 
3513 		rmb();
3514 		if (likely(!bnx2_has_work(bnapi))) {
3515 			napi_complete(napi);
3516 			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
3517 				BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3518 					BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3519 					bnapi->last_status_idx);
3520 				break;
3521 			}
3522 			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3523 				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3524 				BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
3525 				bnapi->last_status_idx);
3526 
3527 			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3528 				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3529 				bnapi->last_status_idx);
3530 			break;
3531 		}
3532 	}
3533 
3534 	return work_done;
3535 }
3536 
3537 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
3538  * from set_multicast.
3539  */
3540 static void
3541 bnx2_set_rx_mode(struct net_device *dev)
3542 {
3543 	struct bnx2 *bp = netdev_priv(dev);
3544 	u32 rx_mode, sort_mode;
3545 	struct netdev_hw_addr *ha;
3546 	int i;
3547 
3548 	if (!netif_running(dev))
3549 		return;
3550 
3551 	spin_lock_bh(&bp->phy_lock);
3552 
3553 	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3554 				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3555 	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
3556 	if (!(dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
3557 	     (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
3558 		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3559 	if (dev->flags & IFF_PROMISC) {
3560 		/* Promiscuous mode. */
3561 		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3562 		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3563 			     BNX2_RPM_SORT_USER0_PROM_VLAN;
3564 	}
3565 	else if (dev->flags & IFF_ALLMULTI) {
3566 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3567 			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3568 				0xffffffff);
3569         	}
3570 		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3571 	}
3572 	else {
3573 		/* Accept one or more multicast(s). */
3574 		u32 mc_filter[NUM_MC_HASH_REGISTERS];
3575 		u32 regidx;
3576 		u32 bit;
3577 		u32 crc;
3578 
3579 		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3580 
3581 		netdev_for_each_mc_addr(ha, dev) {
3582 			crc = ether_crc_le(ETH_ALEN, ha->addr);
3583 			bit = crc & 0xff;
3584 			regidx = (bit & 0xe0) >> 5;
3585 			bit &= 0x1f;
3586 			mc_filter[regidx] |= (1 << bit);
3587 		}
3588 
3589 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3590 			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3591 				mc_filter[i]);
3592 		}
3593 
3594 		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3595 	}
3596 
3597 	if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
3598 		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3599 		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3600 			     BNX2_RPM_SORT_USER0_PROM_VLAN;
3601 	} else if (!(dev->flags & IFF_PROMISC)) {
3602 		/* Add all entries into to the match filter list */
3603 		i = 0;
3604 		netdev_for_each_uc_addr(ha, dev) {
3605 			bnx2_set_mac_addr(bp, ha->addr,
3606 					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
3607 			sort_mode |= (1 <<
3608 				      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
3609 			i++;
3610 		}
3611 
3612 	}
3613 
3614 	if (rx_mode != bp->rx_mode) {
3615 		bp->rx_mode = rx_mode;
3616 		BNX2_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3617 	}
3618 
3619 	BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3620 	BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3621 	BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3622 
3623 	spin_unlock_bh(&bp->phy_lock);
3624 }
3625 
3626 static int
3627 check_fw_section(const struct firmware *fw,
3628 		 const struct bnx2_fw_file_section *section,
3629 		 u32 alignment, bool non_empty)
3630 {
3631 	u32 offset = be32_to_cpu(section->offset);
3632 	u32 len = be32_to_cpu(section->len);
3633 
3634 	if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3635 		return -EINVAL;
3636 	if ((non_empty && len == 0) || len > fw->size - offset ||
3637 	    len & (alignment - 1))
3638 		return -EINVAL;
3639 	return 0;
3640 }
3641 
3642 static int
3643 check_mips_fw_entry(const struct firmware *fw,
3644 		    const struct bnx2_mips_fw_file_entry *entry)
3645 {
3646 	if (check_fw_section(fw, &entry->text, 4, true) ||
3647 	    check_fw_section(fw, &entry->data, 4, false) ||
3648 	    check_fw_section(fw, &entry->rodata, 4, false))
3649 		return -EINVAL;
3650 	return 0;
3651 }
3652 
3653 static void bnx2_release_firmware(struct bnx2 *bp)
3654 {
3655 	if (bp->rv2p_firmware) {
3656 		release_firmware(bp->mips_firmware);
3657 		release_firmware(bp->rv2p_firmware);
3658 		bp->rv2p_firmware = NULL;
3659 	}
3660 }
3661 
3662 static int bnx2_request_uncached_firmware(struct bnx2 *bp)
3663 {
3664 	const char *mips_fw_file, *rv2p_fw_file;
3665 	const struct bnx2_mips_fw_file *mips_fw;
3666 	const struct bnx2_rv2p_fw_file *rv2p_fw;
3667 	int rc;
3668 
3669 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
3670 		mips_fw_file = FW_MIPS_FILE_09;
3671 		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A0) ||
3672 		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A1))
3673 			rv2p_fw_file = FW_RV2P_FILE_09_Ax;
3674 		else
3675 			rv2p_fw_file = FW_RV2P_FILE_09;
3676 	} else {
3677 		mips_fw_file = FW_MIPS_FILE_06;
3678 		rv2p_fw_file = FW_RV2P_FILE_06;
3679 	}
3680 
3681 	rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3682 	if (rc) {
3683 		pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
3684 		goto out;
3685 	}
3686 
3687 	rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3688 	if (rc) {
3689 		pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
3690 		goto err_release_mips_firmware;
3691 	}
3692 	mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3693 	rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3694 	if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3695 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3696 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3697 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3698 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3699 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3700 		pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
3701 		rc = -EINVAL;
3702 		goto err_release_firmware;
3703 	}
3704 	if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3705 	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3706 	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3707 		pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
3708 		rc = -EINVAL;
3709 		goto err_release_firmware;
3710 	}
3711 out:
3712 	return rc;
3713 
3714 err_release_firmware:
3715 	release_firmware(bp->rv2p_firmware);
3716 	bp->rv2p_firmware = NULL;
3717 err_release_mips_firmware:
3718 	release_firmware(bp->mips_firmware);
3719 	goto out;
3720 }
3721 
3722 static int bnx2_request_firmware(struct bnx2 *bp)
3723 {
3724 	return bp->rv2p_firmware ? 0 : bnx2_request_uncached_firmware(bp);
3725 }
3726 
3727 static u32
3728 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3729 {
3730 	switch (idx) {
3731 	case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3732 		rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3733 		rv2p_code |= RV2P_BD_PAGE_SIZE;
3734 		break;
3735 	}
3736 	return rv2p_code;
3737 }
3738 
3739 static int
3740 load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
3741 	     const struct bnx2_rv2p_fw_file_entry *fw_entry)
3742 {
3743 	u32 rv2p_code_len, file_offset;
3744 	__be32 *rv2p_code;
3745 	int i;
3746 	u32 val, cmd, addr;
3747 
3748 	rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
3749 	file_offset = be32_to_cpu(fw_entry->rv2p.offset);
3750 
3751 	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3752 
3753 	if (rv2p_proc == RV2P_PROC1) {
3754 		cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3755 		addr = BNX2_RV2P_PROC1_ADDR_CMD;
3756 	} else {
3757 		cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3758 		addr = BNX2_RV2P_PROC2_ADDR_CMD;
3759 	}
3760 
3761 	for (i = 0; i < rv2p_code_len; i += 8) {
3762 		BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
3763 		rv2p_code++;
3764 		BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
3765 		rv2p_code++;
3766 
3767 		val = (i / 8) | cmd;
3768 		BNX2_WR(bp, addr, val);
3769 	}
3770 
3771 	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3772 	for (i = 0; i < 8; i++) {
3773 		u32 loc, code;
3774 
3775 		loc = be32_to_cpu(fw_entry->fixup[i]);
3776 		if (loc && ((loc * 4) < rv2p_code_len)) {
3777 			code = be32_to_cpu(*(rv2p_code + loc - 1));
3778 			BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
3779 			code = be32_to_cpu(*(rv2p_code + loc));
3780 			code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
3781 			BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, code);
3782 
3783 			val = (loc / 2) | cmd;
3784 			BNX2_WR(bp, addr, val);
3785 		}
3786 	}
3787 
3788 	/* Reset the processor, un-stall is done later. */
3789 	if (rv2p_proc == RV2P_PROC1) {
3790 		BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3791 	}
3792 	else {
3793 		BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3794 	}
3795 
3796 	return 0;
3797 }
3798 
3799 static int
3800 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3801 	    const struct bnx2_mips_fw_file_entry *fw_entry)
3802 {
3803 	u32 addr, len, file_offset;
3804 	__be32 *data;
3805 	u32 offset;
3806 	u32 val;
3807 
3808 	/* Halt the CPU. */
3809 	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3810 	val |= cpu_reg->mode_value_halt;
3811 	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3812 	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3813 
3814 	/* Load the Text area. */
3815 	addr = be32_to_cpu(fw_entry->text.addr);
3816 	len = be32_to_cpu(fw_entry->text.len);
3817 	file_offset = be32_to_cpu(fw_entry->text.offset);
3818 	data = (__be32 *)(bp->mips_firmware->data + file_offset);
3819 
3820 	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3821 	if (len) {
3822 		int j;
3823 
3824 		for (j = 0; j < (len / 4); j++, offset += 4)
3825 			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3826 	}
3827 
3828 	/* Load the Data area. */
3829 	addr = be32_to_cpu(fw_entry->data.addr);
3830 	len = be32_to_cpu(fw_entry->data.len);
3831 	file_offset = be32_to_cpu(fw_entry->data.offset);
3832 	data = (__be32 *)(bp->mips_firmware->data + file_offset);
3833 
3834 	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3835 	if (len) {
3836 		int j;
3837 
3838 		for (j = 0; j < (len / 4); j++, offset += 4)
3839 			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3840 	}
3841 
3842 	/* Load the Read-Only area. */
3843 	addr = be32_to_cpu(fw_entry->rodata.addr);
3844 	len = be32_to_cpu(fw_entry->rodata.len);
3845 	file_offset = be32_to_cpu(fw_entry->rodata.offset);
3846 	data = (__be32 *)(bp->mips_firmware->data + file_offset);
3847 
3848 	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3849 	if (len) {
3850 		int j;
3851 
3852 		for (j = 0; j < (len / 4); j++, offset += 4)
3853 			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3854 	}
3855 
3856 	/* Clear the pre-fetch instruction. */
3857 	bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3858 
3859 	val = be32_to_cpu(fw_entry->start_addr);
3860 	bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3861 
3862 	/* Start the CPU. */
3863 	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3864 	val &= ~cpu_reg->mode_value_halt;
3865 	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3866 	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3867 
3868 	return 0;
3869 }
3870 
3871 static int
3872 bnx2_init_cpus(struct bnx2 *bp)
3873 {
3874 	const struct bnx2_mips_fw_file *mips_fw =
3875 		(const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3876 	const struct bnx2_rv2p_fw_file *rv2p_fw =
3877 		(const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3878 	int rc;
3879 
3880 	/* Initialize the RV2P processor. */
3881 	load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3882 	load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3883 
3884 	/* Initialize the RX Processor. */
3885 	rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3886 	if (rc)
3887 		goto init_cpu_err;
3888 
3889 	/* Initialize the TX Processor. */
3890 	rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3891 	if (rc)
3892 		goto init_cpu_err;
3893 
3894 	/* Initialize the TX Patch-up Processor. */
3895 	rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3896 	if (rc)
3897 		goto init_cpu_err;
3898 
3899 	/* Initialize the Completion Processor. */
3900 	rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3901 	if (rc)
3902 		goto init_cpu_err;
3903 
3904 	/* Initialize the Command Processor. */
3905 	rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3906 
3907 init_cpu_err:
3908 	return rc;
3909 }
3910 
3911 static int
3912 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
3913 {
3914 	u16 pmcsr;
3915 
3916 	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3917 
3918 	switch (state) {
3919 	case PCI_D0: {
3920 		u32 val;
3921 
3922 		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3923 			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3924 			PCI_PM_CTRL_PME_STATUS);
3925 
3926 		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3927 			/* delay required during transition out of D3hot */
3928 			msleep(20);
3929 
3930 		val = BNX2_RD(bp, BNX2_EMAC_MODE);
3931 		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
3932 		val &= ~BNX2_EMAC_MODE_MPKT;
3933 		BNX2_WR(bp, BNX2_EMAC_MODE, val);
3934 
3935 		val = BNX2_RD(bp, BNX2_RPM_CONFIG);
3936 		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3937 		BNX2_WR(bp, BNX2_RPM_CONFIG, val);
3938 		break;
3939 	}
3940 	case PCI_D3hot: {
3941 		int i;
3942 		u32 val, wol_msg;
3943 
3944 		if (bp->wol) {
3945 			u32 advertising;
3946 			u8 autoneg;
3947 
3948 			autoneg = bp->autoneg;
3949 			advertising = bp->advertising;
3950 
3951 			if (bp->phy_port == PORT_TP) {
3952 				bp->autoneg = AUTONEG_SPEED;
3953 				bp->advertising = ADVERTISED_10baseT_Half |
3954 					ADVERTISED_10baseT_Full |
3955 					ADVERTISED_100baseT_Half |
3956 					ADVERTISED_100baseT_Full |
3957 					ADVERTISED_Autoneg;
3958 			}
3959 
3960 			spin_lock_bh(&bp->phy_lock);
3961 			bnx2_setup_phy(bp, bp->phy_port);
3962 			spin_unlock_bh(&bp->phy_lock);
3963 
3964 			bp->autoneg = autoneg;
3965 			bp->advertising = advertising;
3966 
3967 			bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
3968 
3969 			val = BNX2_RD(bp, BNX2_EMAC_MODE);
3970 
3971 			/* Enable port mode. */
3972 			val &= ~BNX2_EMAC_MODE_PORT;
3973 			val |= BNX2_EMAC_MODE_MPKT_RCVD |
3974 			       BNX2_EMAC_MODE_ACPI_RCVD |
3975 			       BNX2_EMAC_MODE_MPKT;
3976 			if (bp->phy_port == PORT_TP)
3977 				val |= BNX2_EMAC_MODE_PORT_MII;
3978 			else {
3979 				val |= BNX2_EMAC_MODE_PORT_GMII;
3980 				if (bp->line_speed == SPEED_2500)
3981 					val |= BNX2_EMAC_MODE_25G_MODE;
3982 			}
3983 
3984 			BNX2_WR(bp, BNX2_EMAC_MODE, val);
3985 
3986 			/* receive all multicast */
3987 			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3988 				BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3989 					0xffffffff);
3990 			}
3991 			BNX2_WR(bp, BNX2_EMAC_RX_MODE,
3992 				BNX2_EMAC_RX_MODE_SORT_MODE);
3993 
3994 			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
3995 			      BNX2_RPM_SORT_USER0_MC_EN;
3996 			BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3997 			BNX2_WR(bp, BNX2_RPM_SORT_USER0, val);
3998 			BNX2_WR(bp, BNX2_RPM_SORT_USER0, val |
3999 				BNX2_RPM_SORT_USER0_ENA);
4000 
4001 			/* Need to enable EMAC and RPM for WOL. */
4002 			BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4003 				BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
4004 				BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
4005 				BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
4006 
4007 			val = BNX2_RD(bp, BNX2_RPM_CONFIG);
4008 			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
4009 			BNX2_WR(bp, BNX2_RPM_CONFIG, val);
4010 
4011 			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
4012 		}
4013 		else {
4014 			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
4015 		}
4016 
4017 		if (!(bp->flags & BNX2_FLAG_NO_WOL))
4018 			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
4019 				     1, 0);
4020 
4021 		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
4022 		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
4023 		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)) {
4024 
4025 			if (bp->wol)
4026 				pmcsr |= 3;
4027 		}
4028 		else {
4029 			pmcsr |= 3;
4030 		}
4031 		if (bp->wol) {
4032 			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
4033 		}
4034 		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
4035 				      pmcsr);
4036 
4037 		/* No more memory access after this point until
4038 		 * device is brought back to D0.
4039 		 */
4040 		udelay(50);
4041 		break;
4042 	}
4043 	default:
4044 		return -EINVAL;
4045 	}
4046 	return 0;
4047 }
4048 
4049 static int
4050 bnx2_acquire_nvram_lock(struct bnx2 *bp)
4051 {
4052 	u32 val;
4053 	int j;
4054 
4055 	/* Request access to the flash interface. */
4056 	BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4057 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4058 		val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4059 		if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4060 			break;
4061 
4062 		udelay(5);
4063 	}
4064 
4065 	if (j >= NVRAM_TIMEOUT_COUNT)
4066 		return -EBUSY;
4067 
4068 	return 0;
4069 }
4070 
4071 static int
4072 bnx2_release_nvram_lock(struct bnx2 *bp)
4073 {
4074 	int j;
4075 	u32 val;
4076 
4077 	/* Relinquish nvram interface. */
4078 	BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4079 
4080 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4081 		val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4082 		if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4083 			break;
4084 
4085 		udelay(5);
4086 	}
4087 
4088 	if (j >= NVRAM_TIMEOUT_COUNT)
4089 		return -EBUSY;
4090 
4091 	return 0;
4092 }
4093 
4094 
4095 static int
4096 bnx2_enable_nvram_write(struct bnx2 *bp)
4097 {
4098 	u32 val;
4099 
4100 	val = BNX2_RD(bp, BNX2_MISC_CFG);
4101 	BNX2_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4102 
4103 	if (bp->flash_info->flags & BNX2_NV_WREN) {
4104 		int j;
4105 
4106 		BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4107 		BNX2_WR(bp, BNX2_NVM_COMMAND,
4108 			BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4109 
4110 		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4111 			udelay(5);
4112 
4113 			val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4114 			if (val & BNX2_NVM_COMMAND_DONE)
4115 				break;
4116 		}
4117 
4118 		if (j >= NVRAM_TIMEOUT_COUNT)
4119 			return -EBUSY;
4120 	}
4121 	return 0;
4122 }
4123 
4124 static void
4125 bnx2_disable_nvram_write(struct bnx2 *bp)
4126 {
4127 	u32 val;
4128 
4129 	val = BNX2_RD(bp, BNX2_MISC_CFG);
4130 	BNX2_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4131 }
4132 
4133 
4134 static void
4135 bnx2_enable_nvram_access(struct bnx2 *bp)
4136 {
4137 	u32 val;
4138 
4139 	val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4140 	/* Enable both bits, even on read. */
4141 	BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4142 		val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4143 }
4144 
4145 static void
4146 bnx2_disable_nvram_access(struct bnx2 *bp)
4147 {
4148 	u32 val;
4149 
4150 	val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4151 	/* Disable both bits, even after read. */
4152 	BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4153 		val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4154 			BNX2_NVM_ACCESS_ENABLE_WR_EN));
4155 }
4156 
4157 static int
4158 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
4159 {
4160 	u32 cmd;
4161 	int j;
4162 
4163 	if (bp->flash_info->flags & BNX2_NV_BUFFERED)
4164 		/* Buffered flash, no erase needed */
4165 		return 0;
4166 
4167 	/* Build an erase command */
4168 	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
4169 	      BNX2_NVM_COMMAND_DOIT;
4170 
4171 	/* Need to clear DONE bit separately. */
4172 	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4173 
4174 	/* Address of the NVRAM to read from. */
4175 	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4176 
4177 	/* Issue an erase command. */
4178 	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4179 
4180 	/* Wait for completion. */
4181 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4182 		u32 val;
4183 
4184 		udelay(5);
4185 
4186 		val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4187 		if (val & BNX2_NVM_COMMAND_DONE)
4188 			break;
4189 	}
4190 
4191 	if (j >= NVRAM_TIMEOUT_COUNT)
4192 		return -EBUSY;
4193 
4194 	return 0;
4195 }
4196 
4197 static int
4198 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
4199 {
4200 	u32 cmd;
4201 	int j;
4202 
4203 	/* Build the command word. */
4204 	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
4205 
4206 	/* Calculate an offset of a buffered flash, not needed for 5709. */
4207 	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4208 		offset = ((offset / bp->flash_info->page_size) <<
4209 			   bp->flash_info->page_bits) +
4210 			  (offset % bp->flash_info->page_size);
4211 	}
4212 
4213 	/* Need to clear DONE bit separately. */
4214 	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4215 
4216 	/* Address of the NVRAM to read from. */
4217 	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4218 
4219 	/* Issue a read command. */
4220 	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4221 
4222 	/* Wait for completion. */
4223 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4224 		u32 val;
4225 
4226 		udelay(5);
4227 
4228 		val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4229 		if (val & BNX2_NVM_COMMAND_DONE) {
4230 			__be32 v = cpu_to_be32(BNX2_RD(bp, BNX2_NVM_READ));
4231 			memcpy(ret_val, &v, 4);
4232 			break;
4233 		}
4234 	}
4235 	if (j >= NVRAM_TIMEOUT_COUNT)
4236 		return -EBUSY;
4237 
4238 	return 0;
4239 }
4240 
4241 
4242 static int
4243 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4244 {
4245 	u32 cmd;
4246 	__be32 val32;
4247 	int j;
4248 
4249 	/* Build the command word. */
4250 	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4251 
4252 	/* Calculate an offset of a buffered flash, not needed for 5709. */
4253 	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4254 		offset = ((offset / bp->flash_info->page_size) <<
4255 			  bp->flash_info->page_bits) +
4256 			 (offset % bp->flash_info->page_size);
4257 	}
4258 
4259 	/* Need to clear DONE bit separately. */
4260 	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4261 
4262 	memcpy(&val32, val, 4);
4263 
4264 	/* Write the data. */
4265 	BNX2_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4266 
4267 	/* Address of the NVRAM to write to. */
4268 	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4269 
4270 	/* Issue the write command. */
4271 	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4272 
4273 	/* Wait for completion. */
4274 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4275 		udelay(5);
4276 
4277 		if (BNX2_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4278 			break;
4279 	}
4280 	if (j >= NVRAM_TIMEOUT_COUNT)
4281 		return -EBUSY;
4282 
4283 	return 0;
4284 }
4285 
4286 static int
4287 bnx2_init_nvram(struct bnx2 *bp)
4288 {
4289 	u32 val;
4290 	int j, entry_count, rc = 0;
4291 	const struct flash_spec *flash;
4292 
4293 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4294 		bp->flash_info = &flash_5709;
4295 		goto get_flash_size;
4296 	}
4297 
4298 	/* Determine the selected interface. */
4299 	val = BNX2_RD(bp, BNX2_NVM_CFG1);
4300 
4301 	entry_count = ARRAY_SIZE(flash_table);
4302 
4303 	if (val & 0x40000000) {
4304 
4305 		/* Flash interface has been reconfigured */
4306 		for (j = 0, flash = &flash_table[0]; j < entry_count;
4307 		     j++, flash++) {
4308 			if ((val & FLASH_BACKUP_STRAP_MASK) ==
4309 			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
4310 				bp->flash_info = flash;
4311 				break;
4312 			}
4313 		}
4314 	}
4315 	else {
4316 		u32 mask;
4317 		/* Not yet been reconfigured */
4318 
4319 		if (val & (1 << 23))
4320 			mask = FLASH_BACKUP_STRAP_MASK;
4321 		else
4322 			mask = FLASH_STRAP_MASK;
4323 
4324 		for (j = 0, flash = &flash_table[0]; j < entry_count;
4325 			j++, flash++) {
4326 
4327 			if ((val & mask) == (flash->strapping & mask)) {
4328 				bp->flash_info = flash;
4329 
4330 				/* Request access to the flash interface. */
4331 				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4332 					return rc;
4333 
4334 				/* Enable access to flash interface */
4335 				bnx2_enable_nvram_access(bp);
4336 
4337 				/* Reconfigure the flash interface */
4338 				BNX2_WR(bp, BNX2_NVM_CFG1, flash->config1);
4339 				BNX2_WR(bp, BNX2_NVM_CFG2, flash->config2);
4340 				BNX2_WR(bp, BNX2_NVM_CFG3, flash->config3);
4341 				BNX2_WR(bp, BNX2_NVM_WRITE1, flash->write1);
4342 
4343 				/* Disable access to flash interface */
4344 				bnx2_disable_nvram_access(bp);
4345 				bnx2_release_nvram_lock(bp);
4346 
4347 				break;
4348 			}
4349 		}
4350 	} /* if (val & 0x40000000) */
4351 
4352 	if (j == entry_count) {
4353 		bp->flash_info = NULL;
4354 		pr_alert("Unknown flash/EEPROM type\n");
4355 		return -ENODEV;
4356 	}
4357 
4358 get_flash_size:
4359 	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
4360 	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
4361 	if (val)
4362 		bp->flash_size = val;
4363 	else
4364 		bp->flash_size = bp->flash_info->total_size;
4365 
4366 	return rc;
4367 }
4368 
4369 static int
4370 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4371 		int buf_size)
4372 {
4373 	int rc = 0;
4374 	u32 cmd_flags, offset32, len32, extra;
4375 
4376 	if (buf_size == 0)
4377 		return 0;
4378 
4379 	/* Request access to the flash interface. */
4380 	if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4381 		return rc;
4382 
4383 	/* Enable access to flash interface */
4384 	bnx2_enable_nvram_access(bp);
4385 
4386 	len32 = buf_size;
4387 	offset32 = offset;
4388 	extra = 0;
4389 
4390 	cmd_flags = 0;
4391 
4392 	if (offset32 & 3) {
4393 		u8 buf[4];
4394 		u32 pre_len;
4395 
4396 		offset32 &= ~3;
4397 		pre_len = 4 - (offset & 3);
4398 
4399 		if (pre_len >= len32) {
4400 			pre_len = len32;
4401 			cmd_flags = BNX2_NVM_COMMAND_FIRST |
4402 				    BNX2_NVM_COMMAND_LAST;
4403 		}
4404 		else {
4405 			cmd_flags = BNX2_NVM_COMMAND_FIRST;
4406 		}
4407 
4408 		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4409 
4410 		if (rc)
4411 			return rc;
4412 
4413 		memcpy(ret_buf, buf + (offset & 3), pre_len);
4414 
4415 		offset32 += 4;
4416 		ret_buf += pre_len;
4417 		len32 -= pre_len;
4418 	}
4419 	if (len32 & 3) {
4420 		extra = 4 - (len32 & 3);
4421 		len32 = (len32 + 4) & ~3;
4422 	}
4423 
4424 	if (len32 == 4) {
4425 		u8 buf[4];
4426 
4427 		if (cmd_flags)
4428 			cmd_flags = BNX2_NVM_COMMAND_LAST;
4429 		else
4430 			cmd_flags = BNX2_NVM_COMMAND_FIRST |
4431 				    BNX2_NVM_COMMAND_LAST;
4432 
4433 		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4434 
4435 		memcpy(ret_buf, buf, 4 - extra);
4436 	}
4437 	else if (len32 > 0) {
4438 		u8 buf[4];
4439 
4440 		/* Read the first word. */
4441 		if (cmd_flags)
4442 			cmd_flags = 0;
4443 		else
4444 			cmd_flags = BNX2_NVM_COMMAND_FIRST;
4445 
4446 		rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4447 
4448 		/* Advance to the next dword. */
4449 		offset32 += 4;
4450 		ret_buf += 4;
4451 		len32 -= 4;
4452 
4453 		while (len32 > 4 && rc == 0) {
4454 			rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4455 
4456 			/* Advance to the next dword. */
4457 			offset32 += 4;
4458 			ret_buf += 4;
4459 			len32 -= 4;
4460 		}
4461 
4462 		if (rc)
4463 			return rc;
4464 
4465 		cmd_flags = BNX2_NVM_COMMAND_LAST;
4466 		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4467 
4468 		memcpy(ret_buf, buf, 4 - extra);
4469 	}
4470 
4471 	/* Disable access to flash interface */
4472 	bnx2_disable_nvram_access(bp);
4473 
4474 	bnx2_release_nvram_lock(bp);
4475 
4476 	return rc;
4477 }
4478 
4479 static int
4480 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4481 		int buf_size)
4482 {
4483 	u32 written, offset32, len32;
4484 	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4485 	int rc = 0;
4486 	int align_start, align_end;
4487 
4488 	buf = data_buf;
4489 	offset32 = offset;
4490 	len32 = buf_size;
4491 	align_start = align_end = 0;
4492 
4493 	if ((align_start = (offset32 & 3))) {
4494 		offset32 &= ~3;
4495 		len32 += align_start;
4496 		if (len32 < 4)
4497 			len32 = 4;
4498 		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4499 			return rc;
4500 	}
4501 
4502 	if (len32 & 3) {
4503 		align_end = 4 - (len32 & 3);
4504 		len32 += align_end;
4505 		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4506 			return rc;
4507 	}
4508 
4509 	if (align_start || align_end) {
4510 		align_buf = kmalloc(len32, GFP_KERNEL);
4511 		if (align_buf == NULL)
4512 			return -ENOMEM;
4513 		if (align_start) {
4514 			memcpy(align_buf, start, 4);
4515 		}
4516 		if (align_end) {
4517 			memcpy(align_buf + len32 - 4, end, 4);
4518 		}
4519 		memcpy(align_buf + align_start, data_buf, buf_size);
4520 		buf = align_buf;
4521 	}
4522 
4523 	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4524 		flash_buffer = kmalloc(264, GFP_KERNEL);
4525 		if (flash_buffer == NULL) {
4526 			rc = -ENOMEM;
4527 			goto nvram_write_end;
4528 		}
4529 	}
4530 
4531 	written = 0;
4532 	while ((written < len32) && (rc == 0)) {
4533 		u32 page_start, page_end, data_start, data_end;
4534 		u32 addr, cmd_flags;
4535 		int i;
4536 
4537 	        /* Find the page_start addr */
4538 		page_start = offset32 + written;
4539 		page_start -= (page_start % bp->flash_info->page_size);
4540 		/* Find the page_end addr */
4541 		page_end = page_start + bp->flash_info->page_size;
4542 		/* Find the data_start addr */
4543 		data_start = (written == 0) ? offset32 : page_start;
4544 		/* Find the data_end addr */
4545 		data_end = (page_end > offset32 + len32) ?
4546 			(offset32 + len32) : page_end;
4547 
4548 		/* Request access to the flash interface. */
4549 		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4550 			goto nvram_write_end;
4551 
4552 		/* Enable access to flash interface */
4553 		bnx2_enable_nvram_access(bp);
4554 
4555 		cmd_flags = BNX2_NVM_COMMAND_FIRST;
4556 		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4557 			int j;
4558 
4559 			/* Read the whole page into the buffer
4560 			 * (non-buffer flash only) */
4561 			for (j = 0; j < bp->flash_info->page_size; j += 4) {
4562 				if (j == (bp->flash_info->page_size - 4)) {
4563 					cmd_flags |= BNX2_NVM_COMMAND_LAST;
4564 				}
4565 				rc = bnx2_nvram_read_dword(bp,
4566 					page_start + j,
4567 					&flash_buffer[j],
4568 					cmd_flags);
4569 
4570 				if (rc)
4571 					goto nvram_write_end;
4572 
4573 				cmd_flags = 0;
4574 			}
4575 		}
4576 
4577 		/* Enable writes to flash interface (unlock write-protect) */
4578 		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4579 			goto nvram_write_end;
4580 
4581 		/* Loop to write back the buffer data from page_start to
4582 		 * data_start */
4583 		i = 0;
4584 		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4585 			/* Erase the page */
4586 			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4587 				goto nvram_write_end;
4588 
4589 			/* Re-enable the write again for the actual write */
4590 			bnx2_enable_nvram_write(bp);
4591 
4592 			for (addr = page_start; addr < data_start;
4593 				addr += 4, i += 4) {
4594 
4595 				rc = bnx2_nvram_write_dword(bp, addr,
4596 					&flash_buffer[i], cmd_flags);
4597 
4598 				if (rc != 0)
4599 					goto nvram_write_end;
4600 
4601 				cmd_flags = 0;
4602 			}
4603 		}
4604 
4605 		/* Loop to write the new data from data_start to data_end */
4606 		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4607 			if ((addr == page_end - 4) ||
4608 				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4609 				 (addr == data_end - 4))) {
4610 
4611 				cmd_flags |= BNX2_NVM_COMMAND_LAST;
4612 			}
4613 			rc = bnx2_nvram_write_dword(bp, addr, buf,
4614 				cmd_flags);
4615 
4616 			if (rc != 0)
4617 				goto nvram_write_end;
4618 
4619 			cmd_flags = 0;
4620 			buf += 4;
4621 		}
4622 
4623 		/* Loop to write back the buffer data from data_end
4624 		 * to page_end */
4625 		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4626 			for (addr = data_end; addr < page_end;
4627 				addr += 4, i += 4) {
4628 
4629 				if (addr == page_end-4) {
4630 					cmd_flags = BNX2_NVM_COMMAND_LAST;
4631                 		}
4632 				rc = bnx2_nvram_write_dword(bp, addr,
4633 					&flash_buffer[i], cmd_flags);
4634 
4635 				if (rc != 0)
4636 					goto nvram_write_end;
4637 
4638 				cmd_flags = 0;
4639 			}
4640 		}
4641 
4642 		/* Disable writes to flash interface (lock write-protect) */
4643 		bnx2_disable_nvram_write(bp);
4644 
4645 		/* Disable access to flash interface */
4646 		bnx2_disable_nvram_access(bp);
4647 		bnx2_release_nvram_lock(bp);
4648 
4649 		/* Increment written */
4650 		written += data_end - data_start;
4651 	}
4652 
4653 nvram_write_end:
4654 	kfree(flash_buffer);
4655 	kfree(align_buf);
4656 	return rc;
4657 }
4658 
4659 static void
4660 bnx2_init_fw_cap(struct bnx2 *bp)
4661 {
4662 	u32 val, sig = 0;
4663 
4664 	bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4665 	bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4666 
4667 	if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4668 		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4669 
4670 	val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4671 	if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4672 		return;
4673 
4674 	if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4675 		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4676 		sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4677 	}
4678 
4679 	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4680 	    (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4681 		u32 link;
4682 
4683 		bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4684 
4685 		link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4686 		if (link & BNX2_LINK_STATUS_SERDES_LINK)
4687 			bp->phy_port = PORT_FIBRE;
4688 		else
4689 			bp->phy_port = PORT_TP;
4690 
4691 		sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4692 		       BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4693 	}
4694 
4695 	if (netif_running(bp->dev) && sig)
4696 		bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4697 }
4698 
4699 static void
4700 bnx2_setup_msix_tbl(struct bnx2 *bp)
4701 {
4702 	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
4703 
4704 	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
4705 	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
4706 }
4707 
4708 static int
4709 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4710 {
4711 	u32 val;
4712 	int i, rc = 0;
4713 	u8 old_port;
4714 
4715 	/* Wait for the current PCI transaction to complete before
4716 	 * issuing a reset. */
4717 	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
4718 	    (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
4719 		BNX2_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4720 			BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4721 			BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4722 			BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4723 			BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4724 		val = BNX2_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4725 		udelay(5);
4726 	} else {  /* 5709 */
4727 		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4728 		val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4729 		BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4730 		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4731 
4732 		for (i = 0; i < 100; i++) {
4733 			msleep(1);
4734 			val = BNX2_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
4735 			if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
4736 				break;
4737 		}
4738 	}
4739 
4740 	/* Wait for the firmware to tell us it is ok to issue a reset. */
4741 	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
4742 
4743 	/* Deposit a driver reset signature so the firmware knows that
4744 	 * this is a soft reset. */
4745 	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
4746 		      BNX2_DRV_RESET_SIGNATURE_MAGIC);
4747 
4748 	/* Do a dummy read to force the chip to complete all current transaction
4749 	 * before we issue a reset. */
4750 	val = BNX2_RD(bp, BNX2_MISC_ID);
4751 
4752 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4753 		BNX2_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4754 		BNX2_RD(bp, BNX2_MISC_COMMAND);
4755 		udelay(5);
4756 
4757 		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4758 		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4759 
4760 		BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4761 
4762 	} else {
4763 		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4764 		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4765 		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4766 
4767 		/* Chip reset. */
4768 		BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4769 
4770 		/* Reading back any register after chip reset will hang the
4771 		 * bus on 5706 A0 and A1.  The msleep below provides plenty
4772 		 * of margin for write posting.
4773 		 */
4774 		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
4775 		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1))
4776 			msleep(20);
4777 
4778 		/* Reset takes approximate 30 usec */
4779 		for (i = 0; i < 10; i++) {
4780 			val = BNX2_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4781 			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4782 				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4783 				break;
4784 			udelay(10);
4785 		}
4786 
4787 		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4788 			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4789 			pr_err("Chip reset did not complete\n");
4790 			return -EBUSY;
4791 		}
4792 	}
4793 
4794 	/* Make sure byte swapping is properly configured. */
4795 	val = BNX2_RD(bp, BNX2_PCI_SWAP_DIAG0);
4796 	if (val != 0x01020304) {
4797 		pr_err("Chip not in correct endian mode\n");
4798 		return -ENODEV;
4799 	}
4800 
4801 	/* Wait for the firmware to finish its initialization. */
4802 	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
4803 	if (rc)
4804 		return rc;
4805 
4806 	spin_lock_bh(&bp->phy_lock);
4807 	old_port = bp->phy_port;
4808 	bnx2_init_fw_cap(bp);
4809 	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
4810 	    old_port != bp->phy_port)
4811 		bnx2_set_default_remote_link(bp);
4812 	spin_unlock_bh(&bp->phy_lock);
4813 
4814 	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
4815 		/* Adjust the voltage regular to two steps lower.  The default
4816 		 * of this register is 0x0000000e. */
4817 		BNX2_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4818 
4819 		/* Remove bad rbuf memory from the free pool. */
4820 		rc = bnx2_alloc_bad_rbuf(bp);
4821 	}
4822 
4823 	if (bp->flags & BNX2_FLAG_USING_MSIX) {
4824 		bnx2_setup_msix_tbl(bp);
4825 		/* Prevent MSIX table reads and write from timing out */
4826 		BNX2_WR(bp, BNX2_MISC_ECO_HW_CTL,
4827 			BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
4828 	}
4829 
4830 	return rc;
4831 }
4832 
4833 static int
4834 bnx2_init_chip(struct bnx2 *bp)
4835 {
4836 	u32 val, mtu;
4837 	int rc, i;
4838 
4839 	/* Make sure the interrupt is not active. */
4840 	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4841 
4842 	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4843 	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4844 #ifdef __BIG_ENDIAN
4845 	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4846 #endif
4847 	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4848 	      DMA_READ_CHANS << 12 |
4849 	      DMA_WRITE_CHANS << 16;
4850 
4851 	val |= (0x2 << 20) | (1 << 11);
4852 
4853 	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4854 		val |= (1 << 23);
4855 
4856 	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) &&
4857 	    (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0) &&
4858 	    !(bp->flags & BNX2_FLAG_PCIX))
4859 		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4860 
4861 	BNX2_WR(bp, BNX2_DMA_CONFIG, val);
4862 
4863 	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
4864 		val = BNX2_RD(bp, BNX2_TDMA_CONFIG);
4865 		val |= BNX2_TDMA_CONFIG_ONE_DMA;
4866 		BNX2_WR(bp, BNX2_TDMA_CONFIG, val);
4867 	}
4868 
4869 	if (bp->flags & BNX2_FLAG_PCIX) {
4870 		u16 val16;
4871 
4872 		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4873 				     &val16);
4874 		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4875 				      val16 & ~PCI_X_CMD_ERO);
4876 	}
4877 
4878 	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4879 		BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4880 		BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4881 		BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4882 
4883 	/* Initialize context mapping and zero out the quick contexts.  The
4884 	 * context block must have already been enabled. */
4885 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4886 		rc = bnx2_init_5709_context(bp);
4887 		if (rc)
4888 			return rc;
4889 	} else
4890 		bnx2_init_context(bp);
4891 
4892 	if ((rc = bnx2_init_cpus(bp)) != 0)
4893 		return rc;
4894 
4895 	bnx2_init_nvram(bp);
4896 
4897 	bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4898 
4899 	val = BNX2_RD(bp, BNX2_MQ_CONFIG);
4900 	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4901 	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4902 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4903 		val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
4904 		if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
4905 			val |= BNX2_MQ_CONFIG_HALT_DIS;
4906 	}
4907 
4908 	BNX2_WR(bp, BNX2_MQ_CONFIG, val);
4909 
4910 	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4911 	BNX2_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4912 	BNX2_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4913 
4914 	val = (BNX2_PAGE_BITS - 8) << 24;
4915 	BNX2_WR(bp, BNX2_RV2P_CONFIG, val);
4916 
4917 	/* Configure page size. */
4918 	val = BNX2_RD(bp, BNX2_TBDR_CONFIG);
4919 	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4920 	val |= (BNX2_PAGE_BITS - 8) << 24 | 0x40;
4921 	BNX2_WR(bp, BNX2_TBDR_CONFIG, val);
4922 
4923 	val = bp->mac_addr[0] +
4924 	      (bp->mac_addr[1] << 8) +
4925 	      (bp->mac_addr[2] << 16) +
4926 	      bp->mac_addr[3] +
4927 	      (bp->mac_addr[4] << 8) +
4928 	      (bp->mac_addr[5] << 16);
4929 	BNX2_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4930 
4931 	/* Program the MTU.  Also include 4 bytes for CRC32. */
4932 	mtu = bp->dev->mtu;
4933 	val = mtu + ETH_HLEN + ETH_FCS_LEN;
4934 	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4935 		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4936 	BNX2_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4937 
4938 	if (mtu < 1500)
4939 		mtu = 1500;
4940 
4941 	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
4942 	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
4943 	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
4944 
4945 	memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
4946 	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4947 		bp->bnx2_napi[i].last_status_idx = 0;
4948 
4949 	bp->idle_chk_status_idx = 0xffff;
4950 
4951 	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4952 
4953 	/* Set up how to generate a link change interrupt. */
4954 	BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4955 
4956 	BNX2_WR(bp, BNX2_HC_STATUS_ADDR_L,
4957 		(u64) bp->status_blk_mapping & 0xffffffff);
4958 	BNX2_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4959 
4960 	BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4961 		(u64) bp->stats_blk_mapping & 0xffffffff);
4962 	BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4963 		(u64) bp->stats_blk_mapping >> 32);
4964 
4965 	BNX2_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4966 		(bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4967 
4968 	BNX2_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4969 		(bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4970 
4971 	BNX2_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4972 		(bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4973 
4974 	BNX2_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4975 
4976 	BNX2_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4977 
4978 	BNX2_WR(bp, BNX2_HC_COM_TICKS,
4979 		(bp->com_ticks_int << 16) | bp->com_ticks);
4980 
4981 	BNX2_WR(bp, BNX2_HC_CMD_TICKS,
4982 		(bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4983 
4984 	if (bp->flags & BNX2_FLAG_BROKEN_STATS)
4985 		BNX2_WR(bp, BNX2_HC_STATS_TICKS, 0);
4986 	else
4987 		BNX2_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4988 	BNX2_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */
4989 
4990 	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)
4991 		val = BNX2_HC_CONFIG_COLLECT_STATS;
4992 	else {
4993 		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4994 		      BNX2_HC_CONFIG_COLLECT_STATS;
4995 	}
4996 
4997 	if (bp->flags & BNX2_FLAG_USING_MSIX) {
4998 		BNX2_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
4999 			BNX2_HC_MSIX_BIT_VECTOR_VAL);
5000 
5001 		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
5002 	}
5003 
5004 	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
5005 		val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;
5006 
5007 	BNX2_WR(bp, BNX2_HC_CONFIG, val);
5008 
5009 	if (bp->rx_ticks < 25)
5010 		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1);
5011 	else
5012 		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 0);
5013 
5014 	for (i = 1; i < bp->irq_nvecs; i++) {
5015 		u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
5016 			   BNX2_HC_SB_CONFIG_1;
5017 
5018 		BNX2_WR(bp, base,
5019 			BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
5020 			BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
5021 			BNX2_HC_SB_CONFIG_1_ONE_SHOT);
5022 
5023 		BNX2_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
5024 			(bp->tx_quick_cons_trip_int << 16) |
5025 			 bp->tx_quick_cons_trip);
5026 
5027 		BNX2_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
5028 			(bp->tx_ticks_int << 16) | bp->tx_ticks);
5029 
5030 		BNX2_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
5031 			(bp->rx_quick_cons_trip_int << 16) |
5032 			bp->rx_quick_cons_trip);
5033 
5034 		BNX2_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
5035 			(bp->rx_ticks_int << 16) | bp->rx_ticks);
5036 	}
5037 
5038 	/* Clear internal stats counters. */
5039 	BNX2_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
5040 
5041 	BNX2_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
5042 
5043 	/* Initialize the receive filter. */
5044 	bnx2_set_rx_mode(bp->dev);
5045 
5046 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5047 		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
5048 		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
5049 		BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
5050 	}
5051 	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
5052 			  1, 0);
5053 
5054 	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
5055 	BNX2_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
5056 
5057 	udelay(20);
5058 
5059 	bp->hc_cmd = BNX2_RD(bp, BNX2_HC_COMMAND);
5060 
5061 	return rc;
5062 }
5063 
5064 static void
5065 bnx2_clear_ring_states(struct bnx2 *bp)
5066 {
5067 	struct bnx2_napi *bnapi;
5068 	struct bnx2_tx_ring_info *txr;
5069 	struct bnx2_rx_ring_info *rxr;
5070 	int i;
5071 
5072 	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5073 		bnapi = &bp->bnx2_napi[i];
5074 		txr = &bnapi->tx_ring;
5075 		rxr = &bnapi->rx_ring;
5076 
5077 		txr->tx_cons = 0;
5078 		txr->hw_tx_cons = 0;
5079 		rxr->rx_prod_bseq = 0;
5080 		rxr->rx_prod = 0;
5081 		rxr->rx_cons = 0;
5082 		rxr->rx_pg_prod = 0;
5083 		rxr->rx_pg_cons = 0;
5084 	}
5085 }
5086 
5087 static void
5088 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5089 {
5090 	u32 val, offset0, offset1, offset2, offset3;
5091 	u32 cid_addr = GET_CID_ADDR(cid);
5092 
5093 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5094 		offset0 = BNX2_L2CTX_TYPE_XI;
5095 		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5096 		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5097 		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5098 	} else {
5099 		offset0 = BNX2_L2CTX_TYPE;
5100 		offset1 = BNX2_L2CTX_CMD_TYPE;
5101 		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5102 		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5103 	}
5104 	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
5105 	bnx2_ctx_wr(bp, cid_addr, offset0, val);
5106 
5107 	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5108 	bnx2_ctx_wr(bp, cid_addr, offset1, val);
5109 
5110 	val = (u64) txr->tx_desc_mapping >> 32;
5111 	bnx2_ctx_wr(bp, cid_addr, offset2, val);
5112 
5113 	val = (u64) txr->tx_desc_mapping & 0xffffffff;
5114 	bnx2_ctx_wr(bp, cid_addr, offset3, val);
5115 }
5116 
5117 static void
5118 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5119 {
5120 	struct bnx2_tx_bd *txbd;
5121 	u32 cid = TX_CID;
5122 	struct bnx2_napi *bnapi;
5123 	struct bnx2_tx_ring_info *txr;
5124 
5125 	bnapi = &bp->bnx2_napi[ring_num];
5126 	txr = &bnapi->tx_ring;
5127 
5128 	if (ring_num == 0)
5129 		cid = TX_CID;
5130 	else
5131 		cid = TX_TSS_CID + ring_num - 1;
5132 
5133 	bp->tx_wake_thresh = bp->tx_ring_size / 2;
5134 
5135 	txbd = &txr->tx_desc_ring[BNX2_MAX_TX_DESC_CNT];
5136 
5137 	txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5138 	txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5139 
5140 	txr->tx_prod = 0;
5141 	txr->tx_prod_bseq = 0;
5142 
5143 	txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5144 	txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5145 
5146 	bnx2_init_tx_context(bp, cid, txr);
5147 }
5148 
5149 static void
5150 bnx2_init_rxbd_rings(struct bnx2_rx_bd *rx_ring[], dma_addr_t dma[],
5151 		     u32 buf_size, int num_rings)
5152 {
5153 	int i;
5154 	struct bnx2_rx_bd *rxbd;
5155 
5156 	for (i = 0; i < num_rings; i++) {
5157 		int j;
5158 
5159 		rxbd = &rx_ring[i][0];
5160 		for (j = 0; j < BNX2_MAX_RX_DESC_CNT; j++, rxbd++) {
5161 			rxbd->rx_bd_len = buf_size;
5162 			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5163 		}
5164 		if (i == (num_rings - 1))
5165 			j = 0;
5166 		else
5167 			j = i + 1;
5168 		rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5169 		rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5170 	}
5171 }
5172 
5173 static void
5174 bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
5175 {
5176 	int i;
5177 	u16 prod, ring_prod;
5178 	u32 cid, rx_cid_addr, val;
5179 	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
5180 	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5181 
5182 	if (ring_num == 0)
5183 		cid = RX_CID;
5184 	else
5185 		cid = RX_RSS_CID + ring_num - 1;
5186 
5187 	rx_cid_addr = GET_CID_ADDR(cid);
5188 
5189 	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
5190 			     bp->rx_buf_use_size, bp->rx_max_ring);
5191 
5192 	bnx2_init_rx_context(bp, cid);
5193 
5194 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5195 		val = BNX2_RD(bp, BNX2_MQ_MAP_L2_5);
5196 		BNX2_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
5197 	}
5198 
5199 	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
5200 	if (bp->rx_pg_ring_size) {
5201 		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
5202 				     rxr->rx_pg_desc_mapping,
5203 				     PAGE_SIZE, bp->rx_max_pg_ring);
5204 		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
5205 		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
5206 		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
5207 		       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);
5208 
5209 		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
5210 		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
5211 
5212 		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
5213 		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
5214 
5215 		if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5216 			BNX2_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
5217 	}
5218 
5219 	val = (u64) rxr->rx_desc_mapping[0] >> 32;
5220 	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
5221 
5222 	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
5223 	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
5224 
5225 	ring_prod = prod = rxr->rx_pg_prod;
5226 	for (i = 0; i < bp->rx_pg_ring_size; i++) {
5227 		if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
5228 			netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
5229 				    ring_num, i, bp->rx_pg_ring_size);
5230 			break;
5231 		}
5232 		prod = BNX2_NEXT_RX_BD(prod);
5233 		ring_prod = BNX2_RX_PG_RING_IDX(prod);
5234 	}
5235 	rxr->rx_pg_prod = prod;
5236 
5237 	ring_prod = prod = rxr->rx_prod;
5238 	for (i = 0; i < bp->rx_ring_size; i++) {
5239 		if (bnx2_alloc_rx_data(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
5240 			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
5241 				    ring_num, i, bp->rx_ring_size);
5242 			break;
5243 		}
5244 		prod = BNX2_NEXT_RX_BD(prod);
5245 		ring_prod = BNX2_RX_RING_IDX(prod);
5246 	}
5247 	rxr->rx_prod = prod;
5248 
5249 	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
5250 	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
5251 	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;
5252 
5253 	BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
5254 	BNX2_WR16(bp, rxr->rx_bidx_addr, prod);
5255 
5256 	BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
5257 }
5258 
5259 static void
5260 bnx2_init_all_rings(struct bnx2 *bp)
5261 {
5262 	int i;
5263 	u32 val;
5264 
5265 	bnx2_clear_ring_states(bp);
5266 
5267 	BNX2_WR(bp, BNX2_TSCH_TSS_CFG, 0);
5268 	for (i = 0; i < bp->num_tx_rings; i++)
5269 		bnx2_init_tx_ring(bp, i);
5270 
5271 	if (bp->num_tx_rings > 1)
5272 		BNX2_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
5273 			(TX_TSS_CID << 7));
5274 
5275 	BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
5276 	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);
5277 
5278 	for (i = 0; i < bp->num_rx_rings; i++)
5279 		bnx2_init_rx_ring(bp, i);
5280 
5281 	if (bp->num_rx_rings > 1) {
5282 		u32 tbl_32 = 0;
5283 
5284 		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
5285 			int shift = (i % 8) << 2;
5286 
5287 			tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift;
5288 			if ((i % 8) == 7) {
5289 				BNX2_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32);
5290 				BNX2_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) |
5291 					BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
5292 					BNX2_RLUP_RSS_COMMAND_WRITE |
5293 					BNX2_RLUP_RSS_COMMAND_HASH_MASK);
5294 				tbl_32 = 0;
5295 			}
5296 		}
5297 
5298 		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
5299 		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;
5300 
5301 		BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
5302 
5303 	}
5304 }
5305 
5306 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5307 {
5308 	u32 max, num_rings = 1;
5309 
5310 	while (ring_size > BNX2_MAX_RX_DESC_CNT) {
5311 		ring_size -= BNX2_MAX_RX_DESC_CNT;
5312 		num_rings++;
5313 	}
5314 	/* round to next power of 2 */
5315 	max = max_size;
5316 	while ((max & num_rings) == 0)
5317 		max >>= 1;
5318 
5319 	if (num_rings != max)
5320 		max <<= 1;
5321 
5322 	return max;
5323 }
5324 
5325 static void
5326 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
5327 {
5328 	u32 rx_size, rx_space, jumbo_size;
5329 
5330 	/* 8 for CRC and VLAN */
5331 	rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
5332 
5333 	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
5334 		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5335 
5336 	bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
5337 	bp->rx_pg_ring_size = 0;
5338 	bp->rx_max_pg_ring = 0;
5339 	bp->rx_max_pg_ring_idx = 0;
5340 	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
5341 		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
5342 
5343 		jumbo_size = size * pages;
5344 		if (jumbo_size > BNX2_MAX_TOTAL_RX_PG_DESC_CNT)
5345 			jumbo_size = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;
5346 
5347 		bp->rx_pg_ring_size = jumbo_size;
5348 		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
5349 							BNX2_MAX_RX_PG_RINGS);
5350 		bp->rx_max_pg_ring_idx =
5351 			(bp->rx_max_pg_ring * BNX2_RX_DESC_CNT) - 1;
5352 		rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
5353 		bp->rx_copy_thresh = 0;
5354 	}
5355 
5356 	bp->rx_buf_use_size = rx_size;
5357 	/* hw alignment + build_skb() overhead*/
5358 	bp->rx_buf_size = SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
5359 		NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5360 	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
5361 	bp->rx_ring_size = size;
5362 	bp->rx_max_ring = bnx2_find_max_ring(size, BNX2_MAX_RX_RINGS);
5363 	bp->rx_max_ring_idx = (bp->rx_max_ring * BNX2_RX_DESC_CNT) - 1;
5364 }
5365 
5366 static void
5367 bnx2_free_tx_skbs(struct bnx2 *bp)
5368 {
5369 	int i;
5370 
5371 	for (i = 0; i < bp->num_tx_rings; i++) {
5372 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5373 		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5374 		int j;
5375 
5376 		if (txr->tx_buf_ring == NULL)
5377 			continue;
5378 
5379 		for (j = 0; j < BNX2_TX_DESC_CNT; ) {
5380 			struct bnx2_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
5381 			struct sk_buff *skb = tx_buf->skb;
5382 			int k, last;
5383 
5384 			if (skb == NULL) {
5385 				j = BNX2_NEXT_TX_BD(j);
5386 				continue;
5387 			}
5388 
5389 			dma_unmap_single(&bp->pdev->dev,
5390 					 dma_unmap_addr(tx_buf, mapping),
5391 					 skb_headlen(skb),
5392 					 PCI_DMA_TODEVICE);
5393 
5394 			tx_buf->skb = NULL;
5395 
5396 			last = tx_buf->nr_frags;
5397 			j = BNX2_NEXT_TX_BD(j);
5398 			for (k = 0; k < last; k++, j = BNX2_NEXT_TX_BD(j)) {
5399 				tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(j)];
5400 				dma_unmap_page(&bp->pdev->dev,
5401 					dma_unmap_addr(tx_buf, mapping),
5402 					skb_frag_size(&skb_shinfo(skb)->frags[k]),
5403 					PCI_DMA_TODEVICE);
5404 			}
5405 			dev_kfree_skb(skb);
5406 		}
5407 		netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
5408 	}
5409 }
5410 
5411 static void
5412 bnx2_free_rx_skbs(struct bnx2 *bp)
5413 {
5414 	int i;
5415 
5416 	for (i = 0; i < bp->num_rx_rings; i++) {
5417 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5418 		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5419 		int j;
5420 
5421 		if (rxr->rx_buf_ring == NULL)
5422 			return;
5423 
5424 		for (j = 0; j < bp->rx_max_ring_idx; j++) {
5425 			struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5426 			u8 *data = rx_buf->data;
5427 
5428 			if (data == NULL)
5429 				continue;
5430 
5431 			dma_unmap_single(&bp->pdev->dev,
5432 					 dma_unmap_addr(rx_buf, mapping),
5433 					 bp->rx_buf_use_size,
5434 					 PCI_DMA_FROMDEVICE);
5435 
5436 			rx_buf->data = NULL;
5437 
5438 			kfree(data);
5439 		}
5440 		for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5441 			bnx2_free_rx_page(bp, rxr, j);
5442 	}
5443 }
5444 
5445 static void
5446 bnx2_free_skbs(struct bnx2 *bp)
5447 {
5448 	bnx2_free_tx_skbs(bp);
5449 	bnx2_free_rx_skbs(bp);
5450 }
5451 
5452 static int
5453 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5454 {
5455 	int rc;
5456 
5457 	rc = bnx2_reset_chip(bp, reset_code);
5458 	bnx2_free_skbs(bp);
5459 	if (rc)
5460 		return rc;
5461 
5462 	if ((rc = bnx2_init_chip(bp)) != 0)
5463 		return rc;
5464 
5465 	bnx2_init_all_rings(bp);
5466 	return 0;
5467 }
5468 
5469 static int
5470 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5471 {
5472 	int rc;
5473 
5474 	if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5475 		return rc;
5476 
5477 	spin_lock_bh(&bp->phy_lock);
5478 	bnx2_init_phy(bp, reset_phy);
5479 	bnx2_set_link(bp);
5480 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5481 		bnx2_remote_phy_event(bp);
5482 	spin_unlock_bh(&bp->phy_lock);
5483 	return 0;
5484 }
5485 
5486 static int
5487 bnx2_shutdown_chip(struct bnx2 *bp)
5488 {
5489 	u32 reset_code;
5490 
5491 	if (bp->flags & BNX2_FLAG_NO_WOL)
5492 		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5493 	else if (bp->wol)
5494 		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5495 	else
5496 		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5497 
5498 	return bnx2_reset_chip(bp, reset_code);
5499 }
5500 
5501 static int
5502 bnx2_test_registers(struct bnx2 *bp)
5503 {
5504 	int ret;
5505 	int i, is_5709;
5506 	static const struct {
5507 		u16   offset;
5508 		u16   flags;
5509 #define BNX2_FL_NOT_5709	1
5510 		u32   rw_mask;
5511 		u32   ro_mask;
5512 	} reg_tbl[] = {
5513 		{ 0x006c, 0, 0x00000000, 0x0000003f },
5514 		{ 0x0090, 0, 0xffffffff, 0x00000000 },
5515 		{ 0x0094, 0, 0x00000000, 0x00000000 },
5516 
5517 		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
5518 		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5519 		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5520 		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
5521 		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
5522 		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5523 		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
5524 		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5525 		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5526 
5527 		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5528 		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5529 		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5530 		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5531 		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5532 		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5533 
5534 		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5535 		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
5536 		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },
5537 
5538 		{ 0x1000, 0, 0x00000000, 0x00000001 },
5539 		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },
5540 
5541 		{ 0x1408, 0, 0x01c00800, 0x00000000 },
5542 		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
5543 		{ 0x14a8, 0, 0x00000000, 0x000001ff },
5544 		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
5545 		{ 0x14b0, 0, 0x00000002, 0x00000001 },
5546 		{ 0x14b8, 0, 0x00000000, 0x00000000 },
5547 		{ 0x14c0, 0, 0x00000000, 0x00000009 },
5548 		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
5549 		{ 0x14cc, 0, 0x00000000, 0x00000001 },
5550 		{ 0x14d0, 0, 0xffffffff, 0x00000000 },
5551 
5552 		{ 0x1800, 0, 0x00000000, 0x00000001 },
5553 		{ 0x1804, 0, 0x00000000, 0x00000003 },
5554 
5555 		{ 0x2800, 0, 0x00000000, 0x00000001 },
5556 		{ 0x2804, 0, 0x00000000, 0x00003f01 },
5557 		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
5558 		{ 0x2810, 0, 0xffff0000, 0x00000000 },
5559 		{ 0x2814, 0, 0xffff0000, 0x00000000 },
5560 		{ 0x2818, 0, 0xffff0000, 0x00000000 },
5561 		{ 0x281c, 0, 0xffff0000, 0x00000000 },
5562 		{ 0x2834, 0, 0xffffffff, 0x00000000 },
5563 		{ 0x2840, 0, 0x00000000, 0xffffffff },
5564 		{ 0x2844, 0, 0x00000000, 0xffffffff },
5565 		{ 0x2848, 0, 0xffffffff, 0x00000000 },
5566 		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },
5567 
5568 		{ 0x2c00, 0, 0x00000000, 0x00000011 },
5569 		{ 0x2c04, 0, 0x00000000, 0x00030007 },
5570 
5571 		{ 0x3c00, 0, 0x00000000, 0x00000001 },
5572 		{ 0x3c04, 0, 0x00000000, 0x00070000 },
5573 		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
5574 		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
5575 		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
5576 		{ 0x3c14, 0, 0x00000000, 0xffffffff },
5577 		{ 0x3c18, 0, 0x00000000, 0xffffffff },
5578 		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
5579 		{ 0x3c20, 0, 0xffffff00, 0x00000000 },
5580 
5581 		{ 0x5004, 0, 0x00000000, 0x0000007f },
5582 		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },
5583 
5584 		{ 0x5c00, 0, 0x00000000, 0x00000001 },
5585 		{ 0x5c04, 0, 0x00000000, 0x0003000f },
5586 		{ 0x5c08, 0, 0x00000003, 0x00000000 },
5587 		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
5588 		{ 0x5c10, 0, 0x00000000, 0xffffffff },
5589 		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
5590 		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
5591 		{ 0x5c88, 0, 0x00000000, 0x00077373 },
5592 		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },
5593 
5594 		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
5595 		{ 0x680c, 0, 0xffffffff, 0x00000000 },
5596 		{ 0x6810, 0, 0xffffffff, 0x00000000 },
5597 		{ 0x6814, 0, 0xffffffff, 0x00000000 },
5598 		{ 0x6818, 0, 0xffffffff, 0x00000000 },
5599 		{ 0x681c, 0, 0xffffffff, 0x00000000 },
5600 		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
5601 		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
5602 		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
5603 		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
5604 		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
5605 		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
5606 		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
5607 		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
5608 		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
5609 		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
5610 		{ 0x684c, 0, 0xffffffff, 0x00000000 },
5611 		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
5612 		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
5613 		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
5614 		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
5615 		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
5616 		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },
5617 
5618 		{ 0xffff, 0, 0x00000000, 0x00000000 },
5619 	};
5620 
5621 	ret = 0;
5622 	is_5709 = 0;
5623 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5624 		is_5709 = 1;
5625 
5626 	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
5627 		u32 offset, rw_mask, ro_mask, save_val, val;
5628 		u16 flags = reg_tbl[i].flags;
5629 
5630 		if (is_5709 && (flags & BNX2_FL_NOT_5709))
5631 			continue;
5632 
5633 		offset = (u32) reg_tbl[i].offset;
5634 		rw_mask = reg_tbl[i].rw_mask;
5635 		ro_mask = reg_tbl[i].ro_mask;
5636 
5637 		save_val = readl(bp->regview + offset);
5638 
5639 		writel(0, bp->regview + offset);
5640 
5641 		val = readl(bp->regview + offset);
5642 		if ((val & rw_mask) != 0) {
5643 			goto reg_test_err;
5644 		}
5645 
5646 		if ((val & ro_mask) != (save_val & ro_mask)) {
5647 			goto reg_test_err;
5648 		}
5649 
5650 		writel(0xffffffff, bp->regview + offset);
5651 
5652 		val = readl(bp->regview + offset);
5653 		if ((val & rw_mask) != rw_mask) {
5654 			goto reg_test_err;
5655 		}
5656 
5657 		if ((val & ro_mask) != (save_val & ro_mask)) {
5658 			goto reg_test_err;
5659 		}
5660 
5661 		writel(save_val, bp->regview + offset);
5662 		continue;
5663 
5664 reg_test_err:
5665 		writel(save_val, bp->regview + offset);
5666 		ret = -ENODEV;
5667 		break;
5668 	}
5669 	return ret;
5670 }
5671 
5672 static int
5673 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5674 {
5675 	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5676 		0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5677 	int i;
5678 
5679 	for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5680 		u32 offset;
5681 
5682 		for (offset = 0; offset < size; offset += 4) {
5683 
5684 			bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5685 
5686 			if (bnx2_reg_rd_ind(bp, start + offset) !=
5687 				test_pattern[i]) {
5688 				return -ENODEV;
5689 			}
5690 		}
5691 	}
5692 	return 0;
5693 }
5694 
5695 static int
5696 bnx2_test_memory(struct bnx2 *bp)
5697 {
5698 	int ret = 0;
5699 	int i;
5700 	static struct mem_entry {
5701 		u32   offset;
5702 		u32   len;
5703 	} mem_tbl_5706[] = {
5704 		{ 0x60000,  0x4000 },
5705 		{ 0xa0000,  0x3000 },
5706 		{ 0xe0000,  0x4000 },
5707 		{ 0x120000, 0x4000 },
5708 		{ 0x1a0000, 0x4000 },
5709 		{ 0x160000, 0x4000 },
5710 		{ 0xffffffff, 0    },
5711 	},
5712 	mem_tbl_5709[] = {
5713 		{ 0x60000,  0x4000 },
5714 		{ 0xa0000,  0x3000 },
5715 		{ 0xe0000,  0x4000 },
5716 		{ 0x120000, 0x4000 },
5717 		{ 0x1a0000, 0x4000 },
5718 		{ 0xffffffff, 0    },
5719 	};
5720 	struct mem_entry *mem_tbl;
5721 
5722 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5723 		mem_tbl = mem_tbl_5709;
5724 	else
5725 		mem_tbl = mem_tbl_5706;
5726 
5727 	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5728 		if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5729 			mem_tbl[i].len)) != 0) {
5730 			return ret;
5731 		}
5732 	}
5733 
5734 	return ret;
5735 }
5736 
5737 #define BNX2_MAC_LOOPBACK	0
5738 #define BNX2_PHY_LOOPBACK	1
5739 
5740 static int
5741 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5742 {
5743 	unsigned int pkt_size, num_pkts, i;
5744 	struct sk_buff *skb;
5745 	u8 *data;
5746 	unsigned char *packet;
5747 	u16 rx_start_idx, rx_idx;
5748 	dma_addr_t map;
5749 	struct bnx2_tx_bd *txbd;
5750 	struct bnx2_sw_bd *rx_buf;
5751 	struct l2_fhdr *rx_hdr;
5752 	int ret = -ENODEV;
5753 	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
5754 	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5755 	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5756 
5757 	tx_napi = bnapi;
5758 
5759 	txr = &tx_napi->tx_ring;
5760 	rxr = &bnapi->rx_ring;
5761 	if (loopback_mode == BNX2_MAC_LOOPBACK) {
5762 		bp->loopback = MAC_LOOPBACK;
5763 		bnx2_set_mac_loopback(bp);
5764 	}
5765 	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
5766 		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5767 			return 0;
5768 
5769 		bp->loopback = PHY_LOOPBACK;
5770 		bnx2_set_phy_loopback(bp);
5771 	}
5772 	else
5773 		return -EINVAL;
5774 
5775 	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
5776 	skb = netdev_alloc_skb(bp->dev, pkt_size);
5777 	if (!skb)
5778 		return -ENOMEM;
5779 	packet = skb_put(skb, pkt_size);
5780 	memcpy(packet, bp->dev->dev_addr, 6);
5781 	memset(packet + 6, 0x0, 8);
5782 	for (i = 14; i < pkt_size; i++)
5783 		packet[i] = (unsigned char) (i & 0xff);
5784 
5785 	map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
5786 			     PCI_DMA_TODEVICE);
5787 	if (dma_mapping_error(&bp->pdev->dev, map)) {
5788 		dev_kfree_skb(skb);
5789 		return -EIO;
5790 	}
5791 
5792 	BNX2_WR(bp, BNX2_HC_COMMAND,
5793 		bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5794 
5795 	BNX2_RD(bp, BNX2_HC_COMMAND);
5796 
5797 	udelay(5);
5798 	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
5799 
5800 	num_pkts = 0;
5801 
5802 	txbd = &txr->tx_desc_ring[BNX2_TX_RING_IDX(txr->tx_prod)];
5803 
5804 	txbd->tx_bd_haddr_hi = (u64) map >> 32;
5805 	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5806 	txbd->tx_bd_mss_nbytes = pkt_size;
5807 	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5808 
5809 	num_pkts++;
5810 	txr->tx_prod = BNX2_NEXT_TX_BD(txr->tx_prod);
5811 	txr->tx_prod_bseq += pkt_size;
5812 
5813 	BNX2_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
5814 	BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
5815 
5816 	udelay(100);
5817 
5818 	BNX2_WR(bp, BNX2_HC_COMMAND,
5819 		bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5820 
5821 	BNX2_RD(bp, BNX2_HC_COMMAND);
5822 
5823 	udelay(5);
5824 
5825 	dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
5826 	dev_kfree_skb(skb);
5827 
5828 	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
5829 		goto loopback_test_done;
5830 
5831 	rx_idx = bnx2_get_hw_rx_cons(bnapi);
5832 	if (rx_idx != rx_start_idx + num_pkts) {
5833 		goto loopback_test_done;
5834 	}
5835 
5836 	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
5837 	data = rx_buf->data;
5838 
5839 	rx_hdr = get_l2_fhdr(data);
5840 	data = (u8 *)rx_hdr + BNX2_RX_OFFSET;
5841 
5842 	dma_sync_single_for_cpu(&bp->pdev->dev,
5843 		dma_unmap_addr(rx_buf, mapping),
5844 		bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
5845 
5846 	if (rx_hdr->l2_fhdr_status &
5847 		(L2_FHDR_ERRORS_BAD_CRC |
5848 		L2_FHDR_ERRORS_PHY_DECODE |
5849 		L2_FHDR_ERRORS_ALIGNMENT |
5850 		L2_FHDR_ERRORS_TOO_SHORT |
5851 		L2_FHDR_ERRORS_GIANT_FRAME)) {
5852 
5853 		goto loopback_test_done;
5854 	}
5855 
5856 	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5857 		goto loopback_test_done;
5858 	}
5859 
5860 	for (i = 14; i < pkt_size; i++) {
5861 		if (*(data + i) != (unsigned char) (i & 0xff)) {
5862 			goto loopback_test_done;
5863 		}
5864 	}
5865 
5866 	ret = 0;
5867 
5868 loopback_test_done:
5869 	bp->loopback = 0;
5870 	return ret;
5871 }
5872 
5873 #define BNX2_MAC_LOOPBACK_FAILED	1
5874 #define BNX2_PHY_LOOPBACK_FAILED	2
5875 #define BNX2_LOOPBACK_FAILED		(BNX2_MAC_LOOPBACK_FAILED |	\
5876 					 BNX2_PHY_LOOPBACK_FAILED)
5877 
5878 static int
5879 bnx2_test_loopback(struct bnx2 *bp)
5880 {
5881 	int rc = 0;
5882 
5883 	if (!netif_running(bp->dev))
5884 		return BNX2_LOOPBACK_FAILED;
5885 
5886 	bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5887 	spin_lock_bh(&bp->phy_lock);
5888 	bnx2_init_phy(bp, 1);
5889 	spin_unlock_bh(&bp->phy_lock);
5890 	if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5891 		rc |= BNX2_MAC_LOOPBACK_FAILED;
5892 	if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5893 		rc |= BNX2_PHY_LOOPBACK_FAILED;
5894 	return rc;
5895 }
5896 
5897 #define NVRAM_SIZE 0x200
5898 #define CRC32_RESIDUAL 0xdebb20e3
5899 
5900 static int
5901 bnx2_test_nvram(struct bnx2 *bp)
5902 {
5903 	__be32 buf[NVRAM_SIZE / 4];
5904 	u8 *data = (u8 *) buf;
5905 	int rc = 0;
5906 	u32 magic, csum;
5907 
5908 	if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5909 		goto test_nvram_done;
5910 
5911         magic = be32_to_cpu(buf[0]);
5912 	if (magic != 0x669955aa) {
5913 		rc = -ENODEV;
5914 		goto test_nvram_done;
5915 	}
5916 
5917 	if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5918 		goto test_nvram_done;
5919 
5920 	csum = ether_crc_le(0x100, data);
5921 	if (csum != CRC32_RESIDUAL) {
5922 		rc = -ENODEV;
5923 		goto test_nvram_done;
5924 	}
5925 
5926 	csum = ether_crc_le(0x100, data + 0x100);
5927 	if (csum != CRC32_RESIDUAL) {
5928 		rc = -ENODEV;
5929 	}
5930 
5931 test_nvram_done:
5932 	return rc;
5933 }
5934 
5935 static int
5936 bnx2_test_link(struct bnx2 *bp)
5937 {
5938 	u32 bmsr;
5939 
5940 	if (!netif_running(bp->dev))
5941 		return -ENODEV;
5942 
5943 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5944 		if (bp->link_up)
5945 			return 0;
5946 		return -ENODEV;
5947 	}
5948 	spin_lock_bh(&bp->phy_lock);
5949 	bnx2_enable_bmsr1(bp);
5950 	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5951 	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5952 	bnx2_disable_bmsr1(bp);
5953 	spin_unlock_bh(&bp->phy_lock);
5954 
5955 	if (bmsr & BMSR_LSTATUS) {
5956 		return 0;
5957 	}
5958 	return -ENODEV;
5959 }
5960 
5961 static int
5962 bnx2_test_intr(struct bnx2 *bp)
5963 {
5964 	int i;
5965 	u16 status_idx;
5966 
5967 	if (!netif_running(bp->dev))
5968 		return -ENODEV;
5969 
5970 	status_idx = BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5971 
5972 	/* This register is not touched during run-time. */
5973 	BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5974 	BNX2_RD(bp, BNX2_HC_COMMAND);
5975 
5976 	for (i = 0; i < 10; i++) {
5977 		if ((BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5978 			status_idx) {
5979 
5980 			break;
5981 		}
5982 
5983 		msleep_interruptible(10);
5984 	}
5985 	if (i < 10)
5986 		return 0;
5987 
5988 	return -ENODEV;
5989 }
5990 
5991 /* Determining link for parallel detection. */
5992 static int
5993 bnx2_5706_serdes_has_link(struct bnx2 *bp)
5994 {
5995 	u32 mode_ctl, an_dbg, exp;
5996 
5997 	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
5998 		return 0;
5999 
6000 	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
6001 	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
6002 
6003 	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
6004 		return 0;
6005 
6006 	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
6007 	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
6008 	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
6009 
6010 	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
6011 		return 0;
6012 
6013 	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
6014 	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
6015 	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
6016 
6017 	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
6018 		return 0;
6019 
6020 	return 1;
6021 }
6022 
6023 static void
6024 bnx2_5706_serdes_timer(struct bnx2 *bp)
6025 {
6026 	int check_link = 1;
6027 
6028 	spin_lock(&bp->phy_lock);
6029 	if (bp->serdes_an_pending) {
6030 		bp->serdes_an_pending--;
6031 		check_link = 0;
6032 	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6033 		u32 bmcr;
6034 
6035 		bp->current_interval = BNX2_TIMER_INTERVAL;
6036 
6037 		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6038 
6039 		if (bmcr & BMCR_ANENABLE) {
6040 			if (bnx2_5706_serdes_has_link(bp)) {
6041 				bmcr &= ~BMCR_ANENABLE;
6042 				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6043 				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
6044 				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
6045 			}
6046 		}
6047 	}
6048 	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
6049 		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
6050 		u32 phy2;
6051 
6052 		bnx2_write_phy(bp, 0x17, 0x0f01);
6053 		bnx2_read_phy(bp, 0x15, &phy2);
6054 		if (phy2 & 0x20) {
6055 			u32 bmcr;
6056 
6057 			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6058 			bmcr |= BMCR_ANENABLE;
6059 			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
6060 
6061 			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
6062 		}
6063 	} else
6064 		bp->current_interval = BNX2_TIMER_INTERVAL;
6065 
6066 	if (check_link) {
6067 		u32 val;
6068 
6069 		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
6070 		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6071 		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6072 
6073 		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
6074 			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
6075 				bnx2_5706s_force_link_dn(bp, 1);
6076 				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
6077 			} else
6078 				bnx2_set_link(bp);
6079 		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
6080 			bnx2_set_link(bp);
6081 	}
6082 	spin_unlock(&bp->phy_lock);
6083 }
6084 
6085 static void
6086 bnx2_5708_serdes_timer(struct bnx2 *bp)
6087 {
6088 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
6089 		return;
6090 
6091 	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
6092 		bp->serdes_an_pending = 0;
6093 		return;
6094 	}
6095 
6096 	spin_lock(&bp->phy_lock);
6097 	if (bp->serdes_an_pending)
6098 		bp->serdes_an_pending--;
6099 	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6100 		u32 bmcr;
6101 
6102 		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6103 		if (bmcr & BMCR_ANENABLE) {
6104 			bnx2_enable_forced_2g5(bp);
6105 			bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
6106 		} else {
6107 			bnx2_disable_forced_2g5(bp);
6108 			bp->serdes_an_pending = 2;
6109 			bp->current_interval = BNX2_TIMER_INTERVAL;
6110 		}
6111 
6112 	} else
6113 		bp->current_interval = BNX2_TIMER_INTERVAL;
6114 
6115 	spin_unlock(&bp->phy_lock);
6116 }
6117 
6118 static void
6119 bnx2_timer(unsigned long data)
6120 {
6121 	struct bnx2 *bp = (struct bnx2 *) data;
6122 
6123 	if (!netif_running(bp->dev))
6124 		return;
6125 
6126 	if (atomic_read(&bp->intr_sem) != 0)
6127 		goto bnx2_restart_timer;
6128 
6129 	if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
6130 	     BNX2_FLAG_USING_MSI)
6131 		bnx2_chk_missed_msi(bp);
6132 
6133 	bnx2_send_heart_beat(bp);
6134 
6135 	bp->stats_blk->stat_FwRxDrop =
6136 		bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
6137 
6138 	/* workaround occasional corrupted counters */
6139 	if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
6140 		BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
6141 			BNX2_HC_COMMAND_STATS_NOW);
6142 
6143 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6144 		if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
6145 			bnx2_5706_serdes_timer(bp);
6146 		else
6147 			bnx2_5708_serdes_timer(bp);
6148 	}
6149 
6150 bnx2_restart_timer:
6151 	mod_timer(&bp->timer, jiffies + bp->current_interval);
6152 }
6153 
6154 static int
6155 bnx2_request_irq(struct bnx2 *bp)
6156 {
6157 	unsigned long flags;
6158 	struct bnx2_irq *irq;
6159 	int rc = 0, i;
6160 
6161 	if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6162 		flags = 0;
6163 	else
6164 		flags = IRQF_SHARED;
6165 
6166 	for (i = 0; i < bp->irq_nvecs; i++) {
6167 		irq = &bp->irq_tbl[i];
6168 		rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6169 				 &bp->bnx2_napi[i]);
6170 		if (rc)
6171 			break;
6172 		irq->requested = 1;
6173 	}
6174 	return rc;
6175 }
6176 
6177 static void
6178 __bnx2_free_irq(struct bnx2 *bp)
6179 {
6180 	struct bnx2_irq *irq;
6181 	int i;
6182 
6183 	for (i = 0; i < bp->irq_nvecs; i++) {
6184 		irq = &bp->irq_tbl[i];
6185 		if (irq->requested)
6186 			free_irq(irq->vector, &bp->bnx2_napi[i]);
6187 		irq->requested = 0;
6188 	}
6189 }
6190 
6191 static void
6192 bnx2_free_irq(struct bnx2 *bp)
6193 {
6194 
6195 	__bnx2_free_irq(bp);
6196 	if (bp->flags & BNX2_FLAG_USING_MSI)
6197 		pci_disable_msi(bp->pdev);
6198 	else if (bp->flags & BNX2_FLAG_USING_MSIX)
6199 		pci_disable_msix(bp->pdev);
6200 
6201 	bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
6202 }
6203 
6204 static void
6205 bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
6206 {
6207 	int i, total_vecs, rc;
6208 	struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
6209 	struct net_device *dev = bp->dev;
6210 	const int len = sizeof(bp->irq_tbl[0].name);
6211 
6212 	bnx2_setup_msix_tbl(bp);
6213 	BNX2_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
6214 	BNX2_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
6215 	BNX2_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
6216 
6217 	/*  Need to flush the previous three writes to ensure MSI-X
6218 	 *  is setup properly */
6219 	BNX2_RD(bp, BNX2_PCI_MSIX_CONTROL);
6220 
6221 	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
6222 		msix_ent[i].entry = i;
6223 		msix_ent[i].vector = 0;
6224 	}
6225 
6226 	total_vecs = msix_vecs;
6227 #ifdef BCM_CNIC
6228 	total_vecs++;
6229 #endif
6230 	rc = -ENOSPC;
6231 	while (total_vecs >= BNX2_MIN_MSIX_VEC) {
6232 		rc = pci_enable_msix(bp->pdev, msix_ent, total_vecs);
6233 		if (rc <= 0)
6234 			break;
6235 		if (rc > 0)
6236 			total_vecs = rc;
6237 	}
6238 
6239 	if (rc != 0)
6240 		return;
6241 
6242 	msix_vecs = total_vecs;
6243 #ifdef BCM_CNIC
6244 	msix_vecs--;
6245 #endif
6246 	bp->irq_nvecs = msix_vecs;
6247 	bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
6248 	for (i = 0; i < total_vecs; i++) {
6249 		bp->irq_tbl[i].vector = msix_ent[i].vector;
6250 		snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
6251 		bp->irq_tbl[i].handler = bnx2_msi_1shot;
6252 	}
6253 }
6254 
6255 static int
6256 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
6257 {
6258 	int cpus = netif_get_num_default_rss_queues();
6259 	int msix_vecs;
6260 
6261 	if (!bp->num_req_rx_rings)
6262 		msix_vecs = max(cpus + 1, bp->num_req_tx_rings);
6263 	else if (!bp->num_req_tx_rings)
6264 		msix_vecs = max(cpus, bp->num_req_rx_rings);
6265 	else
6266 		msix_vecs = max(bp->num_req_rx_rings, bp->num_req_tx_rings);
6267 
6268 	msix_vecs = min(msix_vecs, RX_MAX_RINGS);
6269 
6270 	bp->irq_tbl[0].handler = bnx2_interrupt;
6271 	strcpy(bp->irq_tbl[0].name, bp->dev->name);
6272 	bp->irq_nvecs = 1;
6273 	bp->irq_tbl[0].vector = bp->pdev->irq;
6274 
6275 	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
6276 		bnx2_enable_msix(bp, msix_vecs);
6277 
6278 	if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
6279 	    !(bp->flags & BNX2_FLAG_USING_MSIX)) {
6280 		if (pci_enable_msi(bp->pdev) == 0) {
6281 			bp->flags |= BNX2_FLAG_USING_MSI;
6282 			if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
6283 				bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
6284 				bp->irq_tbl[0].handler = bnx2_msi_1shot;
6285 			} else
6286 				bp->irq_tbl[0].handler = bnx2_msi;
6287 
6288 			bp->irq_tbl[0].vector = bp->pdev->irq;
6289 		}
6290 	}
6291 
6292 	if (!bp->num_req_tx_rings)
6293 		bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
6294 	else
6295 		bp->num_tx_rings = min(bp->irq_nvecs, bp->num_req_tx_rings);
6296 
6297 	if (!bp->num_req_rx_rings)
6298 		bp->num_rx_rings = bp->irq_nvecs;
6299 	else
6300 		bp->num_rx_rings = min(bp->irq_nvecs, bp->num_req_rx_rings);
6301 
6302 	netif_set_real_num_tx_queues(bp->dev, bp->num_tx_rings);
6303 
6304 	return netif_set_real_num_rx_queues(bp->dev, bp->num_rx_rings);
6305 }
6306 
6307 /* Called with rtnl_lock */
6308 static int
6309 bnx2_open(struct net_device *dev)
6310 {
6311 	struct bnx2 *bp = netdev_priv(dev);
6312 	int rc;
6313 
6314 	rc = bnx2_request_firmware(bp);
6315 	if (rc < 0)
6316 		goto out;
6317 
6318 	netif_carrier_off(dev);
6319 
6320 	bnx2_set_power_state(bp, PCI_D0);
6321 	bnx2_disable_int(bp);
6322 
6323 	rc = bnx2_setup_int_mode(bp, disable_msi);
6324 	if (rc)
6325 		goto open_err;
6326 	bnx2_init_napi(bp);
6327 	bnx2_napi_enable(bp);
6328 	rc = bnx2_alloc_mem(bp);
6329 	if (rc)
6330 		goto open_err;
6331 
6332 	rc = bnx2_request_irq(bp);
6333 	if (rc)
6334 		goto open_err;
6335 
6336 	rc = bnx2_init_nic(bp, 1);
6337 	if (rc)
6338 		goto open_err;
6339 
6340 	mod_timer(&bp->timer, jiffies + bp->current_interval);
6341 
6342 	atomic_set(&bp->intr_sem, 0);
6343 
6344 	memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));
6345 
6346 	bnx2_enable_int(bp);
6347 
6348 	if (bp->flags & BNX2_FLAG_USING_MSI) {
6349 		/* Test MSI to make sure it is working
6350 		 * If MSI test fails, go back to INTx mode
6351 		 */
6352 		if (bnx2_test_intr(bp) != 0) {
6353 			netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");
6354 
6355 			bnx2_disable_int(bp);
6356 			bnx2_free_irq(bp);
6357 
6358 			bnx2_setup_int_mode(bp, 1);
6359 
6360 			rc = bnx2_init_nic(bp, 0);
6361 
6362 			if (!rc)
6363 				rc = bnx2_request_irq(bp);
6364 
6365 			if (rc) {
6366 				del_timer_sync(&bp->timer);
6367 				goto open_err;
6368 			}
6369 			bnx2_enable_int(bp);
6370 		}
6371 	}
6372 	if (bp->flags & BNX2_FLAG_USING_MSI)
6373 		netdev_info(dev, "using MSI\n");
6374 	else if (bp->flags & BNX2_FLAG_USING_MSIX)
6375 		netdev_info(dev, "using MSIX\n");
6376 
6377 	netif_tx_start_all_queues(dev);
6378 out:
6379 	return rc;
6380 
6381 open_err:
6382 	bnx2_napi_disable(bp);
6383 	bnx2_free_skbs(bp);
6384 	bnx2_free_irq(bp);
6385 	bnx2_free_mem(bp);
6386 	bnx2_del_napi(bp);
6387 	bnx2_release_firmware(bp);
6388 	goto out;
6389 }
6390 
6391 static void
6392 bnx2_reset_task(struct work_struct *work)
6393 {
6394 	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
6395 	int rc;
6396 	u16 pcicmd;
6397 
6398 	rtnl_lock();
6399 	if (!netif_running(bp->dev)) {
6400 		rtnl_unlock();
6401 		return;
6402 	}
6403 
6404 	bnx2_netif_stop(bp, true);
6405 
6406 	pci_read_config_word(bp->pdev, PCI_COMMAND, &pcicmd);
6407 	if (!(pcicmd & PCI_COMMAND_MEMORY)) {
6408 		/* in case PCI block has reset */
6409 		pci_restore_state(bp->pdev);
6410 		pci_save_state(bp->pdev);
6411 	}
6412 	rc = bnx2_init_nic(bp, 1);
6413 	if (rc) {
6414 		netdev_err(bp->dev, "failed to reset NIC, closing\n");
6415 		bnx2_napi_enable(bp);
6416 		dev_close(bp->dev);
6417 		rtnl_unlock();
6418 		return;
6419 	}
6420 
6421 	atomic_set(&bp->intr_sem, 1);
6422 	bnx2_netif_start(bp, true);
6423 	rtnl_unlock();
6424 }
6425 
6426 #define BNX2_FTQ_ENTRY(ftq) { __stringify(ftq##FTQ_CTL), BNX2_##ftq##FTQ_CTL }
6427 
6428 static void
6429 bnx2_dump_ftq(struct bnx2 *bp)
6430 {
6431 	int i;
6432 	u32 reg, bdidx, cid, valid;
6433 	struct net_device *dev = bp->dev;
6434 	static const struct ftq_reg {
6435 		char *name;
6436 		u32 off;
6437 	} ftq_arr[] = {
6438 		BNX2_FTQ_ENTRY(RV2P_P),
6439 		BNX2_FTQ_ENTRY(RV2P_T),
6440 		BNX2_FTQ_ENTRY(RV2P_M),
6441 		BNX2_FTQ_ENTRY(TBDR_),
6442 		BNX2_FTQ_ENTRY(TDMA_),
6443 		BNX2_FTQ_ENTRY(TXP_),
6444 		BNX2_FTQ_ENTRY(TXP_),
6445 		BNX2_FTQ_ENTRY(TPAT_),
6446 		BNX2_FTQ_ENTRY(RXP_C),
6447 		BNX2_FTQ_ENTRY(RXP_),
6448 		BNX2_FTQ_ENTRY(COM_COMXQ_),
6449 		BNX2_FTQ_ENTRY(COM_COMTQ_),
6450 		BNX2_FTQ_ENTRY(COM_COMQ_),
6451 		BNX2_FTQ_ENTRY(CP_CPQ_),
6452 	};
6453 
6454 	netdev_err(dev, "<--- start FTQ dump --->\n");
6455 	for (i = 0; i < ARRAY_SIZE(ftq_arr); i++)
6456 		netdev_err(dev, "%s %08x\n", ftq_arr[i].name,
6457 			   bnx2_reg_rd_ind(bp, ftq_arr[i].off));
6458 
6459 	netdev_err(dev, "CPU states:\n");
6460 	for (reg = BNX2_TXP_CPU_MODE; reg <= BNX2_CP_CPU_MODE; reg += 0x40000)
6461 		netdev_err(dev, "%06x mode %x state %x evt_mask %x pc %x pc %x instr %x\n",
6462 			   reg, bnx2_reg_rd_ind(bp, reg),
6463 			   bnx2_reg_rd_ind(bp, reg + 4),
6464 			   bnx2_reg_rd_ind(bp, reg + 8),
6465 			   bnx2_reg_rd_ind(bp, reg + 0x1c),
6466 			   bnx2_reg_rd_ind(bp, reg + 0x1c),
6467 			   bnx2_reg_rd_ind(bp, reg + 0x20));
6468 
6469 	netdev_err(dev, "<--- end FTQ dump --->\n");
6470 	netdev_err(dev, "<--- start TBDC dump --->\n");
6471 	netdev_err(dev, "TBDC free cnt: %ld\n",
6472 		   BNX2_RD(bp, BNX2_TBDC_STATUS) & BNX2_TBDC_STATUS_FREE_CNT);
6473 	netdev_err(dev, "LINE     CID  BIDX   CMD  VALIDS\n");
6474 	for (i = 0; i < 0x20; i++) {
6475 		int j = 0;
6476 
6477 		BNX2_WR(bp, BNX2_TBDC_BD_ADDR, i);
6478 		BNX2_WR(bp, BNX2_TBDC_CAM_OPCODE,
6479 			BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_READ);
6480 		BNX2_WR(bp, BNX2_TBDC_COMMAND, BNX2_TBDC_COMMAND_CMD_REG_ARB);
6481 		while ((BNX2_RD(bp, BNX2_TBDC_COMMAND) &
6482 			BNX2_TBDC_COMMAND_CMD_REG_ARB) && j < 100)
6483 			j++;
6484 
6485 		cid = BNX2_RD(bp, BNX2_TBDC_CID);
6486 		bdidx = BNX2_RD(bp, BNX2_TBDC_BIDX);
6487 		valid = BNX2_RD(bp, BNX2_TBDC_CAM_OPCODE);
6488 		netdev_err(dev, "%02x    %06x  %04lx   %02x    [%x]\n",
6489 			   i, cid, bdidx & BNX2_TBDC_BDIDX_BDIDX,
6490 			   bdidx >> 24, (valid >> 8) & 0x0ff);
6491 	}
6492 	netdev_err(dev, "<--- end TBDC dump --->\n");
6493 }
6494 
6495 static void
6496 bnx2_dump_state(struct bnx2 *bp)
6497 {
6498 	struct net_device *dev = bp->dev;
6499 	u32 val1, val2;
6500 
6501 	pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1);
6502 	netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n",
6503 		   atomic_read(&bp->intr_sem), val1);
6504 	pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1);
6505 	pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2);
6506 	netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2);
6507 	netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
6508 		   BNX2_RD(bp, BNX2_EMAC_TX_STATUS),
6509 		   BNX2_RD(bp, BNX2_EMAC_RX_STATUS));
6510 	netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
6511 		   BNX2_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
6512 	netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
6513 		   BNX2_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
6514 	if (bp->flags & BNX2_FLAG_USING_MSIX)
6515 		netdev_err(dev, "DEBUG: PBA[%08x]\n",
6516 			   BNX2_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
6517 }
6518 
6519 static void
6520 bnx2_tx_timeout(struct net_device *dev)
6521 {
6522 	struct bnx2 *bp = netdev_priv(dev);
6523 
6524 	bnx2_dump_ftq(bp);
6525 	bnx2_dump_state(bp);
6526 	bnx2_dump_mcp_state(bp);
6527 
6528 	/* This allows the netif to be shutdown gracefully before resetting */
6529 	schedule_work(&bp->reset_task);
6530 }
6531 
6532 /* Called with netif_tx_lock.
6533  * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
6534  * netif_wake_queue().
6535  */
6536 static netdev_tx_t
6537 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6538 {
6539 	struct bnx2 *bp = netdev_priv(dev);
6540 	dma_addr_t mapping;
6541 	struct bnx2_tx_bd *txbd;
6542 	struct bnx2_sw_tx_bd *tx_buf;
6543 	u32 len, vlan_tag_flags, last_frag, mss;
6544 	u16 prod, ring_prod;
6545 	int i;
6546 	struct bnx2_napi *bnapi;
6547 	struct bnx2_tx_ring_info *txr;
6548 	struct netdev_queue *txq;
6549 
6550 	/*  Determine which tx ring we will be placed on */
6551 	i = skb_get_queue_mapping(skb);
6552 	bnapi = &bp->bnx2_napi[i];
6553 	txr = &bnapi->tx_ring;
6554 	txq = netdev_get_tx_queue(dev, i);
6555 
6556 	if (unlikely(bnx2_tx_avail(bp, txr) <
6557 	    (skb_shinfo(skb)->nr_frags + 1))) {
6558 		netif_tx_stop_queue(txq);
6559 		netdev_err(dev, "BUG! Tx ring full when queue awake!\n");
6560 
6561 		return NETDEV_TX_BUSY;
6562 	}
6563 	len = skb_headlen(skb);
6564 	prod = txr->tx_prod;
6565 	ring_prod = BNX2_TX_RING_IDX(prod);
6566 
6567 	vlan_tag_flags = 0;
6568 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
6569 		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
6570 	}
6571 
6572 	if (vlan_tx_tag_present(skb)) {
6573 		vlan_tag_flags |=
6574 			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
6575 	}
6576 
6577 	if ((mss = skb_shinfo(skb)->gso_size)) {
6578 		u32 tcp_opt_len;
6579 		struct iphdr *iph;
6580 
6581 		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
6582 
6583 		tcp_opt_len = tcp_optlen(skb);
6584 
6585 		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
6586 			u32 tcp_off = skb_transport_offset(skb) -
6587 				      sizeof(struct ipv6hdr) - ETH_HLEN;
6588 
6589 			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
6590 					  TX_BD_FLAGS_SW_FLAGS;
6591 			if (likely(tcp_off == 0))
6592 				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
6593 			else {
6594 				tcp_off >>= 3;
6595 				vlan_tag_flags |= ((tcp_off & 0x3) <<
6596 						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
6597 						  ((tcp_off & 0x10) <<
6598 						   TX_BD_FLAGS_TCP6_OFF4_SHL);
6599 				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
6600 			}
6601 		} else {
6602 			iph = ip_hdr(skb);
6603 			if (tcp_opt_len || (iph->ihl > 5)) {
6604 				vlan_tag_flags |= ((iph->ihl - 5) +
6605 						   (tcp_opt_len >> 2)) << 8;
6606 			}
6607 		}
6608 	} else
6609 		mss = 0;
6610 
6611 	mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
6612 	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
6613 		dev_kfree_skb(skb);
6614 		return NETDEV_TX_OK;
6615 	}
6616 
6617 	tx_buf = &txr->tx_buf_ring[ring_prod];
6618 	tx_buf->skb = skb;
6619 	dma_unmap_addr_set(tx_buf, mapping, mapping);
6620 
6621 	txbd = &txr->tx_desc_ring[ring_prod];
6622 
6623 	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6624 	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6625 	txbd->tx_bd_mss_nbytes = len | (mss << 16);
6626 	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
6627 
6628 	last_frag = skb_shinfo(skb)->nr_frags;
6629 	tx_buf->nr_frags = last_frag;
6630 	tx_buf->is_gso = skb_is_gso(skb);
6631 
6632 	for (i = 0; i < last_frag; i++) {
6633 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6634 
6635 		prod = BNX2_NEXT_TX_BD(prod);
6636 		ring_prod = BNX2_TX_RING_IDX(prod);
6637 		txbd = &txr->tx_desc_ring[ring_prod];
6638 
6639 		len = skb_frag_size(frag);
6640 		mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, len,
6641 					   DMA_TO_DEVICE);
6642 		if (dma_mapping_error(&bp->pdev->dev, mapping))
6643 			goto dma_error;
6644 		dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
6645 				   mapping);
6646 
6647 		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6648 		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6649 		txbd->tx_bd_mss_nbytes = len | (mss << 16);
6650 		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
6651 
6652 	}
6653 	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
6654 
6655 	/* Sync BD data before updating TX mailbox */
6656 	wmb();
6657 
6658 	netdev_tx_sent_queue(txq, skb->len);
6659 
6660 	prod = BNX2_NEXT_TX_BD(prod);
6661 	txr->tx_prod_bseq += skb->len;
6662 
6663 	BNX2_WR16(bp, txr->tx_bidx_addr, prod);
6664 	BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
6665 
6666 	mmiowb();
6667 
6668 	txr->tx_prod = prod;
6669 
6670 	if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
6671 		netif_tx_stop_queue(txq);
6672 
6673 		/* netif_tx_stop_queue() must be done before checking
6674 		 * tx index in bnx2_tx_avail() below, because in
6675 		 * bnx2_tx_int(), we update tx index before checking for
6676 		 * netif_tx_queue_stopped().
6677 		 */
6678 		smp_mb();
6679 		if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
6680 			netif_tx_wake_queue(txq);
6681 	}
6682 
6683 	return NETDEV_TX_OK;
6684 dma_error:
6685 	/* save value of frag that failed */
6686 	last_frag = i;
6687 
6688 	/* start back at beginning and unmap skb */
6689 	prod = txr->tx_prod;
6690 	ring_prod = BNX2_TX_RING_IDX(prod);
6691 	tx_buf = &txr->tx_buf_ring[ring_prod];
6692 	tx_buf->skb = NULL;
6693 	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6694 			 skb_headlen(skb), PCI_DMA_TODEVICE);
6695 
6696 	/* unmap remaining mapped pages */
6697 	for (i = 0; i < last_frag; i++) {
6698 		prod = BNX2_NEXT_TX_BD(prod);
6699 		ring_prod = BNX2_TX_RING_IDX(prod);
6700 		tx_buf = &txr->tx_buf_ring[ring_prod];
6701 		dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6702 			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
6703 			       PCI_DMA_TODEVICE);
6704 	}
6705 
6706 	dev_kfree_skb(skb);
6707 	return NETDEV_TX_OK;
6708 }
6709 
6710 /* Called with rtnl_lock */
6711 static int
6712 bnx2_close(struct net_device *dev)
6713 {
6714 	struct bnx2 *bp = netdev_priv(dev);
6715 
6716 	bnx2_disable_int_sync(bp);
6717 	bnx2_napi_disable(bp);
6718 	netif_tx_disable(dev);
6719 	del_timer_sync(&bp->timer);
6720 	bnx2_shutdown_chip(bp);
6721 	bnx2_free_irq(bp);
6722 	bnx2_free_skbs(bp);
6723 	bnx2_free_mem(bp);
6724 	bnx2_del_napi(bp);
6725 	bp->link_up = 0;
6726 	netif_carrier_off(bp->dev);
6727 	bnx2_set_power_state(bp, PCI_D3hot);
6728 	return 0;
6729 }
6730 
6731 static void
6732 bnx2_save_stats(struct bnx2 *bp)
6733 {
6734 	u32 *hw_stats = (u32 *) bp->stats_blk;
6735 	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
6736 	int i;
6737 
6738 	/* The 1st 10 counters are 64-bit counters */
6739 	for (i = 0; i < 20; i += 2) {
6740 		u32 hi;
6741 		u64 lo;
6742 
6743 		hi = temp_stats[i] + hw_stats[i];
6744 		lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
6745 		if (lo > 0xffffffff)
6746 			hi++;
6747 		temp_stats[i] = hi;
6748 		temp_stats[i + 1] = lo & 0xffffffff;
6749 	}
6750 
6751 	for ( ; i < sizeof(struct statistics_block) / 4; i++)
6752 		temp_stats[i] += hw_stats[i];
6753 }
6754 
6755 #define GET_64BIT_NET_STATS64(ctr)		\
6756 	(((u64) (ctr##_hi) << 32) + (u64) (ctr##_lo))
6757 
6758 #define GET_64BIT_NET_STATS(ctr)				\
6759 	GET_64BIT_NET_STATS64(bp->stats_blk->ctr) +		\
6760 	GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)
6761 
6762 #define GET_32BIT_NET_STATS(ctr)				\
6763 	(unsigned long) (bp->stats_blk->ctr +			\
6764 			 bp->temp_stats_blk->ctr)
6765 
6766 static struct rtnl_link_stats64 *
6767 bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
6768 {
6769 	struct bnx2 *bp = netdev_priv(dev);
6770 
6771 	if (bp->stats_blk == NULL)
6772 		return net_stats;
6773 
6774 	net_stats->rx_packets =
6775 		GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
6776 		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
6777 		GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);
6778 
6779 	net_stats->tx_packets =
6780 		GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
6781 		GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
6782 		GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);
6783 
6784 	net_stats->rx_bytes =
6785 		GET_64BIT_NET_STATS(stat_IfHCInOctets);
6786 
6787 	net_stats->tx_bytes =
6788 		GET_64BIT_NET_STATS(stat_IfHCOutOctets);
6789 
6790 	net_stats->multicast =
6791 		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts);
6792 
6793 	net_stats->collisions =
6794 		GET_32BIT_NET_STATS(stat_EtherStatsCollisions);
6795 
6796 	net_stats->rx_length_errors =
6797 		GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
6798 		GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);
6799 
6800 	net_stats->rx_over_errors =
6801 		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6802 		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);
6803 
6804 	net_stats->rx_frame_errors =
6805 		GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);
6806 
6807 	net_stats->rx_crc_errors =
6808 		GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);
6809 
6810 	net_stats->rx_errors = net_stats->rx_length_errors +
6811 		net_stats->rx_over_errors + net_stats->rx_frame_errors +
6812 		net_stats->rx_crc_errors;
6813 
6814 	net_stats->tx_aborted_errors =
6815 		GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
6816 		GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);
6817 
6818 	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
6819 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
6820 		net_stats->tx_carrier_errors = 0;
6821 	else {
6822 		net_stats->tx_carrier_errors =
6823 			GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
6824 	}
6825 
6826 	net_stats->tx_errors =
6827 		GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
6828 		net_stats->tx_aborted_errors +
6829 		net_stats->tx_carrier_errors;
6830 
6831 	net_stats->rx_missed_errors =
6832 		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6833 		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
6834 		GET_32BIT_NET_STATS(stat_FwRxDrop);
6835 
6836 	return net_stats;
6837 }
6838 
6839 /* All ethtool functions called with rtnl_lock */
6840 
6841 static int
6842 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6843 {
6844 	struct bnx2 *bp = netdev_priv(dev);
6845 	int support_serdes = 0, support_copper = 0;
6846 
6847 	cmd->supported = SUPPORTED_Autoneg;
6848 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6849 		support_serdes = 1;
6850 		support_copper = 1;
6851 	} else if (bp->phy_port == PORT_FIBRE)
6852 		support_serdes = 1;
6853 	else
6854 		support_copper = 1;
6855 
6856 	if (support_serdes) {
6857 		cmd->supported |= SUPPORTED_1000baseT_Full |
6858 			SUPPORTED_FIBRE;
6859 		if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6860 			cmd->supported |= SUPPORTED_2500baseX_Full;
6861 
6862 	}
6863 	if (support_copper) {
6864 		cmd->supported |= SUPPORTED_10baseT_Half |
6865 			SUPPORTED_10baseT_Full |
6866 			SUPPORTED_100baseT_Half |
6867 			SUPPORTED_100baseT_Full |
6868 			SUPPORTED_1000baseT_Full |
6869 			SUPPORTED_TP;
6870 
6871 	}
6872 
6873 	spin_lock_bh(&bp->phy_lock);
6874 	cmd->port = bp->phy_port;
6875 	cmd->advertising = bp->advertising;
6876 
6877 	if (bp->autoneg & AUTONEG_SPEED) {
6878 		cmd->autoneg = AUTONEG_ENABLE;
6879 	} else {
6880 		cmd->autoneg = AUTONEG_DISABLE;
6881 	}
6882 
6883 	if (netif_carrier_ok(dev)) {
6884 		ethtool_cmd_speed_set(cmd, bp->line_speed);
6885 		cmd->duplex = bp->duplex;
6886 	}
6887 	else {
6888 		ethtool_cmd_speed_set(cmd, -1);
6889 		cmd->duplex = -1;
6890 	}
6891 	spin_unlock_bh(&bp->phy_lock);
6892 
6893 	cmd->transceiver = XCVR_INTERNAL;
6894 	cmd->phy_address = bp->phy_addr;
6895 
6896 	return 0;
6897 }
6898 
6899 static int
6900 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6901 {
6902 	struct bnx2 *bp = netdev_priv(dev);
6903 	u8 autoneg = bp->autoneg;
6904 	u8 req_duplex = bp->req_duplex;
6905 	u16 req_line_speed = bp->req_line_speed;
6906 	u32 advertising = bp->advertising;
6907 	int err = -EINVAL;
6908 
6909 	spin_lock_bh(&bp->phy_lock);
6910 
6911 	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
6912 		goto err_out_unlock;
6913 
6914 	if (cmd->port != bp->phy_port &&
6915 	    !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
6916 		goto err_out_unlock;
6917 
6918 	/* If device is down, we can store the settings only if the user
6919 	 * is setting the currently active port.
6920 	 */
6921 	if (!netif_running(dev) && cmd->port != bp->phy_port)
6922 		goto err_out_unlock;
6923 
6924 	if (cmd->autoneg == AUTONEG_ENABLE) {
6925 		autoneg |= AUTONEG_SPEED;
6926 
6927 		advertising = cmd->advertising;
6928 		if (cmd->port == PORT_TP) {
6929 			advertising &= ETHTOOL_ALL_COPPER_SPEED;
6930 			if (!advertising)
6931 				advertising = ETHTOOL_ALL_COPPER_SPEED;
6932 		} else {
6933 			advertising &= ETHTOOL_ALL_FIBRE_SPEED;
6934 			if (!advertising)
6935 				advertising = ETHTOOL_ALL_FIBRE_SPEED;
6936 		}
6937 		advertising |= ADVERTISED_Autoneg;
6938 	}
6939 	else {
6940 		u32 speed = ethtool_cmd_speed(cmd);
6941 		if (cmd->port == PORT_FIBRE) {
6942 			if ((speed != SPEED_1000 &&
6943 			     speed != SPEED_2500) ||
6944 			    (cmd->duplex != DUPLEX_FULL))
6945 				goto err_out_unlock;
6946 
6947 			if (speed == SPEED_2500 &&
6948 			    !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
6949 				goto err_out_unlock;
6950 		} else if (speed == SPEED_1000 || speed == SPEED_2500)
6951 			goto err_out_unlock;
6952 
6953 		autoneg &= ~AUTONEG_SPEED;
6954 		req_line_speed = speed;
6955 		req_duplex = cmd->duplex;
6956 		advertising = 0;
6957 	}
6958 
6959 	bp->autoneg = autoneg;
6960 	bp->advertising = advertising;
6961 	bp->req_line_speed = req_line_speed;
6962 	bp->req_duplex = req_duplex;
6963 
6964 	err = 0;
6965 	/* If device is down, the new settings will be picked up when it is
6966 	 * brought up.
6967 	 */
6968 	if (netif_running(dev))
6969 		err = bnx2_setup_phy(bp, cmd->port);
6970 
6971 err_out_unlock:
6972 	spin_unlock_bh(&bp->phy_lock);
6973 
6974 	return err;
6975 }
6976 
6977 static void
6978 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6979 {
6980 	struct bnx2 *bp = netdev_priv(dev);
6981 
6982 	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
6983 	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
6984 	strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
6985 	strlcpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
6986 }
6987 
6988 #define BNX2_REGDUMP_LEN		(32 * 1024)
6989 
6990 static int
6991 bnx2_get_regs_len(struct net_device *dev)
6992 {
6993 	return BNX2_REGDUMP_LEN;
6994 }
6995 
6996 static void
6997 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6998 {
6999 	u32 *p = _p, i, offset;
7000 	u8 *orig_p = _p;
7001 	struct bnx2 *bp = netdev_priv(dev);
7002 	static const u32 reg_boundaries[] = {
7003 		0x0000, 0x0098, 0x0400, 0x045c,
7004 		0x0800, 0x0880, 0x0c00, 0x0c10,
7005 		0x0c30, 0x0d08, 0x1000, 0x101c,
7006 		0x1040, 0x1048, 0x1080, 0x10a4,
7007 		0x1400, 0x1490, 0x1498, 0x14f0,
7008 		0x1500, 0x155c, 0x1580, 0x15dc,
7009 		0x1600, 0x1658, 0x1680, 0x16d8,
7010 		0x1800, 0x1820, 0x1840, 0x1854,
7011 		0x1880, 0x1894, 0x1900, 0x1984,
7012 		0x1c00, 0x1c0c, 0x1c40, 0x1c54,
7013 		0x1c80, 0x1c94, 0x1d00, 0x1d84,
7014 		0x2000, 0x2030, 0x23c0, 0x2400,
7015 		0x2800, 0x2820, 0x2830, 0x2850,
7016 		0x2b40, 0x2c10, 0x2fc0, 0x3058,
7017 		0x3c00, 0x3c94, 0x4000, 0x4010,
7018 		0x4080, 0x4090, 0x43c0, 0x4458,
7019 		0x4c00, 0x4c18, 0x4c40, 0x4c54,
7020 		0x4fc0, 0x5010, 0x53c0, 0x5444,
7021 		0x5c00, 0x5c18, 0x5c80, 0x5c90,
7022 		0x5fc0, 0x6000, 0x6400, 0x6428,
7023 		0x6800, 0x6848, 0x684c, 0x6860,
7024 		0x6888, 0x6910, 0x8000
7025 	};
7026 
7027 	regs->version = 0;
7028 
7029 	memset(p, 0, BNX2_REGDUMP_LEN);
7030 
7031 	if (!netif_running(bp->dev))
7032 		return;
7033 
7034 	i = 0;
7035 	offset = reg_boundaries[0];
7036 	p += offset;
7037 	while (offset < BNX2_REGDUMP_LEN) {
7038 		*p++ = BNX2_RD(bp, offset);
7039 		offset += 4;
7040 		if (offset == reg_boundaries[i + 1]) {
7041 			offset = reg_boundaries[i + 2];
7042 			p = (u32 *) (orig_p + offset);
7043 			i += 2;
7044 		}
7045 	}
7046 }
7047 
7048 static void
7049 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7050 {
7051 	struct bnx2 *bp = netdev_priv(dev);
7052 
7053 	if (bp->flags & BNX2_FLAG_NO_WOL) {
7054 		wol->supported = 0;
7055 		wol->wolopts = 0;
7056 	}
7057 	else {
7058 		wol->supported = WAKE_MAGIC;
7059 		if (bp->wol)
7060 			wol->wolopts = WAKE_MAGIC;
7061 		else
7062 			wol->wolopts = 0;
7063 	}
7064 	memset(&wol->sopass, 0, sizeof(wol->sopass));
7065 }
7066 
7067 static int
7068 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7069 {
7070 	struct bnx2 *bp = netdev_priv(dev);
7071 
7072 	if (wol->wolopts & ~WAKE_MAGIC)
7073 		return -EINVAL;
7074 
7075 	if (wol->wolopts & WAKE_MAGIC) {
7076 		if (bp->flags & BNX2_FLAG_NO_WOL)
7077 			return -EINVAL;
7078 
7079 		bp->wol = 1;
7080 	}
7081 	else {
7082 		bp->wol = 0;
7083 	}
7084 	return 0;
7085 }
7086 
7087 static int
7088 bnx2_nway_reset(struct net_device *dev)
7089 {
7090 	struct bnx2 *bp = netdev_priv(dev);
7091 	u32 bmcr;
7092 
7093 	if (!netif_running(dev))
7094 		return -EAGAIN;
7095 
7096 	if (!(bp->autoneg & AUTONEG_SPEED)) {
7097 		return -EINVAL;
7098 	}
7099 
7100 	spin_lock_bh(&bp->phy_lock);
7101 
7102 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
7103 		int rc;
7104 
7105 		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
7106 		spin_unlock_bh(&bp->phy_lock);
7107 		return rc;
7108 	}
7109 
7110 	/* Force a link down visible on the other side */
7111 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
7112 		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
7113 		spin_unlock_bh(&bp->phy_lock);
7114 
7115 		msleep(20);
7116 
7117 		spin_lock_bh(&bp->phy_lock);
7118 
7119 		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
7120 		bp->serdes_an_pending = 1;
7121 		mod_timer(&bp->timer, jiffies + bp->current_interval);
7122 	}
7123 
7124 	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
7125 	bmcr &= ~BMCR_LOOPBACK;
7126 	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
7127 
7128 	spin_unlock_bh(&bp->phy_lock);
7129 
7130 	return 0;
7131 }
7132 
7133 static u32
7134 bnx2_get_link(struct net_device *dev)
7135 {
7136 	struct bnx2 *bp = netdev_priv(dev);
7137 
7138 	return bp->link_up;
7139 }
7140 
7141 static int
7142 bnx2_get_eeprom_len(struct net_device *dev)
7143 {
7144 	struct bnx2 *bp = netdev_priv(dev);
7145 
7146 	if (bp->flash_info == NULL)
7147 		return 0;
7148 
7149 	return (int) bp->flash_size;
7150 }
7151 
7152 static int
7153 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7154 		u8 *eebuf)
7155 {
7156 	struct bnx2 *bp = netdev_priv(dev);
7157 	int rc;
7158 
7159 	if (!netif_running(dev))
7160 		return -EAGAIN;
7161 
7162 	/* parameters already validated in ethtool_get_eeprom */
7163 
7164 	rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
7165 
7166 	return rc;
7167 }
7168 
7169 static int
7170 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7171 		u8 *eebuf)
7172 {
7173 	struct bnx2 *bp = netdev_priv(dev);
7174 	int rc;
7175 
7176 	if (!netif_running(dev))
7177 		return -EAGAIN;
7178 
7179 	/* parameters already validated in ethtool_set_eeprom */
7180 
7181 	rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
7182 
7183 	return rc;
7184 }
7185 
7186 static int
7187 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7188 {
7189 	struct bnx2 *bp = netdev_priv(dev);
7190 
7191 	memset(coal, 0, sizeof(struct ethtool_coalesce));
7192 
7193 	coal->rx_coalesce_usecs = bp->rx_ticks;
7194 	coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
7195 	coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
7196 	coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
7197 
7198 	coal->tx_coalesce_usecs = bp->tx_ticks;
7199 	coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
7200 	coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
7201 	coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
7202 
7203 	coal->stats_block_coalesce_usecs = bp->stats_ticks;
7204 
7205 	return 0;
7206 }
7207 
7208 static int
7209 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7210 {
7211 	struct bnx2 *bp = netdev_priv(dev);
7212 
7213 	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
7214 	if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
7215 
7216 	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
7217 	if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
7218 
7219 	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
7220 	if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
7221 
7222 	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
7223 	if (bp->rx_quick_cons_trip_int > 0xff)
7224 		bp->rx_quick_cons_trip_int = 0xff;
7225 
7226 	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
7227 	if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
7228 
7229 	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
7230 	if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
7231 
7232 	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
7233 	if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
7234 
7235 	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
7236 	if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
7237 		0xff;
7238 
7239 	bp->stats_ticks = coal->stats_block_coalesce_usecs;
7240 	if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
7241 		if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
7242 			bp->stats_ticks = USEC_PER_SEC;
7243 	}
7244 	if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
7245 		bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7246 	bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7247 
7248 	if (netif_running(bp->dev)) {
7249 		bnx2_netif_stop(bp, true);
7250 		bnx2_init_nic(bp, 0);
7251 		bnx2_netif_start(bp, true);
7252 	}
7253 
7254 	return 0;
7255 }
7256 
7257 static void
7258 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7259 {
7260 	struct bnx2 *bp = netdev_priv(dev);
7261 
7262 	ering->rx_max_pending = BNX2_MAX_TOTAL_RX_DESC_CNT;
7263 	ering->rx_jumbo_max_pending = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;
7264 
7265 	ering->rx_pending = bp->rx_ring_size;
7266 	ering->rx_jumbo_pending = bp->rx_pg_ring_size;
7267 
7268 	ering->tx_max_pending = BNX2_MAX_TX_DESC_CNT;
7269 	ering->tx_pending = bp->tx_ring_size;
7270 }
7271 
7272 static int
7273 bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx, bool reset_irq)
7274 {
7275 	if (netif_running(bp->dev)) {
7276 		/* Reset will erase chipset stats; save them */
7277 		bnx2_save_stats(bp);
7278 
7279 		bnx2_netif_stop(bp, true);
7280 		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
7281 		if (reset_irq) {
7282 			bnx2_free_irq(bp);
7283 			bnx2_del_napi(bp);
7284 		} else {
7285 			__bnx2_free_irq(bp);
7286 		}
7287 		bnx2_free_skbs(bp);
7288 		bnx2_free_mem(bp);
7289 	}
7290 
7291 	bnx2_set_rx_ring_size(bp, rx);
7292 	bp->tx_ring_size = tx;
7293 
7294 	if (netif_running(bp->dev)) {
7295 		int rc = 0;
7296 
7297 		if (reset_irq) {
7298 			rc = bnx2_setup_int_mode(bp, disable_msi);
7299 			bnx2_init_napi(bp);
7300 		}
7301 
7302 		if (!rc)
7303 			rc = bnx2_alloc_mem(bp);
7304 
7305 		if (!rc)
7306 			rc = bnx2_request_irq(bp);
7307 
7308 		if (!rc)
7309 			rc = bnx2_init_nic(bp, 0);
7310 
7311 		if (rc) {
7312 			bnx2_napi_enable(bp);
7313 			dev_close(bp->dev);
7314 			return rc;
7315 		}
7316 #ifdef BCM_CNIC
7317 		mutex_lock(&bp->cnic_lock);
7318 		/* Let cnic know about the new status block. */
7319 		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
7320 			bnx2_setup_cnic_irq_info(bp);
7321 		mutex_unlock(&bp->cnic_lock);
7322 #endif
7323 		bnx2_netif_start(bp, true);
7324 	}
7325 	return 0;
7326 }
7327 
7328 static int
7329 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7330 {
7331 	struct bnx2 *bp = netdev_priv(dev);
7332 	int rc;
7333 
7334 	if ((ering->rx_pending > BNX2_MAX_TOTAL_RX_DESC_CNT) ||
7335 		(ering->tx_pending > BNX2_MAX_TX_DESC_CNT) ||
7336 		(ering->tx_pending <= MAX_SKB_FRAGS)) {
7337 
7338 		return -EINVAL;
7339 	}
7340 	rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending,
7341 				   false);
7342 	return rc;
7343 }
7344 
7345 static void
7346 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7347 {
7348 	struct bnx2 *bp = netdev_priv(dev);
7349 
7350 	epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7351 	epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7352 	epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7353 }
7354 
7355 static int
7356 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7357 {
7358 	struct bnx2 *bp = netdev_priv(dev);
7359 
7360 	bp->req_flow_ctrl = 0;
7361 	if (epause->rx_pause)
7362 		bp->req_flow_ctrl |= FLOW_CTRL_RX;
7363 	if (epause->tx_pause)
7364 		bp->req_flow_ctrl |= FLOW_CTRL_TX;
7365 
7366 	if (epause->autoneg) {
7367 		bp->autoneg |= AUTONEG_FLOW_CTRL;
7368 	}
7369 	else {
7370 		bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7371 	}
7372 
7373 	if (netif_running(dev)) {
7374 		spin_lock_bh(&bp->phy_lock);
7375 		bnx2_setup_phy(bp, bp->phy_port);
7376 		spin_unlock_bh(&bp->phy_lock);
7377 	}
7378 
7379 	return 0;
7380 }
7381 
7382 static struct {
7383 	char string[ETH_GSTRING_LEN];
7384 } bnx2_stats_str_arr[] = {
7385 	{ "rx_bytes" },
7386 	{ "rx_error_bytes" },
7387 	{ "tx_bytes" },
7388 	{ "tx_error_bytes" },
7389 	{ "rx_ucast_packets" },
7390 	{ "rx_mcast_packets" },
7391 	{ "rx_bcast_packets" },
7392 	{ "tx_ucast_packets" },
7393 	{ "tx_mcast_packets" },
7394 	{ "tx_bcast_packets" },
7395 	{ "tx_mac_errors" },
7396 	{ "tx_carrier_errors" },
7397 	{ "rx_crc_errors" },
7398 	{ "rx_align_errors" },
7399 	{ "tx_single_collisions" },
7400 	{ "tx_multi_collisions" },
7401 	{ "tx_deferred" },
7402 	{ "tx_excess_collisions" },
7403 	{ "tx_late_collisions" },
7404 	{ "tx_total_collisions" },
7405 	{ "rx_fragments" },
7406 	{ "rx_jabbers" },
7407 	{ "rx_undersize_packets" },
7408 	{ "rx_oversize_packets" },
7409 	{ "rx_64_byte_packets" },
7410 	{ "rx_65_to_127_byte_packets" },
7411 	{ "rx_128_to_255_byte_packets" },
7412 	{ "rx_256_to_511_byte_packets" },
7413 	{ "rx_512_to_1023_byte_packets" },
7414 	{ "rx_1024_to_1522_byte_packets" },
7415 	{ "rx_1523_to_9022_byte_packets" },
7416 	{ "tx_64_byte_packets" },
7417 	{ "tx_65_to_127_byte_packets" },
7418 	{ "tx_128_to_255_byte_packets" },
7419 	{ "tx_256_to_511_byte_packets" },
7420 	{ "tx_512_to_1023_byte_packets" },
7421 	{ "tx_1024_to_1522_byte_packets" },
7422 	{ "tx_1523_to_9022_byte_packets" },
7423 	{ "rx_xon_frames" },
7424 	{ "rx_xoff_frames" },
7425 	{ "tx_xon_frames" },
7426 	{ "tx_xoff_frames" },
7427 	{ "rx_mac_ctrl_frames" },
7428 	{ "rx_filtered_packets" },
7429 	{ "rx_ftq_discards" },
7430 	{ "rx_discards" },
7431 	{ "rx_fw_discards" },
7432 };
7433 
7434 #define BNX2_NUM_STATS ARRAY_SIZE(bnx2_stats_str_arr)
7435 
7436 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
7437 
7438 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
7439     STATS_OFFSET32(stat_IfHCInOctets_hi),
7440     STATS_OFFSET32(stat_IfHCInBadOctets_hi),
7441     STATS_OFFSET32(stat_IfHCOutOctets_hi),
7442     STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
7443     STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
7444     STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
7445     STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
7446     STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
7447     STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
7448     STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
7449     STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
7450     STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
7451     STATS_OFFSET32(stat_Dot3StatsFCSErrors),
7452     STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
7453     STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
7454     STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
7455     STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
7456     STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
7457     STATS_OFFSET32(stat_Dot3StatsLateCollisions),
7458     STATS_OFFSET32(stat_EtherStatsCollisions),
7459     STATS_OFFSET32(stat_EtherStatsFragments),
7460     STATS_OFFSET32(stat_EtherStatsJabbers),
7461     STATS_OFFSET32(stat_EtherStatsUndersizePkts),
7462     STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
7463     STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
7464     STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
7465     STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
7466     STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
7467     STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
7468     STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
7469     STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
7470     STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
7471     STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
7472     STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
7473     STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
7474     STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
7475     STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
7476     STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
7477     STATS_OFFSET32(stat_XonPauseFramesReceived),
7478     STATS_OFFSET32(stat_XoffPauseFramesReceived),
7479     STATS_OFFSET32(stat_OutXonSent),
7480     STATS_OFFSET32(stat_OutXoffSent),
7481     STATS_OFFSET32(stat_MacControlFramesReceived),
7482     STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
7483     STATS_OFFSET32(stat_IfInFTQDiscards),
7484     STATS_OFFSET32(stat_IfInMBUFDiscards),
7485     STATS_OFFSET32(stat_FwRxDrop),
7486 };
7487 
7488 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
7489  * skipped because of errata.
7490  */
7491 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
7492 	8,0,8,8,8,8,8,8,8,8,
7493 	4,0,4,4,4,4,4,4,4,4,
7494 	4,4,4,4,4,4,4,4,4,4,
7495 	4,4,4,4,4,4,4,4,4,4,
7496 	4,4,4,4,4,4,4,
7497 };
7498 
7499 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
7500 	8,0,8,8,8,8,8,8,8,8,
7501 	4,4,4,4,4,4,4,4,4,4,
7502 	4,4,4,4,4,4,4,4,4,4,
7503 	4,4,4,4,4,4,4,4,4,4,
7504 	4,4,4,4,4,4,4,
7505 };
7506 
7507 #define BNX2_NUM_TESTS 6
7508 
7509 static struct {
7510 	char string[ETH_GSTRING_LEN];
7511 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
7512 	{ "register_test (offline)" },
7513 	{ "memory_test (offline)" },
7514 	{ "loopback_test (offline)" },
7515 	{ "nvram_test (online)" },
7516 	{ "interrupt_test (online)" },
7517 	{ "link_test (online)" },
7518 };
7519 
7520 static int
7521 bnx2_get_sset_count(struct net_device *dev, int sset)
7522 {
7523 	switch (sset) {
7524 	case ETH_SS_TEST:
7525 		return BNX2_NUM_TESTS;
7526 	case ETH_SS_STATS:
7527 		return BNX2_NUM_STATS;
7528 	default:
7529 		return -EOPNOTSUPP;
7530 	}
7531 }
7532 
7533 static void
7534 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
7535 {
7536 	struct bnx2 *bp = netdev_priv(dev);
7537 
7538 	bnx2_set_power_state(bp, PCI_D0);
7539 
7540 	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
7541 	if (etest->flags & ETH_TEST_FL_OFFLINE) {
7542 		int i;
7543 
7544 		bnx2_netif_stop(bp, true);
7545 		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
7546 		bnx2_free_skbs(bp);
7547 
7548 		if (bnx2_test_registers(bp) != 0) {
7549 			buf[0] = 1;
7550 			etest->flags |= ETH_TEST_FL_FAILED;
7551 		}
7552 		if (bnx2_test_memory(bp) != 0) {
7553 			buf[1] = 1;
7554 			etest->flags |= ETH_TEST_FL_FAILED;
7555 		}
7556 		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
7557 			etest->flags |= ETH_TEST_FL_FAILED;
7558 
7559 		if (!netif_running(bp->dev))
7560 			bnx2_shutdown_chip(bp);
7561 		else {
7562 			bnx2_init_nic(bp, 1);
7563 			bnx2_netif_start(bp, true);
7564 		}
7565 
7566 		/* wait for link up */
7567 		for (i = 0; i < 7; i++) {
7568 			if (bp->link_up)
7569 				break;
7570 			msleep_interruptible(1000);
7571 		}
7572 	}
7573 
7574 	if (bnx2_test_nvram(bp) != 0) {
7575 		buf[3] = 1;
7576 		etest->flags |= ETH_TEST_FL_FAILED;
7577 	}
7578 	if (bnx2_test_intr(bp) != 0) {
7579 		buf[4] = 1;
7580 		etest->flags |= ETH_TEST_FL_FAILED;
7581 	}
7582 
7583 	if (bnx2_test_link(bp) != 0) {
7584 		buf[5] = 1;
7585 		etest->flags |= ETH_TEST_FL_FAILED;
7586 
7587 	}
7588 	if (!netif_running(bp->dev))
7589 		bnx2_set_power_state(bp, PCI_D3hot);
7590 }
7591 
7592 static void
7593 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7594 {
7595 	switch (stringset) {
7596 	case ETH_SS_STATS:
7597 		memcpy(buf, bnx2_stats_str_arr,
7598 			sizeof(bnx2_stats_str_arr));
7599 		break;
7600 	case ETH_SS_TEST:
7601 		memcpy(buf, bnx2_tests_str_arr,
7602 			sizeof(bnx2_tests_str_arr));
7603 		break;
7604 	}
7605 }
7606 
7607 static void
7608 bnx2_get_ethtool_stats(struct net_device *dev,
7609 		struct ethtool_stats *stats, u64 *buf)
7610 {
7611 	struct bnx2 *bp = netdev_priv(dev);
7612 	int i;
7613 	u32 *hw_stats = (u32 *) bp->stats_blk;
7614 	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
7615 	u8 *stats_len_arr = NULL;
7616 
7617 	if (hw_stats == NULL) {
7618 		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
7619 		return;
7620 	}
7621 
7622 	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
7623 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) ||
7624 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A2) ||
7625 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
7626 		stats_len_arr = bnx2_5706_stats_len_arr;
7627 	else
7628 		stats_len_arr = bnx2_5708_stats_len_arr;
7629 
7630 	for (i = 0; i < BNX2_NUM_STATS; i++) {
7631 		unsigned long offset;
7632 
7633 		if (stats_len_arr[i] == 0) {
7634 			/* skip this counter */
7635 			buf[i] = 0;
7636 			continue;
7637 		}
7638 
7639 		offset = bnx2_stats_offset_arr[i];
7640 		if (stats_len_arr[i] == 4) {
7641 			/* 4-byte counter */
7642 			buf[i] = (u64) *(hw_stats + offset) +
7643 				 *(temp_stats + offset);
7644 			continue;
7645 		}
7646 		/* 8-byte counter */
7647 		buf[i] = (((u64) *(hw_stats + offset)) << 32) +
7648 			 *(hw_stats + offset + 1) +
7649 			 (((u64) *(temp_stats + offset)) << 32) +
7650 			 *(temp_stats + offset + 1);
7651 	}
7652 }
7653 
7654 static int
7655 bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
7656 {
7657 	struct bnx2 *bp = netdev_priv(dev);
7658 
7659 	switch (state) {
7660 	case ETHTOOL_ID_ACTIVE:
7661 		bnx2_set_power_state(bp, PCI_D0);
7662 
7663 		bp->leds_save = BNX2_RD(bp, BNX2_MISC_CFG);
7664 		BNX2_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
7665 		return 1;	/* cycle on/off once per second */
7666 
7667 	case ETHTOOL_ID_ON:
7668 		BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
7669 			BNX2_EMAC_LED_1000MB_OVERRIDE |
7670 			BNX2_EMAC_LED_100MB_OVERRIDE |
7671 			BNX2_EMAC_LED_10MB_OVERRIDE |
7672 			BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
7673 			BNX2_EMAC_LED_TRAFFIC);
7674 		break;
7675 
7676 	case ETHTOOL_ID_OFF:
7677 		BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
7678 		break;
7679 
7680 	case ETHTOOL_ID_INACTIVE:
7681 		BNX2_WR(bp, BNX2_EMAC_LED, 0);
7682 		BNX2_WR(bp, BNX2_MISC_CFG, bp->leds_save);
7683 
7684 		if (!netif_running(dev))
7685 			bnx2_set_power_state(bp, PCI_D3hot);
7686 		break;
7687 	}
7688 
7689 	return 0;
7690 }
7691 
7692 static netdev_features_t
7693 bnx2_fix_features(struct net_device *dev, netdev_features_t features)
7694 {
7695 	struct bnx2 *bp = netdev_priv(dev);
7696 
7697 	if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
7698 		features |= NETIF_F_HW_VLAN_CTAG_RX;
7699 
7700 	return features;
7701 }
7702 
7703 static int
7704 bnx2_set_features(struct net_device *dev, netdev_features_t features)
7705 {
7706 	struct bnx2 *bp = netdev_priv(dev);
7707 
7708 	/* TSO with VLAN tag won't work with current firmware */
7709 	if (features & NETIF_F_HW_VLAN_CTAG_TX)
7710 		dev->vlan_features |= (dev->hw_features & NETIF_F_ALL_TSO);
7711 	else
7712 		dev->vlan_features &= ~NETIF_F_ALL_TSO;
7713 
7714 	if ((!!(features & NETIF_F_HW_VLAN_CTAG_RX) !=
7715 	    !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) &&
7716 	    netif_running(dev)) {
7717 		bnx2_netif_stop(bp, false);
7718 		dev->features = features;
7719 		bnx2_set_rx_mode(dev);
7720 		bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
7721 		bnx2_netif_start(bp, false);
7722 		return 1;
7723 	}
7724 
7725 	return 0;
7726 }
7727 
7728 static void bnx2_get_channels(struct net_device *dev,
7729 			      struct ethtool_channels *channels)
7730 {
7731 	struct bnx2 *bp = netdev_priv(dev);
7732 	u32 max_rx_rings = 1;
7733 	u32 max_tx_rings = 1;
7734 
7735 	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7736 		max_rx_rings = RX_MAX_RINGS;
7737 		max_tx_rings = TX_MAX_RINGS;
7738 	}
7739 
7740 	channels->max_rx = max_rx_rings;
7741 	channels->max_tx = max_tx_rings;
7742 	channels->max_other = 0;
7743 	channels->max_combined = 0;
7744 	channels->rx_count = bp->num_rx_rings;
7745 	channels->tx_count = bp->num_tx_rings;
7746 	channels->other_count = 0;
7747 	channels->combined_count = 0;
7748 }
7749 
7750 static int bnx2_set_channels(struct net_device *dev,
7751 			      struct ethtool_channels *channels)
7752 {
7753 	struct bnx2 *bp = netdev_priv(dev);
7754 	u32 max_rx_rings = 1;
7755 	u32 max_tx_rings = 1;
7756 	int rc = 0;
7757 
7758 	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7759 		max_rx_rings = RX_MAX_RINGS;
7760 		max_tx_rings = TX_MAX_RINGS;
7761 	}
7762 	if (channels->rx_count > max_rx_rings ||
7763 	    channels->tx_count > max_tx_rings)
7764 		return -EINVAL;
7765 
7766 	bp->num_req_rx_rings = channels->rx_count;
7767 	bp->num_req_tx_rings = channels->tx_count;
7768 
7769 	if (netif_running(dev))
7770 		rc = bnx2_change_ring_size(bp, bp->rx_ring_size,
7771 					   bp->tx_ring_size, true);
7772 
7773 	return rc;
7774 }
7775 
7776 static const struct ethtool_ops bnx2_ethtool_ops = {
7777 	.get_settings		= bnx2_get_settings,
7778 	.set_settings		= bnx2_set_settings,
7779 	.get_drvinfo		= bnx2_get_drvinfo,
7780 	.get_regs_len		= bnx2_get_regs_len,
7781 	.get_regs		= bnx2_get_regs,
7782 	.get_wol		= bnx2_get_wol,
7783 	.set_wol		= bnx2_set_wol,
7784 	.nway_reset		= bnx2_nway_reset,
7785 	.get_link		= bnx2_get_link,
7786 	.get_eeprom_len		= bnx2_get_eeprom_len,
7787 	.get_eeprom		= bnx2_get_eeprom,
7788 	.set_eeprom		= bnx2_set_eeprom,
7789 	.get_coalesce		= bnx2_get_coalesce,
7790 	.set_coalesce		= bnx2_set_coalesce,
7791 	.get_ringparam		= bnx2_get_ringparam,
7792 	.set_ringparam		= bnx2_set_ringparam,
7793 	.get_pauseparam		= bnx2_get_pauseparam,
7794 	.set_pauseparam		= bnx2_set_pauseparam,
7795 	.self_test		= bnx2_self_test,
7796 	.get_strings		= bnx2_get_strings,
7797 	.set_phys_id		= bnx2_set_phys_id,
7798 	.get_ethtool_stats	= bnx2_get_ethtool_stats,
7799 	.get_sset_count		= bnx2_get_sset_count,
7800 	.get_channels		= bnx2_get_channels,
7801 	.set_channels		= bnx2_set_channels,
7802 };
7803 
7804 /* Called with rtnl_lock */
7805 static int
7806 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7807 {
7808 	struct mii_ioctl_data *data = if_mii(ifr);
7809 	struct bnx2 *bp = netdev_priv(dev);
7810 	int err;
7811 
7812 	switch(cmd) {
7813 	case SIOCGMIIPHY:
7814 		data->phy_id = bp->phy_addr;
7815 
7816 		/* fallthru */
7817 	case SIOCGMIIREG: {
7818 		u32 mii_regval;
7819 
7820 		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7821 			return -EOPNOTSUPP;
7822 
7823 		if (!netif_running(dev))
7824 			return -EAGAIN;
7825 
7826 		spin_lock_bh(&bp->phy_lock);
7827 		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
7828 		spin_unlock_bh(&bp->phy_lock);
7829 
7830 		data->val_out = mii_regval;
7831 
7832 		return err;
7833 	}
7834 
7835 	case SIOCSMIIREG:
7836 		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7837 			return -EOPNOTSUPP;
7838 
7839 		if (!netif_running(dev))
7840 			return -EAGAIN;
7841 
7842 		spin_lock_bh(&bp->phy_lock);
7843 		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
7844 		spin_unlock_bh(&bp->phy_lock);
7845 
7846 		return err;
7847 
7848 	default:
7849 		/* do nothing */
7850 		break;
7851 	}
7852 	return -EOPNOTSUPP;
7853 }
7854 
7855 /* Called with rtnl_lock */
7856 static int
7857 bnx2_change_mac_addr(struct net_device *dev, void *p)
7858 {
7859 	struct sockaddr *addr = p;
7860 	struct bnx2 *bp = netdev_priv(dev);
7861 
7862 	if (!is_valid_ether_addr(addr->sa_data))
7863 		return -EADDRNOTAVAIL;
7864 
7865 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7866 	if (netif_running(dev))
7867 		bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7868 
7869 	return 0;
7870 }
7871 
7872 /* Called with rtnl_lock */
7873 static int
7874 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7875 {
7876 	struct bnx2 *bp = netdev_priv(dev);
7877 
7878 	if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7879 		((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7880 		return -EINVAL;
7881 
7882 	dev->mtu = new_mtu;
7883 	return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size,
7884 				     false);
7885 }
7886 
7887 #ifdef CONFIG_NET_POLL_CONTROLLER
7888 static void
7889 poll_bnx2(struct net_device *dev)
7890 {
7891 	struct bnx2 *bp = netdev_priv(dev);
7892 	int i;
7893 
7894 	for (i = 0; i < bp->irq_nvecs; i++) {
7895 		struct bnx2_irq *irq = &bp->irq_tbl[i];
7896 
7897 		disable_irq(irq->vector);
7898 		irq->handler(irq->vector, &bp->bnx2_napi[i]);
7899 		enable_irq(irq->vector);
7900 	}
7901 }
7902 #endif
7903 
7904 static void
7905 bnx2_get_5709_media(struct bnx2 *bp)
7906 {
7907 	u32 val = BNX2_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7908 	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7909 	u32 strap;
7910 
7911 	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7912 		return;
7913 	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7914 		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7915 		return;
7916 	}
7917 
7918 	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7919 		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7920 	else
7921 		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7922 
7923 	if (bp->func == 0) {
7924 		switch (strap) {
7925 		case 0x4:
7926 		case 0x5:
7927 		case 0x6:
7928 			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7929 			return;
7930 		}
7931 	} else {
7932 		switch (strap) {
7933 		case 0x1:
7934 		case 0x2:
7935 		case 0x4:
7936 			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7937 			return;
7938 		}
7939 	}
7940 }
7941 
7942 static void
7943 bnx2_get_pci_speed(struct bnx2 *bp)
7944 {
7945 	u32 reg;
7946 
7947 	reg = BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS);
7948 	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
7949 		u32 clkreg;
7950 
7951 		bp->flags |= BNX2_FLAG_PCIX;
7952 
7953 		clkreg = BNX2_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
7954 
7955 		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
7956 		switch (clkreg) {
7957 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
7958 			bp->bus_speed_mhz = 133;
7959 			break;
7960 
7961 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
7962 			bp->bus_speed_mhz = 100;
7963 			break;
7964 
7965 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
7966 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
7967 			bp->bus_speed_mhz = 66;
7968 			break;
7969 
7970 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
7971 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
7972 			bp->bus_speed_mhz = 50;
7973 			break;
7974 
7975 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
7976 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
7977 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
7978 			bp->bus_speed_mhz = 33;
7979 			break;
7980 		}
7981 	}
7982 	else {
7983 		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
7984 			bp->bus_speed_mhz = 66;
7985 		else
7986 			bp->bus_speed_mhz = 33;
7987 	}
7988 
7989 	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
7990 		bp->flags |= BNX2_FLAG_PCI_32BIT;
7991 
7992 }
7993 
7994 static void
7995 bnx2_read_vpd_fw_ver(struct bnx2 *bp)
7996 {
7997 	int rc, i, j;
7998 	u8 *data;
7999 	unsigned int block_end, rosize, len;
8000 
8001 #define BNX2_VPD_NVRAM_OFFSET	0x300
8002 #define BNX2_VPD_LEN		128
8003 #define BNX2_MAX_VER_SLEN	30
8004 
8005 	data = kmalloc(256, GFP_KERNEL);
8006 	if (!data)
8007 		return;
8008 
8009 	rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
8010 			     BNX2_VPD_LEN);
8011 	if (rc)
8012 		goto vpd_done;
8013 
8014 	for (i = 0; i < BNX2_VPD_LEN; i += 4) {
8015 		data[i] = data[i + BNX2_VPD_LEN + 3];
8016 		data[i + 1] = data[i + BNX2_VPD_LEN + 2];
8017 		data[i + 2] = data[i + BNX2_VPD_LEN + 1];
8018 		data[i + 3] = data[i + BNX2_VPD_LEN];
8019 	}
8020 
8021 	i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
8022 	if (i < 0)
8023 		goto vpd_done;
8024 
8025 	rosize = pci_vpd_lrdt_size(&data[i]);
8026 	i += PCI_VPD_LRDT_TAG_SIZE;
8027 	block_end = i + rosize;
8028 
8029 	if (block_end > BNX2_VPD_LEN)
8030 		goto vpd_done;
8031 
8032 	j = pci_vpd_find_info_keyword(data, i, rosize,
8033 				      PCI_VPD_RO_KEYWORD_MFR_ID);
8034 	if (j < 0)
8035 		goto vpd_done;
8036 
8037 	len = pci_vpd_info_field_size(&data[j]);
8038 
8039 	j += PCI_VPD_INFO_FLD_HDR_SIZE;
8040 	if (j + len > block_end || len != 4 ||
8041 	    memcmp(&data[j], "1028", 4))
8042 		goto vpd_done;
8043 
8044 	j = pci_vpd_find_info_keyword(data, i, rosize,
8045 				      PCI_VPD_RO_KEYWORD_VENDOR0);
8046 	if (j < 0)
8047 		goto vpd_done;
8048 
8049 	len = pci_vpd_info_field_size(&data[j]);
8050 
8051 	j += PCI_VPD_INFO_FLD_HDR_SIZE;
8052 	if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
8053 		goto vpd_done;
8054 
8055 	memcpy(bp->fw_version, &data[j], len);
8056 	bp->fw_version[len] = ' ';
8057 
8058 vpd_done:
8059 	kfree(data);
8060 }
8061 
8062 static int
8063 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
8064 {
8065 	struct bnx2 *bp;
8066 	int rc, i, j;
8067 	u32 reg;
8068 	u64 dma_mask, persist_dma_mask;
8069 	int err;
8070 
8071 	SET_NETDEV_DEV(dev, &pdev->dev);
8072 	bp = netdev_priv(dev);
8073 
8074 	bp->flags = 0;
8075 	bp->phy_flags = 0;
8076 
8077 	bp->temp_stats_blk =
8078 		kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
8079 
8080 	if (bp->temp_stats_blk == NULL) {
8081 		rc = -ENOMEM;
8082 		goto err_out;
8083 	}
8084 
8085 	/* enable device (incl. PCI PM wakeup), and bus-mastering */
8086 	rc = pci_enable_device(pdev);
8087 	if (rc) {
8088 		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
8089 		goto err_out;
8090 	}
8091 
8092 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
8093 		dev_err(&pdev->dev,
8094 			"Cannot find PCI device base address, aborting\n");
8095 		rc = -ENODEV;
8096 		goto err_out_disable;
8097 	}
8098 
8099 	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
8100 	if (rc) {
8101 		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
8102 		goto err_out_disable;
8103 	}
8104 
8105 	pci_set_master(pdev);
8106 
8107 	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
8108 	if (bp->pm_cap == 0) {
8109 		dev_err(&pdev->dev,
8110 			"Cannot find power management capability, aborting\n");
8111 		rc = -EIO;
8112 		goto err_out_release;
8113 	}
8114 
8115 	bp->dev = dev;
8116 	bp->pdev = pdev;
8117 
8118 	spin_lock_init(&bp->phy_lock);
8119 	spin_lock_init(&bp->indirect_lock);
8120 #ifdef BCM_CNIC
8121 	mutex_init(&bp->cnic_lock);
8122 #endif
8123 	INIT_WORK(&bp->reset_task, bnx2_reset_task);
8124 
8125 	bp->regview = pci_iomap(pdev, 0, MB_GET_CID_ADDR(TX_TSS_CID +
8126 							 TX_MAX_TSS_RINGS + 1));
8127 	if (!bp->regview) {
8128 		dev_err(&pdev->dev, "Cannot map register space, aborting\n");
8129 		rc = -ENOMEM;
8130 		goto err_out_release;
8131 	}
8132 
8133 	bnx2_set_power_state(bp, PCI_D0);
8134 
8135 	/* Configure byte swap and enable write to the reg_window registers.
8136 	 * Rely on CPU to do target byte swapping on big endian systems
8137 	 * The chip's target access swapping will not swap all accesses
8138 	 */
8139 	BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG,
8140 		BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
8141 		BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
8142 
8143 	bp->chip_id = BNX2_RD(bp, BNX2_MISC_ID);
8144 
8145 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
8146 		if (!pci_is_pcie(pdev)) {
8147 			dev_err(&pdev->dev, "Not PCIE, aborting\n");
8148 			rc = -EIO;
8149 			goto err_out_unmap;
8150 		}
8151 		bp->flags |= BNX2_FLAG_PCIE;
8152 		if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
8153 			bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
8154 
8155 		/* AER (Advanced Error Reporting) hooks */
8156 		err = pci_enable_pcie_error_reporting(pdev);
8157 		if (!err)
8158 			bp->flags |= BNX2_FLAG_AER_ENABLED;
8159 
8160 	} else {
8161 		bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
8162 		if (bp->pcix_cap == 0) {
8163 			dev_err(&pdev->dev,
8164 				"Cannot find PCIX capability, aborting\n");
8165 			rc = -EIO;
8166 			goto err_out_unmap;
8167 		}
8168 		bp->flags |= BNX2_FLAG_BROKEN_STATS;
8169 	}
8170 
8171 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
8172 	    BNX2_CHIP_REV(bp) != BNX2_CHIP_REV_Ax) {
8173 		if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
8174 			bp->flags |= BNX2_FLAG_MSIX_CAP;
8175 	}
8176 
8177 	if (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0 &&
8178 	    BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A1) {
8179 		if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
8180 			bp->flags |= BNX2_FLAG_MSI_CAP;
8181 	}
8182 
8183 	/* 5708 cannot support DMA addresses > 40-bit.  */
8184 	if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
8185 		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
8186 	else
8187 		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
8188 
8189 	/* Configure DMA attributes. */
8190 	if (pci_set_dma_mask(pdev, dma_mask) == 0) {
8191 		dev->features |= NETIF_F_HIGHDMA;
8192 		rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
8193 		if (rc) {
8194 			dev_err(&pdev->dev,
8195 				"pci_set_consistent_dma_mask failed, aborting\n");
8196 			goto err_out_unmap;
8197 		}
8198 	} else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
8199 		dev_err(&pdev->dev, "System does not support DMA, aborting\n");
8200 		goto err_out_unmap;
8201 	}
8202 
8203 	if (!(bp->flags & BNX2_FLAG_PCIE))
8204 		bnx2_get_pci_speed(bp);
8205 
8206 	/* 5706A0 may falsely detect SERR and PERR. */
8207 	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
8208 		reg = BNX2_RD(bp, PCI_COMMAND);
8209 		reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
8210 		BNX2_WR(bp, PCI_COMMAND, reg);
8211 	} else if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) &&
8212 		!(bp->flags & BNX2_FLAG_PCIX)) {
8213 
8214 		dev_err(&pdev->dev,
8215 			"5706 A1 can only be used in a PCIX bus, aborting\n");
8216 		goto err_out_unmap;
8217 	}
8218 
8219 	bnx2_init_nvram(bp);
8220 
8221 	reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
8222 
8223 	if (bnx2_reg_rd_ind(bp, BNX2_MCP_TOE_ID) & BNX2_MCP_TOE_ID_FUNCTION_ID)
8224 		bp->func = 1;
8225 
8226 	if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
8227 	    BNX2_SHM_HDR_SIGNATURE_SIG) {
8228 		u32 off = bp->func << 2;
8229 
8230 		bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
8231 	} else
8232 		bp->shmem_base = HOST_VIEW_SHMEM_BASE;
8233 
8234 	/* Get the permanent MAC address.  First we need to make sure the
8235 	 * firmware is actually running.
8236 	 */
8237 	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
8238 
8239 	if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
8240 	    BNX2_DEV_INFO_SIGNATURE_MAGIC) {
8241 		dev_err(&pdev->dev, "Firmware not running, aborting\n");
8242 		rc = -ENODEV;
8243 		goto err_out_unmap;
8244 	}
8245 
8246 	bnx2_read_vpd_fw_ver(bp);
8247 
8248 	j = strlen(bp->fw_version);
8249 	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
8250 	for (i = 0; i < 3 && j < 24; i++) {
8251 		u8 num, k, skip0;
8252 
8253 		if (i == 0) {
8254 			bp->fw_version[j++] = 'b';
8255 			bp->fw_version[j++] = 'c';
8256 			bp->fw_version[j++] = ' ';
8257 		}
8258 		num = (u8) (reg >> (24 - (i * 8)));
8259 		for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
8260 			if (num >= k || !skip0 || k == 1) {
8261 				bp->fw_version[j++] = (num / k) + '0';
8262 				skip0 = 0;
8263 			}
8264 		}
8265 		if (i != 2)
8266 			bp->fw_version[j++] = '.';
8267 	}
8268 	reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
8269 	if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
8270 		bp->wol = 1;
8271 
8272 	if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
8273 		bp->flags |= BNX2_FLAG_ASF_ENABLE;
8274 
8275 		for (i = 0; i < 30; i++) {
8276 			reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8277 			if (reg & BNX2_CONDITION_MFW_RUN_MASK)
8278 				break;
8279 			msleep(10);
8280 		}
8281 	}
8282 	reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8283 	reg &= BNX2_CONDITION_MFW_RUN_MASK;
8284 	if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
8285 	    reg != BNX2_CONDITION_MFW_RUN_NONE) {
8286 		u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
8287 
8288 		if (j < 32)
8289 			bp->fw_version[j++] = ' ';
8290 		for (i = 0; i < 3 && j < 28; i++) {
8291 			reg = bnx2_reg_rd_ind(bp, addr + i * 4);
8292 			reg = be32_to_cpu(reg);
8293 			memcpy(&bp->fw_version[j], &reg, 4);
8294 			j += 4;
8295 		}
8296 	}
8297 
8298 	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
8299 	bp->mac_addr[0] = (u8) (reg >> 8);
8300 	bp->mac_addr[1] = (u8) reg;
8301 
8302 	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
8303 	bp->mac_addr[2] = (u8) (reg >> 24);
8304 	bp->mac_addr[3] = (u8) (reg >> 16);
8305 	bp->mac_addr[4] = (u8) (reg >> 8);
8306 	bp->mac_addr[5] = (u8) reg;
8307 
8308 	bp->tx_ring_size = BNX2_MAX_TX_DESC_CNT;
8309 	bnx2_set_rx_ring_size(bp, 255);
8310 
8311 	bp->tx_quick_cons_trip_int = 2;
8312 	bp->tx_quick_cons_trip = 20;
8313 	bp->tx_ticks_int = 18;
8314 	bp->tx_ticks = 80;
8315 
8316 	bp->rx_quick_cons_trip_int = 2;
8317 	bp->rx_quick_cons_trip = 12;
8318 	bp->rx_ticks_int = 18;
8319 	bp->rx_ticks = 18;
8320 
8321 	bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
8322 
8323 	bp->current_interval = BNX2_TIMER_INTERVAL;
8324 
8325 	bp->phy_addr = 1;
8326 
8327 	/* Disable WOL support if we are running on a SERDES chip. */
8328 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
8329 		bnx2_get_5709_media(bp);
8330 	else if (BNX2_CHIP_BOND(bp) & BNX2_CHIP_BOND_SERDES_BIT)
8331 		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
8332 
8333 	bp->phy_port = PORT_TP;
8334 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
8335 		bp->phy_port = PORT_FIBRE;
8336 		reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
8337 		if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
8338 			bp->flags |= BNX2_FLAG_NO_WOL;
8339 			bp->wol = 0;
8340 		}
8341 		if (BNX2_CHIP(bp) == BNX2_CHIP_5706) {
8342 			/* Don't do parallel detect on this board because of
8343 			 * some board problems.  The link will not go down
8344 			 * if we do parallel detect.
8345 			 */
8346 			if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
8347 			    pdev->subsystem_device == 0x310c)
8348 				bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
8349 		} else {
8350 			bp->phy_addr = 2;
8351 			if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
8352 				bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
8353 		}
8354 	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5706 ||
8355 		   BNX2_CHIP(bp) == BNX2_CHIP_5708)
8356 		bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
8357 	else if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
8358 		 (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax ||
8359 		  BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Bx))
8360 		bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
8361 
8362 	bnx2_init_fw_cap(bp);
8363 
8364 	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
8365 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
8366 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1) ||
8367 	    !(BNX2_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
8368 		bp->flags |= BNX2_FLAG_NO_WOL;
8369 		bp->wol = 0;
8370 	}
8371 
8372 	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
8373 		bp->tx_quick_cons_trip_int =
8374 			bp->tx_quick_cons_trip;
8375 		bp->tx_ticks_int = bp->tx_ticks;
8376 		bp->rx_quick_cons_trip_int =
8377 			bp->rx_quick_cons_trip;
8378 		bp->rx_ticks_int = bp->rx_ticks;
8379 		bp->comp_prod_trip_int = bp->comp_prod_trip;
8380 		bp->com_ticks_int = bp->com_ticks;
8381 		bp->cmd_ticks_int = bp->cmd_ticks;
8382 	}
8383 
8384 	/* Disable MSI on 5706 if AMD 8132 bridge is found.
8385 	 *
8386 	 * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
8387 	 * with byte enables disabled on the unused 32-bit word.  This is legal
8388 	 * but causes problems on the AMD 8132 which will eventually stop
8389 	 * responding after a while.
8390 	 *
8391 	 * AMD believes this incompatibility is unique to the 5706, and
8392 	 * prefers to locally disable MSI rather than globally disabling it.
8393 	 */
8394 	if (BNX2_CHIP(bp) == BNX2_CHIP_5706 && disable_msi == 0) {
8395 		struct pci_dev *amd_8132 = NULL;
8396 
8397 		while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
8398 						  PCI_DEVICE_ID_AMD_8132_BRIDGE,
8399 						  amd_8132))) {
8400 
8401 			if (amd_8132->revision >= 0x10 &&
8402 			    amd_8132->revision <= 0x13) {
8403 				disable_msi = 1;
8404 				pci_dev_put(amd_8132);
8405 				break;
8406 			}
8407 		}
8408 	}
8409 
8410 	bnx2_set_default_link(bp);
8411 	bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
8412 
8413 	init_timer(&bp->timer);
8414 	bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
8415 	bp->timer.data = (unsigned long) bp;
8416 	bp->timer.function = bnx2_timer;
8417 
8418 #ifdef BCM_CNIC
8419 	if (bnx2_shmem_rd(bp, BNX2_ISCSI_INITIATOR) & BNX2_ISCSI_INITIATOR_EN)
8420 		bp->cnic_eth_dev.max_iscsi_conn =
8421 			(bnx2_shmem_rd(bp, BNX2_ISCSI_MAX_CONN) &
8422 			 BNX2_ISCSI_MAX_CONN_MASK) >> BNX2_ISCSI_MAX_CONN_SHIFT;
8423 	bp->cnic_probe = bnx2_cnic_probe;
8424 #endif
8425 	pci_save_state(pdev);
8426 
8427 	return 0;
8428 
8429 err_out_unmap:
8430 	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
8431 		pci_disable_pcie_error_reporting(pdev);
8432 		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
8433 	}
8434 
8435 	pci_iounmap(pdev, bp->regview);
8436 	bp->regview = NULL;
8437 
8438 err_out_release:
8439 	pci_release_regions(pdev);
8440 
8441 err_out_disable:
8442 	pci_disable_device(pdev);
8443 	pci_set_drvdata(pdev, NULL);
8444 
8445 err_out:
8446 	return rc;
8447 }
8448 
8449 static char *
8450 bnx2_bus_string(struct bnx2 *bp, char *str)
8451 {
8452 	char *s = str;
8453 
8454 	if (bp->flags & BNX2_FLAG_PCIE) {
8455 		s += sprintf(s, "PCI Express");
8456 	} else {
8457 		s += sprintf(s, "PCI");
8458 		if (bp->flags & BNX2_FLAG_PCIX)
8459 			s += sprintf(s, "-X");
8460 		if (bp->flags & BNX2_FLAG_PCI_32BIT)
8461 			s += sprintf(s, " 32-bit");
8462 		else
8463 			s += sprintf(s, " 64-bit");
8464 		s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
8465 	}
8466 	return str;
8467 }
8468 
8469 static void
8470 bnx2_del_napi(struct bnx2 *bp)
8471 {
8472 	int i;
8473 
8474 	for (i = 0; i < bp->irq_nvecs; i++)
8475 		netif_napi_del(&bp->bnx2_napi[i].napi);
8476 }
8477 
8478 static void
8479 bnx2_init_napi(struct bnx2 *bp)
8480 {
8481 	int i;
8482 
8483 	for (i = 0; i < bp->irq_nvecs; i++) {
8484 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8485 		int (*poll)(struct napi_struct *, int);
8486 
8487 		if (i == 0)
8488 			poll = bnx2_poll;
8489 		else
8490 			poll = bnx2_poll_msix;
8491 
8492 		netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
8493 		bnapi->bp = bp;
8494 	}
8495 }
8496 
8497 static const struct net_device_ops bnx2_netdev_ops = {
8498 	.ndo_open		= bnx2_open,
8499 	.ndo_start_xmit		= bnx2_start_xmit,
8500 	.ndo_stop		= bnx2_close,
8501 	.ndo_get_stats64	= bnx2_get_stats64,
8502 	.ndo_set_rx_mode	= bnx2_set_rx_mode,
8503 	.ndo_do_ioctl		= bnx2_ioctl,
8504 	.ndo_validate_addr	= eth_validate_addr,
8505 	.ndo_set_mac_address	= bnx2_change_mac_addr,
8506 	.ndo_change_mtu		= bnx2_change_mtu,
8507 	.ndo_fix_features	= bnx2_fix_features,
8508 	.ndo_set_features	= bnx2_set_features,
8509 	.ndo_tx_timeout		= bnx2_tx_timeout,
8510 #ifdef CONFIG_NET_POLL_CONTROLLER
8511 	.ndo_poll_controller	= poll_bnx2,
8512 #endif
8513 };
8514 
8515 static int
8516 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8517 {
8518 	static int version_printed = 0;
8519 	struct net_device *dev;
8520 	struct bnx2 *bp;
8521 	int rc;
8522 	char str[40];
8523 
8524 	if (version_printed++ == 0)
8525 		pr_info("%s", version);
8526 
8527 	/* dev zeroed in init_etherdev */
8528 	dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
8529 	if (!dev)
8530 		return -ENOMEM;
8531 
8532 	rc = bnx2_init_board(pdev, dev);
8533 	if (rc < 0)
8534 		goto err_free;
8535 
8536 	dev->netdev_ops = &bnx2_netdev_ops;
8537 	dev->watchdog_timeo = TX_TIMEOUT;
8538 	dev->ethtool_ops = &bnx2_ethtool_ops;
8539 
8540 	bp = netdev_priv(dev);
8541 
8542 	pci_set_drvdata(pdev, dev);
8543 
8544 	memcpy(dev->dev_addr, bp->mac_addr, 6);
8545 
8546 	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
8547 		NETIF_F_TSO | NETIF_F_TSO_ECN |
8548 		NETIF_F_RXHASH | NETIF_F_RXCSUM;
8549 
8550 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
8551 		dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
8552 
8553 	dev->vlan_features = dev->hw_features;
8554 	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
8555 	dev->features |= dev->hw_features;
8556 	dev->priv_flags |= IFF_UNICAST_FLT;
8557 
8558 	if ((rc = register_netdev(dev))) {
8559 		dev_err(&pdev->dev, "Cannot register net device\n");
8560 		goto error;
8561 	}
8562 
8563 	netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, "
8564 		    "node addr %pM\n", board_info[ent->driver_data].name,
8565 		    ((BNX2_CHIP_ID(bp) & 0xf000) >> 12) + 'A',
8566 		    ((BNX2_CHIP_ID(bp) & 0x0ff0) >> 4),
8567 		    bnx2_bus_string(bp, str), (long)pci_resource_start(pdev, 0),
8568 		    pdev->irq, dev->dev_addr);
8569 
8570 	return 0;
8571 
8572 error:
8573 	pci_iounmap(pdev, bp->regview);
8574 	pci_release_regions(pdev);
8575 	pci_disable_device(pdev);
8576 	pci_set_drvdata(pdev, NULL);
8577 err_free:
8578 	free_netdev(dev);
8579 	return rc;
8580 }
8581 
8582 static void
8583 bnx2_remove_one(struct pci_dev *pdev)
8584 {
8585 	struct net_device *dev = pci_get_drvdata(pdev);
8586 	struct bnx2 *bp = netdev_priv(dev);
8587 
8588 	unregister_netdev(dev);
8589 
8590 	del_timer_sync(&bp->timer);
8591 	cancel_work_sync(&bp->reset_task);
8592 
8593 	pci_iounmap(bp->pdev, bp->regview);
8594 
8595 	kfree(bp->temp_stats_blk);
8596 
8597 	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
8598 		pci_disable_pcie_error_reporting(pdev);
8599 		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
8600 	}
8601 
8602 	bnx2_release_firmware(bp);
8603 
8604 	free_netdev(dev);
8605 
8606 	pci_release_regions(pdev);
8607 	pci_disable_device(pdev);
8608 	pci_set_drvdata(pdev, NULL);
8609 }
8610 
8611 static int
8612 bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
8613 {
8614 	struct net_device *dev = pci_get_drvdata(pdev);
8615 	struct bnx2 *bp = netdev_priv(dev);
8616 
8617 	/* PCI register 4 needs to be saved whether netif_running() or not.
8618 	 * MSI address and data need to be saved if using MSI and
8619 	 * netif_running().
8620 	 */
8621 	pci_save_state(pdev);
8622 	if (!netif_running(dev))
8623 		return 0;
8624 
8625 	cancel_work_sync(&bp->reset_task);
8626 	bnx2_netif_stop(bp, true);
8627 	netif_device_detach(dev);
8628 	del_timer_sync(&bp->timer);
8629 	bnx2_shutdown_chip(bp);
8630 	bnx2_free_skbs(bp);
8631 	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
8632 	return 0;
8633 }
8634 
8635 static int
8636 bnx2_resume(struct pci_dev *pdev)
8637 {
8638 	struct net_device *dev = pci_get_drvdata(pdev);
8639 	struct bnx2 *bp = netdev_priv(dev);
8640 
8641 	pci_restore_state(pdev);
8642 	if (!netif_running(dev))
8643 		return 0;
8644 
8645 	bnx2_set_power_state(bp, PCI_D0);
8646 	netif_device_attach(dev);
8647 	bnx2_init_nic(bp, 1);
8648 	bnx2_netif_start(bp, true);
8649 	return 0;
8650 }
8651 
8652 /**
8653  * bnx2_io_error_detected - called when PCI error is detected
8654  * @pdev: Pointer to PCI device
8655  * @state: The current pci connection state
8656  *
8657  * This function is called after a PCI bus error affecting
8658  * this device has been detected.
8659  */
8660 static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
8661 					       pci_channel_state_t state)
8662 {
8663 	struct net_device *dev = pci_get_drvdata(pdev);
8664 	struct bnx2 *bp = netdev_priv(dev);
8665 
8666 	rtnl_lock();
8667 	netif_device_detach(dev);
8668 
8669 	if (state == pci_channel_io_perm_failure) {
8670 		rtnl_unlock();
8671 		return PCI_ERS_RESULT_DISCONNECT;
8672 	}
8673 
8674 	if (netif_running(dev)) {
8675 		bnx2_netif_stop(bp, true);
8676 		del_timer_sync(&bp->timer);
8677 		bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
8678 	}
8679 
8680 	pci_disable_device(pdev);
8681 	rtnl_unlock();
8682 
8683 	/* Request a slot slot reset. */
8684 	return PCI_ERS_RESULT_NEED_RESET;
8685 }
8686 
8687 /**
8688  * bnx2_io_slot_reset - called after the pci bus has been reset.
8689  * @pdev: Pointer to PCI device
8690  *
8691  * Restart the card from scratch, as if from a cold-boot.
8692  */
8693 static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
8694 {
8695 	struct net_device *dev = pci_get_drvdata(pdev);
8696 	struct bnx2 *bp = netdev_priv(dev);
8697 	pci_ers_result_t result;
8698 	int err;
8699 
8700 	rtnl_lock();
8701 	if (pci_enable_device(pdev)) {
8702 		dev_err(&pdev->dev,
8703 			"Cannot re-enable PCI device after reset\n");
8704 		result = PCI_ERS_RESULT_DISCONNECT;
8705 	} else {
8706 		pci_set_master(pdev);
8707 		pci_restore_state(pdev);
8708 		pci_save_state(pdev);
8709 
8710 		if (netif_running(dev)) {
8711 			bnx2_set_power_state(bp, PCI_D0);
8712 			bnx2_init_nic(bp, 1);
8713 		}
8714 		result = PCI_ERS_RESULT_RECOVERED;
8715 	}
8716 	rtnl_unlock();
8717 
8718 	if (!(bp->flags & BNX2_FLAG_AER_ENABLED))
8719 		return result;
8720 
8721 	err = pci_cleanup_aer_uncorrect_error_status(pdev);
8722 	if (err) {
8723 		dev_err(&pdev->dev,
8724 			"pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
8725 			 err); /* non-fatal, continue */
8726 	}
8727 
8728 	return result;
8729 }
8730 
8731 /**
8732  * bnx2_io_resume - called when traffic can start flowing again.
8733  * @pdev: Pointer to PCI device
8734  *
8735  * This callback is called when the error recovery driver tells us that
8736  * its OK to resume normal operation.
8737  */
8738 static void bnx2_io_resume(struct pci_dev *pdev)
8739 {
8740 	struct net_device *dev = pci_get_drvdata(pdev);
8741 	struct bnx2 *bp = netdev_priv(dev);
8742 
8743 	rtnl_lock();
8744 	if (netif_running(dev))
8745 		bnx2_netif_start(bp, true);
8746 
8747 	netif_device_attach(dev);
8748 	rtnl_unlock();
8749 }
8750 
8751 static const struct pci_error_handlers bnx2_err_handler = {
8752 	.error_detected	= bnx2_io_error_detected,
8753 	.slot_reset	= bnx2_io_slot_reset,
8754 	.resume		= bnx2_io_resume,
8755 };
8756 
8757 static struct pci_driver bnx2_pci_driver = {
8758 	.name		= DRV_MODULE_NAME,
8759 	.id_table	= bnx2_pci_tbl,
8760 	.probe		= bnx2_init_one,
8761 	.remove		= bnx2_remove_one,
8762 	.suspend	= bnx2_suspend,
8763 	.resume		= bnx2_resume,
8764 	.err_handler	= &bnx2_err_handler,
8765 };
8766 
8767 static int __init bnx2_init(void)
8768 {
8769 	return pci_register_driver(&bnx2_pci_driver);
8770 }
8771 
8772 static void __exit bnx2_cleanup(void)
8773 {
8774 	pci_unregister_driver(&bnx2_pci_driver);
8775 }
8776 
8777 module_init(bnx2_init);
8778 module_exit(bnx2_cleanup);
8779 
8780 
8781 
8782