/* bnx2.c: QLogic NX2 network driver.
 *
 * Copyright (c) 2004-2014 Broadcom Corporation
 * Copyright (c) 2014 QLogic Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan  (mchan@broadcom.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>

#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <linux/aer.h>

#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
#define BCM_CNIC 1
#include "cnic_if.h"
#endif
#include "bnx2.h"
#include "bnx2_fw.h"

#define DRV_MODULE_NAME		"bnx2"
#define DRV_MODULE_VERSION	"2.2.5"
#define DRV_MODULE_RELDATE	"December 20, 2013"
#define FW_MIPS_FILE_06		"bnx2/bnx2-mips-06-6.2.3.fw"
#define FW_RV2P_FILE_06		"bnx2/bnx2-rv2p-06-6.0.15.fw"
#define FW_MIPS_FILE_09		"bnx2/bnx2-mips-09-6.2.1b.fw"
#define FW_RV2P_FILE_09_Ax	"bnx2/bnx2-rv2p-09ax-6.0.17.fw"
#define FW_RV2P_FILE_09		"bnx2/bnx2-rv2p-09-6.0.17.fw"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static char version[] =
	"QLogic NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("QLogic NetXtreme II BCM5706/5708/5709/5716 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_MIPS_FILE_06);
MODULE_FIRMWARE(FW_RV2P_FILE_06);
MODULE_FIRMWARE(FW_MIPS_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);

static int disable_msi = 0;

module_param(disable_msi, int, S_IRUGO);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");

typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
	BCM5716,
	BCM5716S,
} board_t;

/* indexed by board_t, above */
static struct {
	char *name;
} board_info[] = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
};

static const struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
	{ PCI_VENDOR_ID_BROADCOM, 0x163c,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
	{ 0, }
};

static const struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};

static const struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);

static void bnx2_init_napi(struct bnx2 *bp);
static void bnx2_del_napi(struct bnx2 *bp);

static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
{
	u32 diff;

	/* Tell compiler to fetch tx_prod and tx_cons from memory. */
	barrier();

	/* The ring uses 256 indices for 255 entries; one index
	 * must always be left unused.
	 */
	diff = txr->tx_prod - txr->tx_cons;
	if (unlikely(diff >= BNX2_TX_DESC_CNT)) {
		diff &= 0xffff;
		if (diff == BNX2_TX_DESC_CNT)
			diff = BNX2_MAX_TX_DESC_CNT;
	}
	return bp->tx_ring_size - diff;
}
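
/* Worked example of the wrap handling above (illustrative values,
 * assuming tx_prod/tx_cons are 16-bit ring indices): tx_prod = 0x0001
 * and tx_cons = 0xffff give diff = 0xffff0002 as a u32; the
 * "diff &= 0xffff" step restores the 16-bit wrap, so diff = 2 and two
 * descriptors are in flight.  If prod has advanced a full 256 indices
 * past cons (diff == BNX2_TX_DESC_CNT), the skipped index means only
 * 255 entries are really used, so diff is clamped to
 * BNX2_MAX_TX_DESC_CNT.
 */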

static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = BNX2_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}

static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}

static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}

static u32
bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
{
	return bnx2_reg_rd_ind(bp, bp->shmem_base + offset);
}

static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	spin_lock_bh(&bp->indirect_lock);
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		int i;

		BNX2_WR(bp, BNX2_CTX_CTX_DATA, val);
		BNX2_WR(bp, BNX2_CTX_CTX_CTRL,
			offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		for (i = 0; i < 5; i++) {
			val = BNX2_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		BNX2_WR(bp, BNX2_CTX_DATA_ADR, offset);
		BNX2_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}
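
/* Note on the two paths above: on the 5709 the context memory is
 * written through the CTX_CTX_DATA/CTX_CTX_CTRL pair and WRITE_REQ is
 * polled until the chip clears it (at most 5 polls x 5 us here);
 * older chips expose a simple address/data window instead
 * (CTX_DATA_ADR/CTX_DATA).  Both paths rely on indirect_lock to keep
 * the address/data sequence atomic.
 */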

#ifdef BCM_CNIC
static int
bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct drv_ctl_io *io = &info->data.io;

	switch (info->cmd) {
	case DRV_CTL_IO_WR_CMD:
		bnx2_reg_wr_ind(bp, io->offset, io->data);
		break;
	case DRV_CTL_IO_RD_CMD:
		io->data = bnx2_reg_rd_ind(bp, io->offset);
		break;
	case DRV_CTL_CTX_WR_CMD:
		bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	int sb_id;

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_present = 0;
		sb_id = bp->irq_nvecs;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_tag = bnapi->last_status_idx;
		bnapi->cnic_present = 1;
		sb_id = 0;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}

	cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
	cp->irq_arr[0].status_blk = (void *)
		((unsigned long) bnapi->status_blk.msi +
		(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
	cp->irq_arr[0].status_blk_num = sb_id;
	cp->num_irq = 1;
}

static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			      void *data)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (cp->drv_state & CNIC_DRV_STATE_REGD)
		return -EBUSY;

	if (!bnx2_reg_rd_ind(bp, BNX2_FW_MAX_ISCSI_CONN))
		return -ENODEV;

	bp->cnic_data = data;
	rcu_assign_pointer(bp->cnic_ops, ops);

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2_setup_cnic_irq_info(bp);

	return 0;
}

static int bnx2_unregister_cnic(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_lock);
	cp->drv_state = 0;
	bnapi->cnic_present = 0;
	RCU_INIT_POINTER(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_lock);
	synchronize_rcu();
	return 0;
}

static struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (!cp->max_iscsi_conn)
		return NULL;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = bp->chip_id;
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->drv_ctl = bnx2_drv_ctl;
	cp->drv_register_cnic = bnx2_register_cnic;
	cp->drv_unregister_cnic = bnx2_unregister_cnic;

	return cp;
}

static void
bnx2_cnic_stop(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = rcu_dereference_protected(bp->cnic_ops,
					  lockdep_is_held(&bp->cnic_lock));
	if (c_ops) {
		info.cmd = CNIC_CTL_STOP_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}

static void
bnx2_cnic_start(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = rcu_dereference_protected(bp->cnic_ops,
					  lockdep_is_held(&bp->cnic_lock));
	if (c_ops) {
		if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
			struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

			bnapi->cnic_tag = bnapi->last_status_idx;
		}
		info.cmd = CNIC_CTL_START_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}

#else

static void
bnx2_cnic_stop(struct bnx2 *bp)
{
}

static void
bnx2_cnic_start(struct bnx2 *bp)
{
}

#endif

static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
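
/* The MDIO_COMM word assembled by bnx2_read_phy()/bnx2_write_phy()
 * encodes, per the shifts above, the PHY address in bits 25:21, the
 * register in bits 20:16 and, for writes, the data in bits 15:0,
 * plus the READ/WRITE command, DISEXT and START_BUSY control bits.
 * A minimal usage sketch (illustrative only), reading the standard
 * PHY ID register under the usual PHY serialization:
 *
 *	u32 id1;
 *
 *	if (bnx2_read_phy(bp, MII_PHYSID1, &id1) == 0)
 *		pr_debug("PHY ID1 %04x\n", id1);
 */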

static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}

static void
bnx2_disable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];
		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	}
	BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}

static void
bnx2_enable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];

		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			bnapi->last_status_idx);

		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			bnapi->last_status_idx);
	}
	BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
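
/* The enable sequence above writes INT_ACK_CMD twice per vector: the
 * first write acks last_status_idx with MASK_INT still set, the second
 * drops the mask.  The final COAL_NOW kick asks the host coalescing
 * block to run immediately, so a status-block update that raced with
 * the unmask still raises an interrupt rather than being lost.
 */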

static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	int i;

	atomic_inc(&bp->intr_sem);
	if (!netif_running(bp->dev))
		return;

	bnx2_disable_int(bp);
	for (i = 0; i < bp->irq_nvecs; i++)
		synchronize_irq(bp->irq_tbl[i].vector);
}

static void
bnx2_napi_disable(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++)
		napi_disable(&bp->bnx2_napi[i].napi);
}

static void
bnx2_napi_enable(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++)
		napi_enable(&bp->bnx2_napi[i].napi);
}

static void
bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
{
	if (stop_cnic)
		bnx2_cnic_stop(bp);
	if (netif_running(bp->dev)) {
		bnx2_napi_disable(bp);
		netif_tx_disable(bp->dev);
	}
	bnx2_disable_int_sync(bp);
	netif_carrier_off(bp->dev);	/* prevent tx timeout */
}

static void
bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_tx_wake_all_queues(bp->dev);
			spin_lock_bh(&bp->phy_lock);
			if (bp->link_up)
				netif_carrier_on(bp->dev);
			spin_unlock_bh(&bp->phy_lock);
			bnx2_napi_enable(bp);
			bnx2_enable_int(bp);
			if (start_cnic)
				bnx2_cnic_start(bp);
		}
	}
}

static void
bnx2_free_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		if (txr->tx_desc_ring) {
			dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
					  txr->tx_desc_ring,
					  txr->tx_desc_mapping);
			txr->tx_desc_ring = NULL;
		}
		kfree(txr->tx_buf_ring);
		txr->tx_buf_ring = NULL;
	}
}

static void
bnx2_free_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		for (j = 0; j < bp->rx_max_ring; j++) {
			if (rxr->rx_desc_ring[j])
				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
						  rxr->rx_desc_ring[j],
						  rxr->rx_desc_mapping[j]);
			rxr->rx_desc_ring[j] = NULL;
		}
		vfree(rxr->rx_buf_ring);
		rxr->rx_buf_ring = NULL;

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			if (rxr->rx_pg_desc_ring[j])
				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
						  rxr->rx_pg_desc_ring[j],
						  rxr->rx_pg_desc_mapping[j]);
			rxr->rx_pg_desc_ring[j] = NULL;
		}
		vfree(rxr->rx_pg_ring);
		rxr->rx_pg_ring = NULL;
	}
}

static int
bnx2_alloc_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
		if (txr->tx_buf_ring == NULL)
			return -ENOMEM;

		txr->tx_desc_ring =
			dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
					   &txr->tx_desc_mapping, GFP_KERNEL);
		if (txr->tx_desc_ring == NULL)
			return -ENOMEM;
	}
	return 0;
}

static int
bnx2_alloc_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		rxr->rx_buf_ring =
			vzalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
		if (rxr->rx_buf_ring == NULL)
			return -ENOMEM;

		for (j = 0; j < bp->rx_max_ring; j++) {
			rxr->rx_desc_ring[j] =
				dma_alloc_coherent(&bp->pdev->dev,
						   RXBD_RING_SIZE,
						   &rxr->rx_desc_mapping[j],
						   GFP_KERNEL);
			if (rxr->rx_desc_ring[j] == NULL)
				return -ENOMEM;

		}

		if (bp->rx_pg_ring_size) {
			rxr->rx_pg_ring = vzalloc(SW_RXPG_RING_SIZE *
						  bp->rx_max_pg_ring);
			if (rxr->rx_pg_ring == NULL)
				return -ENOMEM;

		}

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			rxr->rx_pg_desc_ring[j] =
				dma_alloc_coherent(&bp->pdev->dev,
						   RXBD_RING_SIZE,
						   &rxr->rx_pg_desc_mapping[j],
						   GFP_KERNEL);
			if (rxr->rx_pg_desc_ring[j] == NULL)
				return -ENOMEM;

		}
	}
	return 0;
}

static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

	bnx2_free_tx_mem(bp);
	bnx2_free_rx_mem(bp);

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			dma_free_coherent(&bp->pdev->dev, BNX2_PAGE_SIZE,
					  bp->ctx_blk[i],
					  bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	if (bnapi->status_blk.msi) {
		dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
				  bnapi->status_blk.msi,
				  bp->status_blk_mapping);
		bnapi->status_blk.msi = NULL;
		bp->stats_blk = NULL;
	}
}

static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size, err;
	struct bnx2_napi *bnapi;
	void *status_blk;

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	if (bp->flags & BNX2_FLAG_MSIX_CAP)
		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
						 BNX2_SBLK_MSIX_ALIGN_SIZE);
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	status_blk = dma_zalloc_coherent(&bp->pdev->dev, bp->status_stats_size,
					 &bp->status_blk_mapping, GFP_KERNEL);
	if (status_blk == NULL)
		goto alloc_mem_err;

	bnapi = &bp->bnx2_napi[0];
	bnapi->status_blk.msi = status_blk;
	bnapi->hw_tx_cons_ptr =
		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
	bnapi->hw_rx_cons_ptr =
		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
		for (i = 1; i < bp->irq_nvecs; i++) {
			struct status_block_msix *sblk;

			bnapi = &bp->bnx2_napi[i];

			sblk = (status_blk + BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			bnapi->status_blk.msix = sblk;
			bnapi->hw_tx_cons_ptr =
				&sblk->status_tx_quick_consumer_index;
			bnapi->hw_rx_cons_ptr =
				&sblk->status_rx_quick_consumer_index;
			bnapi->int_num = i << 24;
		}
	}

	bp->stats_blk = status_blk + status_blk_size;

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		bp->ctx_pages = 0x2000 / BNX2_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
						BNX2_PAGE_SIZE,
						&bp->ctx_blk_mapping[i],
						GFP_KERNEL);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}

	err = bnx2_alloc_rx_mem(bp);
	if (err)
		goto alloc_mem_err;

	err = bnx2_alloc_tx_mem(bp);
	if (err)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
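
/* Layout of the combined DMA allocation made above (one mapping for
 * everything the chip writes back to the host):
 *
 *	status_blk_mapping + 0                  status block; with MSI-X,
 *	                                        one BNX2_SBLK_MSIX_ALIGN_SIZE
 *	                                        slot per vector
 *	status_blk_mapping + status_blk_size    statistics block
 *
 * This is why bnx2_free_mem() frees only bnapi[0].status_blk.msi and
 * simply NULLs bp->stats_blk.
 */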

static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}

static char *
bnx2_xceiver_str(struct bnx2 *bp)
{
	return (bp->phy_port == PORT_FIBRE) ? "SerDes" :
		((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
		 "Copper");
}

static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
			    bnx2_xceiver_str(bp),
			    bp->line_speed,
			    bp->duplex == DUPLEX_FULL ? "full" : "half");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				pr_cont(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					pr_cont("& transmit ");
			}
			else {
				pr_cont(", transmit ");
			}
			pr_cont("flow control ON");
		}
		pr_cont("\n");
	} else {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC %s Link is Down\n",
			   bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}

static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
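
/* Summary of the 802.3 Table 28B-3 resolution implemented above, after
 * the 1000X bits have been remapped to PAUSE_CAP/PAUSE_ASYM:
 *
 *	local CAP,       remote CAP           -> FLOW_CTRL_TX | FLOW_CTRL_RX
 *	local CAP+ASYM,  remote ASYM (no CAP) -> FLOW_CTRL_RX
 *	local ASYM only, remote CAP+ASYM      -> FLOW_CTRL_TX
 *	anything else                         -> no flow control
 *
 * For example, local_adv = PAUSE_CAP | PAUSE_ASYM with
 * remote_adv = PAUSE_ASYM resolves to bp->flow_ctrl = FLOW_CTRL_RX.
 */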

static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
		case MII_BNX2_GP_TOP_AN_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_1G:
		case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
			bp->line_speed = SPEED_1000;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}

static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
		case BCM5708S_1000X_STAT1_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case BCM5708S_1000X_STAT1_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case BCM5708S_1000X_STAT1_SPEED_1G:
			bp->line_speed = SPEED_1000;
			break;
		case BCM5708S_1000X_STAT1_SPEED_2G5:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}

static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}

static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bp->phy_flags &= ~BNX2_PHY_FLAG_MDIX;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	if (bp->link_up) {
		u32 ext_status;

		bnx2_read_phy(bp, MII_BNX2_EXT_STATUS, &ext_status);
		if (ext_status & EXT_STATUS_MDIX)
			bp->phy_flags |= BNX2_PHY_FLAG_MDIX;
	}

	return 0;
}

static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, rx_cid_addr = GET_CID_ADDR(cid);

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_L2CTX_FLOW_CTRL_ENABLE;

	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}

static void
bnx2_init_all_rx_contexts(struct bnx2 *bp)
{
	int i;
	u32 cid;

	for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
		if (i == 1)
			cid = RX_RSS_CID;
		bnx2_init_rx_context(bp, cid);
	}
}

static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = BNX2_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				if (BNX2_CHIP(bp) != BNX2_CHIP_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	BNX2_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	BNX2_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = BNX2_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	BNX2_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	BNX2_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	bnx2_init_all_rx_contexts(bp);
}

static void
bnx2_enable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_GP_STATUS);
}

static void
bnx2_disable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
}

static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	u32 uninitialized_var(bmcr);
	int err;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
			val |= MII_BNX2_SD_MISC1_FORCE |
				MII_BNX2_SD_MISC1_FORCE_2_5G;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr |= BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	if (err)
		return;

	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	u32 uninitialized_var(bmcr);
	int err;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	if (err)
		return;

	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static void
bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
{
	u32 val;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
	if (start)
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
	else
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
}

static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5706)) {
		u32 val, an_dbg;

		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = BNX2_RD(bp, BNX2_EMAC_STATUS);

		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
				bnx2_5706s_linkup(bp);
			else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
				bnx2_5708s_linkup(bp);
			else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}

static int
bnx2_reset_phy(struct bnx2 *bp)
{
	int i;
	u32 reg;

	bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		udelay(10);

		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
		if (!(reg & BMCR_RESET)) {
			udelay(20);
			break;
		}
	}
	if (i == PHY_RESET_MAX_WAIT) {
		return -EBUSY;
	}
	return 0;
}
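
/* The reset poll above waits at most PHY_RESET_MAX_WAIT * 10 us = 1 ms
 * for the self-clearing BMCR_RESET bit, plus a 20 us settle time once
 * it clears; -EBUSY therefore indicates an unresponsive PHY rather
 * than a merely slow one.
 */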

static u32
bnx2_phy_get_pause_adv(struct bnx2 *bp)
{
	u32 adv = 0;

	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
		(FLOW_CTRL_RX | FLOW_CTRL_TX)) {

		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPAUSE;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_ASYM;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		}
	}
	return adv;
}
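
/* Advertisement bits selected above for each requested flow-control
 * mode, in the SerDes (1000X) and copper encodings:
 *
 *	req_flow_ctrl	SerDes adv			copper adv
 *	RX | TX		1000XPAUSE			PAUSE_CAP
 *	TX only		1000XPSE_ASYM			PAUSE_ASYM
 *	RX only		1000XPAUSE | 1000XPSE_ASYM	PAUSE_CAP | PAUSE_ASYM
 */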

static int bnx2_fw_sync(struct bnx2 *, u32, int, int);

static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}

static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return bnx2_setup_remote_phy(bp, port);

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				new_bmcr &= ~0x2000;
			}

		} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate, which is very common
		 * in blade servers.  Some blade servers use
		 * IPMI for keyboard input, and it's important
		 * to minimize link disruptions.  Autonegotiation
		 * involves exchanging base pages plus 3 next pages
		 * and normally completes in about 120 msec.
		 */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}

#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)

static void
bnx2_set_default_remote_link(struct bnx2 *bp)
{
	u32 link;

	if (bp->phy_port == PORT_TP)
		link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
	else
		link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);

	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
		bp->req_line_speed = 0;
		bp->autoneg |= AUTONEG_SPEED;
		bp->advertising = ADVERTISED_Autoneg;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
			bp->advertising |= ADVERTISED_10baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
			bp->advertising |= ADVERTISED_10baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
			bp->advertising |= ADVERTISED_100baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
			bp->advertising |= ADVERTISED_100baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->advertising |= ADVERTISED_1000baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->advertising |= ADVERTISED_2500baseX_Full;
	} else {
		bp->autoneg = 0;
		bp->advertising = 0;
		bp->req_duplex = DUPLEX_FULL;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
			bp->req_line_speed = SPEED_10;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
			bp->req_line_speed = SPEED_100;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->req_line_speed = SPEED_1000;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->req_line_speed = SPEED_2500;
	}
}

static void
bnx2_set_default_link(struct bnx2 *bp)
{
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		bnx2_set_default_remote_link(bp);
		return;
	}

	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
	bp->req_line_speed = 0;
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 reg;

		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

		reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
			bp->autoneg = 0;
			bp->req_line_speed = bp->line_speed = SPEED_1000;
			bp->req_duplex = DUPLEX_FULL;
		}
	} else
		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
}

static void
bnx2_send_heart_beat(struct bnx2 *bp)
{
	u32 msg;
	u32 addr;

	spin_lock(&bp->indirect_lock);
	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
	spin_unlock(&bp->indirect_lock);
}

static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
2037 
2038 static int
2039 bnx2_set_remote_link(struct bnx2 *bp)
2040 {
2041 	u32 evt_code;
2042 
2043 	evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2044 	switch (evt_code) {
2045 		case BNX2_FW_EVT_CODE_LINK_EVENT:
2046 			bnx2_remote_phy_event(bp);
2047 			break;
2048 		case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2049 		default:
2050 			bnx2_send_heart_beat(bp);
2051 			break;
2052 	}
2053 	return 0;
2054 }
2055 
2056 static int
2057 bnx2_setup_copper_phy(struct bnx2 *bp)
2058 __releases(&bp->phy_lock)
2059 __acquires(&bp->phy_lock)
2060 {
2061 	u32 bmcr, adv_reg, new_adv = 0;
2062 	u32 new_bmcr;
2063 
2064 	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
2065 
2066 	bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
2067 	adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
2068 		    ADVERTISE_PAUSE_ASYM);
2069 
2070 	new_adv = ADVERTISE_CSMA | ethtool_adv_to_mii_adv_t(bp->advertising);
2071 
2072 	if (bp->autoneg & AUTONEG_SPEED) {
2073 		u32 adv1000_reg;
2074 		u32 new_adv1000 = 0;
2075 
2076 		new_adv |= bnx2_phy_get_pause_adv(bp);
2077 
2078 		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
2079 		adv1000_reg &= PHY_ALL_1000_SPEED;
2080 
2081 		new_adv1000 |= ethtool_adv_to_mii_ctrl1000_t(bp->advertising);
2082 		if ((adv1000_reg != new_adv1000) ||
2083 			(adv_reg != new_adv) ||
2084 			((bmcr & BMCR_ANENABLE) == 0)) {
2085 
2086 			bnx2_write_phy(bp, bp->mii_adv, new_adv);
2087 			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000);
2088 			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
2089 				BMCR_ANENABLE);
		} else if (bp->link_up) {
			/* Flow control may have changed from auto to forced,
			 * or vice versa.
			 */
2094 
2095 			bnx2_resolve_flow_ctrl(bp);
2096 			bnx2_set_mac_link(bp);
2097 		}
2098 		return 0;
2099 	}
2100 
2101 	/* advertise nothing when forcing speed */
2102 	if (adv_reg != new_adv)
2103 		bnx2_write_phy(bp, bp->mii_adv, new_adv);
2104 
2105 	new_bmcr = 0;
2106 	if (bp->req_line_speed == SPEED_100) {
2107 		new_bmcr |= BMCR_SPEED100;
2108 	}
2109 	if (bp->req_duplex == DUPLEX_FULL) {
2110 		new_bmcr |= BMCR_FULLDPLX;
2111 	}
2112 	if (new_bmcr != bmcr) {
2113 		u32 bmsr;
2114 
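		/* The link status bit in BMSR is latched low, so read it
		 * twice; the second read returns the current link state.
		 */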
2115 		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2116 		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2117 
2118 		if (bmsr & BMSR_LSTATUS) {
2119 			/* Force link down */
2120 			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
2121 			spin_unlock_bh(&bp->phy_lock);
2122 			msleep(50);
2123 			spin_lock_bh(&bp->phy_lock);
2124 
2125 			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2126 			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2127 		}
2128 
2129 		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
2130 
		/* Normally, the new speed is set up after the link has
		 * gone down and up again. In some cases, the link will not
		 * go down, so we need to set up the new speed here.
		 */
2135 		if (bmsr & BMSR_LSTATUS) {
2136 			bp->line_speed = bp->req_line_speed;
2137 			bp->duplex = bp->req_duplex;
2138 			bnx2_resolve_flow_ctrl(bp);
2139 			bnx2_set_mac_link(bp);
2140 		}
2141 	} else {
2142 		bnx2_resolve_flow_ctrl(bp);
2143 		bnx2_set_mac_link(bp);
2144 	}
2145 	return 0;
2146 }
2147 
2148 static int
2149 bnx2_setup_phy(struct bnx2 *bp, u8 port)
2150 __releases(&bp->phy_lock)
2151 __acquires(&bp->phy_lock)
2152 {
2153 	if (bp->loopback == MAC_LOOPBACK)
2154 		return 0;
2155 
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES)
		return bnx2_setup_serdes_phy(bp, port);
	else
		return bnx2_setup_copper_phy(bp);
2162 }
2163 
2164 static int
2165 bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
2166 {
2167 	u32 val;
2168 
2169 	bp->mii_bmcr = MII_BMCR + 0x10;
2170 	bp->mii_bmsr = MII_BMSR + 0x10;
2171 	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
2172 	bp->mii_adv = MII_ADVERTISE + 0x10;
2173 	bp->mii_lpa = MII_LPA + 0x10;
2174 	bp->mii_up1 = MII_BNX2_OVER1G_UP1;
2175 
2176 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
2177 	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
2178 
2179 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2180 	if (reset_phy)
2181 		bnx2_reset_phy(bp);
2182 
2183 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
2184 
2185 	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
2186 	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
2187 	val |= MII_BNX2_SD_1000XCTL1_FIBER;
2188 	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
2189 
2190 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
2191 	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
2192 	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
2193 		val |= BCM5708S_UP1_2G5;
2194 	else
2195 		val &= ~BCM5708S_UP1_2G5;
2196 	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
2197 
2198 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
2199 	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
2200 	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
2201 	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
2202 
2203 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
2204 
2205 	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
2206 	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
2207 	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
2208 
2209 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2210 
2211 	return 0;
2212 }
2213 
2214 static int
2215 bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
2216 {
2217 	u32 val;
2218 
2219 	if (reset_phy)
2220 		bnx2_reset_phy(bp);
2221 
2222 	bp->mii_up1 = BCM5708S_UP1;
2223 
2224 	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
2225 	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
2226 	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2227 
2228 	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
2229 	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
2230 	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
2231 
2232 	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
2233 	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
2234 	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
2235 
2236 	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
2237 		bnx2_read_phy(bp, BCM5708S_UP1, &val);
2238 		val |= BCM5708S_UP1_2G5;
2239 		bnx2_write_phy(bp, BCM5708S_UP1, val);
2240 	}
2241 
2242 	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
2243 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
2244 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1)) {
2245 		/* increase tx signal amplitude */
2246 		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2247 			       BCM5708S_BLK_ADDR_TX_MISC);
2248 		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
2249 		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
2250 		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
2251 		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2252 	}
2253 
2254 	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
2255 	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
2256 
2257 	if (val) {
2258 		u32 is_backplane;
2259 
2260 		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
2261 		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
2262 			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2263 				       BCM5708S_BLK_ADDR_TX_MISC);
2264 			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
2265 			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2266 				       BCM5708S_BLK_ADDR_DIG);
2267 		}
2268 	}
2269 	return 0;
2270 }
2271 
2272 static int
2273 bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
2274 {
2275 	if (reset_phy)
2276 		bnx2_reset_phy(bp);
2277 
2278 	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
2279 
2280 	if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
2281 		BNX2_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
2282 
2283 	if (bp->dev->mtu > 1500) {
2284 		u32 val;
2285 
2286 		/* Set extended packet length bit */
2287 		bnx2_write_phy(bp, 0x18, 0x7);
2288 		bnx2_read_phy(bp, 0x18, &val);
2289 		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
2290 
2291 		bnx2_write_phy(bp, 0x1c, 0x6c00);
2292 		bnx2_read_phy(bp, 0x1c, &val);
2293 		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
2294 	}
2295 	else {
2296 		u32 val;
2297 
2298 		bnx2_write_phy(bp, 0x18, 0x7);
2299 		bnx2_read_phy(bp, 0x18, &val);
2300 		bnx2_write_phy(bp, 0x18, val & ~0x4007);
2301 
2302 		bnx2_write_phy(bp, 0x1c, 0x6c00);
2303 		bnx2_read_phy(bp, 0x1c, &val);
2304 		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
2305 	}
2306 
2307 	return 0;
2308 }
2309 
2310 static int
2311 bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
2312 {
2313 	u32 val;
2314 
2315 	if (reset_phy)
2316 		bnx2_reset_phy(bp);
2317 
2318 	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
2319 		bnx2_write_phy(bp, 0x18, 0x0c00);
2320 		bnx2_write_phy(bp, 0x17, 0x000a);
2321 		bnx2_write_phy(bp, 0x15, 0x310b);
2322 		bnx2_write_phy(bp, 0x17, 0x201f);
2323 		bnx2_write_phy(bp, 0x15, 0x9506);
2324 		bnx2_write_phy(bp, 0x17, 0x401f);
2325 		bnx2_write_phy(bp, 0x15, 0x14e2);
2326 		bnx2_write_phy(bp, 0x18, 0x0400);
2327 	}
2328 
2329 	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
2330 		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
2331 			       MII_BNX2_DSP_EXPAND_REG | 0x8);
2332 		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
2333 		val &= ~(1 << 8);
2334 		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
2335 	}
2336 
2337 	if (bp->dev->mtu > 1500) {
2338 		/* Set extended packet length bit */
2339 		bnx2_write_phy(bp, 0x18, 0x7);
2340 		bnx2_read_phy(bp, 0x18, &val);
2341 		bnx2_write_phy(bp, 0x18, val | 0x4000);
2342 
2343 		bnx2_read_phy(bp, 0x10, &val);
2344 		bnx2_write_phy(bp, 0x10, val | 0x1);
2345 	}
2346 	else {
2347 		bnx2_write_phy(bp, 0x18, 0x7);
2348 		bnx2_read_phy(bp, 0x18, &val);
2349 		bnx2_write_phy(bp, 0x18, val & ~0x4007);
2350 
2351 		bnx2_read_phy(bp, 0x10, &val);
2352 		bnx2_write_phy(bp, 0x10, val & ~0x1);
2353 	}
2354 
2355 	/* ethernet@wirespeed */
2356 	bnx2_write_phy(bp, MII_BNX2_AUX_CTL, AUX_CTL_MISC_CTL);
2357 	bnx2_read_phy(bp, MII_BNX2_AUX_CTL, &val);
2358 	val |=  AUX_CTL_MISC_CTL_WR | AUX_CTL_MISC_CTL_WIRESPEED;
2359 
2360 	/* auto-mdix */
2361 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
2362 		val |=  AUX_CTL_MISC_CTL_AUTOMDIX;
2363 
2364 	bnx2_write_phy(bp, MII_BNX2_AUX_CTL, val);
2365 	return 0;
2366 }
2367 
2368 
2369 static int
2370 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2371 __releases(&bp->phy_lock)
2372 __acquires(&bp->phy_lock)
2373 {
2374 	u32 val;
2375 	int rc = 0;
2376 
2377 	bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2378 	bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2379 
2380 	bp->mii_bmcr = MII_BMCR;
2381 	bp->mii_bmsr = MII_BMSR;
2382 	bp->mii_bmsr1 = MII_BMSR;
2383 	bp->mii_adv = MII_ADVERTISE;
2384 	bp->mii_lpa = MII_LPA;
2385 
2386 	BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2387 
2388 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2389 		goto setup_phy;
2390 
2391 	bnx2_read_phy(bp, MII_PHYSID1, &val);
2392 	bp->phy_id = val << 16;
2393 	bnx2_read_phy(bp, MII_PHYSID2, &val);
2394 	bp->phy_id |= val & 0xffff;
2395 
2396 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2397 		if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
2398 			rc = bnx2_init_5706s_phy(bp, reset_phy);
2399 		else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
2400 			rc = bnx2_init_5708s_phy(bp, reset_phy);
2401 		else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
2402 			rc = bnx2_init_5709s_phy(bp, reset_phy);
2403 	}
2404 	else {
2405 		rc = bnx2_init_copper_phy(bp, reset_phy);
2406 	}
2407 
2408 setup_phy:
2409 	if (!rc)
2410 		rc = bnx2_setup_phy(bp, bp->phy_port);
2411 
2412 	return rc;
2413 }
2414 
2415 static int
2416 bnx2_set_mac_loopback(struct bnx2 *bp)
2417 {
2418 	u32 mac_mode;
2419 
2420 	mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
2421 	mac_mode &= ~BNX2_EMAC_MODE_PORT;
2422 	mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2423 	BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2424 	bp->link_up = 1;
2425 	return 0;
2426 }
2427 
2428 static int bnx2_test_link(struct bnx2 *);
2429 
2430 static int
2431 bnx2_set_phy_loopback(struct bnx2 *bp)
2432 {
2433 	u32 mac_mode;
2434 	int rc, i;
2435 
2436 	spin_lock_bh(&bp->phy_lock);
2437 	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2438 			    BMCR_SPEED1000);
2439 	spin_unlock_bh(&bp->phy_lock);
2440 	if (rc)
2441 		return rc;
2442 
2443 	for (i = 0; i < 10; i++) {
2444 		if (bnx2_test_link(bp) == 0)
2445 			break;
2446 		msleep(100);
2447 	}
2448 
2449 	mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
2450 	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2451 		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2452 		      BNX2_EMAC_MODE_25G_MODE);
2453 
2454 	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2455 	BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2456 	bp->link_up = 1;
2457 	return 0;
2458 }
2459 
2460 static void
2461 bnx2_dump_mcp_state(struct bnx2 *bp)
2462 {
2463 	struct net_device *dev = bp->dev;
2464 	u32 mcp_p0, mcp_p1;
2465 
2466 	netdev_err(dev, "<--- start MCP states dump --->\n");
2467 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
2468 		mcp_p0 = BNX2_MCP_STATE_P0;
2469 		mcp_p1 = BNX2_MCP_STATE_P1;
2470 	} else {
2471 		mcp_p0 = BNX2_MCP_STATE_P0_5708;
2472 		mcp_p1 = BNX2_MCP_STATE_P1_5708;
2473 	}
2474 	netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
2475 		   bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
2476 	netdev_err(dev, "DEBUG: MCP mode[%08x] state[%08x] evt_mask[%08x]\n",
2477 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_MODE),
2478 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_STATE),
2479 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_EVENT_MASK));
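	/* The program counter is deliberately read twice; two differing
	 * values indicate that the MCP is still executing.
	 */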
2480 	netdev_err(dev, "DEBUG: pc[%08x] pc[%08x] instr[%08x]\n",
2481 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
2482 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
2483 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_INSTRUCTION));
2484 	netdev_err(dev, "DEBUG: shmem states:\n");
2485 	netdev_err(dev, "DEBUG: drv_mb[%08x] fw_mb[%08x] link_status[%08x]",
2486 		   bnx2_shmem_rd(bp, BNX2_DRV_MB),
2487 		   bnx2_shmem_rd(bp, BNX2_FW_MB),
2488 		   bnx2_shmem_rd(bp, BNX2_LINK_STATUS));
2489 	pr_cont(" drv_pulse_mb[%08x]\n", bnx2_shmem_rd(bp, BNX2_DRV_PULSE_MB));
2490 	netdev_err(dev, "DEBUG: dev_info_signature[%08x] reset_type[%08x]",
2491 		   bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE),
2492 		   bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE));
2493 	pr_cont(" condition[%08x]\n",
2494 		bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION));
2495 	DP_SHMEM_LINE(bp, BNX2_BC_RESET_TYPE);
2496 	DP_SHMEM_LINE(bp, 0x3cc);
2497 	DP_SHMEM_LINE(bp, 0x3dc);
2498 	DP_SHMEM_LINE(bp, 0x3ec);
2499 	netdev_err(dev, "DEBUG: 0x3fc[%08x]\n", bnx2_shmem_rd(bp, 0x3fc));
2500 	netdev_err(dev, "<--- end MCP states dump --->\n");
2501 }
2502 
2503 static int
2504 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
2505 {
2506 	int i;
2507 	u32 val;
2508 
2509 	bp->fw_wr_seq++;
2510 	msg_data |= bp->fw_wr_seq;
2511 	bp->fw_last_msg = msg_data;
2512 
2513 	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2514 
2515 	if (!ack)
2516 		return 0;
2517 
2518 	/* wait for an acknowledgement. */
2519 	for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
2520 		msleep(10);
2521 
2522 		val = bnx2_shmem_rd(bp, BNX2_FW_MB);
2523 
2524 		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2525 			break;
2526 	}
2527 	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2528 		return 0;
2529 
2530 	/* If we timed out, inform the firmware that this is the case. */
2531 	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2532 		msg_data &= ~BNX2_DRV_MSG_CODE;
2533 		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2534 
2535 		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2536 		if (!silent) {
2537 			pr_err("fw sync timeout, reset code = %x\n", msg_data);
2538 			bnx2_dump_mcp_state(bp);
2539 		}
2540 
2541 		return -EBUSY;
2542 	}
2543 
2544 	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2545 		return -EIO;
2546 
2547 	return 0;
2548 }
2549 
2550 static int
2551 bnx2_init_5709_context(struct bnx2 *bp)
2552 {
2553 	int i, ret = 0;
2554 	u32 val;
2555 
2556 	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2557 	val |= (BNX2_PAGE_BITS - 8) << 16;
2558 	BNX2_WR(bp, BNX2_CTX_COMMAND, val);
2559 	for (i = 0; i < 10; i++) {
2560 		val = BNX2_RD(bp, BNX2_CTX_COMMAND);
2561 		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2562 			break;
2563 		udelay(2);
2564 	}
2565 	if (val & BNX2_CTX_COMMAND_MEM_INIT)
2566 		return -EBUSY;
2567 
2568 	for (i = 0; i < bp->ctx_pages; i++) {
2569 		int j;
2570 
2571 		if (bp->ctx_blk[i])
2572 			memset(bp->ctx_blk[i], 0, BNX2_PAGE_SIZE);
2573 		else
2574 			return -ENOMEM;
2575 
2576 		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2577 			(bp->ctx_blk_mapping[i] & 0xffffffff) |
2578 			BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2579 		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2580 			(u64) bp->ctx_blk_mapping[i] >> 32);
2581 		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2582 			BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2583 		for (j = 0; j < 10; j++) {
2584 
2585 			val = BNX2_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2586 			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2587 				break;
2588 			udelay(5);
2589 		}
2590 		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2591 			ret = -EBUSY;
2592 			break;
2593 		}
2594 	}
2595 	return ret;
2596 }
2597 
2598 static void
2599 bnx2_init_context(struct bnx2 *bp)
2600 {
2601 	u32 vcid;
2602 
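	/* Walk all 96 virtual context IDs and zero out each context. */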
2603 	vcid = 96;
2604 	while (vcid) {
2605 		u32 vcid_addr, pcid_addr, offset;
2606 		int i;
2607 
2608 		vcid--;
2609 
2610 		if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
2611 			u32 new_vcid;
2612 
2613 			vcid_addr = GET_PCID_ADDR(vcid);
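			/* On the 5706 A0, VCIDs with bit 3 set map to a
			 * different range of physical CIDs.
			 */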
2614 			if (vcid & 0x8) {
2615 				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2616 			}
2617 			else {
2618 				new_vcid = vcid;
2619 			}
2620 			pcid_addr = GET_PCID_ADDR(new_vcid);
2621 		}
2622 		else {
			vcid_addr = GET_CID_ADDR(vcid);
2624 			pcid_addr = vcid_addr;
2625 		}
2626 
2627 		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2628 			vcid_addr += (i << PHY_CTX_SHIFT);
2629 			pcid_addr += (i << PHY_CTX_SHIFT);
2630 
2631 			BNX2_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2632 			BNX2_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2633 
2634 			/* Zero out the context. */
2635 			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2636 				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
2637 		}
2638 	}
2639 }
2640 
2641 static int
2642 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2643 {
2644 	u16 *good_mbuf;
2645 	u32 good_mbuf_cnt;
2646 	u32 val;
2647 
	good_mbuf = kmalloc_array(512, sizeof(u16), GFP_KERNEL);
	if (!good_mbuf)
2650 		return -ENOMEM;
2651 
2652 	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2653 		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2654 
2655 	good_mbuf_cnt = 0;
2656 
2657 	/* Allocate a bunch of mbufs and save the good ones in an array. */
2658 	val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2659 	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2660 		bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2661 				BNX2_RBUF_COMMAND_ALLOC_REQ);
2662 
2663 		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2664 
2665 		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2666 
2667 		/* The addresses with Bit 9 set are bad memory blocks. */
2668 		if (!(val & (1 << 9))) {
2669 			good_mbuf[good_mbuf_cnt] = (u16) val;
2670 			good_mbuf_cnt++;
2671 		}
2672 
2673 		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2674 	}
2675 
	/* Free the good ones back to the mbuf pool, thus discarding
	 * all the bad ones.
	 */
2678 	while (good_mbuf_cnt) {
2679 		good_mbuf_cnt--;
2680 
2681 		val = good_mbuf[good_mbuf_cnt];
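		/* The free command encodes the mbuf value in two bit
		 * fields and sets the low-order bit.
		 */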
2682 		val = (val << 9) | val | 1;
2683 
2684 		bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2685 	}
2686 	kfree(good_mbuf);
2687 	return 0;
2688 }
2689 
2690 static void
2691 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2692 {
2693 	u32 val;
2694 
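	/* The MAC address is split across two registers: the first two
	 * bytes go into MAC_MATCH0 and the remaining four into MAC_MATCH1.
	 */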
2695 	val = (mac_addr[0] << 8) | mac_addr[1];
2696 
2697 	BNX2_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2698 
2699 	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2700 		(mac_addr[4] << 8) | mac_addr[5];
2701 
2702 	BNX2_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2703 }
2704 
2705 static inline int
2706 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2707 {
2708 	dma_addr_t mapping;
2709 	struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2710 	struct bnx2_rx_bd *rxbd =
2711 		&rxr->rx_pg_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
2712 	struct page *page = alloc_page(gfp);
2713 
2714 	if (!page)
2715 		return -ENOMEM;
2716 	mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
2717 			       PCI_DMA_FROMDEVICE);
2718 	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2719 		__free_page(page);
2720 		return -EIO;
2721 	}
2722 
2723 	rx_pg->page = page;
2724 	dma_unmap_addr_set(rx_pg, mapping, mapping);
2725 	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2726 	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2727 	return 0;
2728 }
2729 
2730 static void
2731 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2732 {
2733 	struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2734 	struct page *page = rx_pg->page;
2735 
2736 	if (!page)
2737 		return;
2738 
2739 	dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
2740 		       PAGE_SIZE, PCI_DMA_FROMDEVICE);
2741 
2742 	__free_page(page);
2743 	rx_pg->page = NULL;
2744 }
2745 
2746 static inline int
2747 bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2748 {
2749 	u8 *data;
2750 	struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2751 	dma_addr_t mapping;
2752 	struct bnx2_rx_bd *rxbd =
2753 		&rxr->rx_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
2754 
2755 	data = kmalloc(bp->rx_buf_size, gfp);
2756 	if (!data)
2757 		return -ENOMEM;
2758 
2759 	mapping = dma_map_single(&bp->pdev->dev,
2760 				 get_l2_fhdr(data),
2761 				 bp->rx_buf_use_size,
2762 				 PCI_DMA_FROMDEVICE);
2763 	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2764 		kfree(data);
2765 		return -EIO;
2766 	}
2767 
2768 	rx_buf->data = data;
2769 	dma_unmap_addr_set(rx_buf, mapping, mapping);
2770 
2771 	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2772 	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2773 
2774 	rxr->rx_prod_bseq += bp->rx_buf_use_size;
2775 
2776 	return 0;
2777 }
2778 
2779 static int
2780 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2781 {
2782 	struct status_block *sblk = bnapi->status_blk.msi;
2783 	u32 new_link_state, old_link_state;
2784 	int is_set = 1;
2785 
2786 	new_link_state = sblk->status_attn_bits & event;
2787 	old_link_state = sblk->status_attn_bits_ack & event;
2788 	if (new_link_state != old_link_state) {
2789 		if (new_link_state)
2790 			BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2791 		else
2792 			BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2793 	} else
2794 		is_set = 0;
2795 
2796 	return is_set;
2797 }
2798 
2799 static void
2800 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2801 {
2802 	spin_lock(&bp->phy_lock);
2803 
2804 	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2805 		bnx2_set_link(bp);
2806 	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2807 		bnx2_set_remote_link(bp);
2808 
2809 	spin_unlock(&bp->phy_lock);
2810 
2811 }
2812 
2813 static inline u16
2814 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2815 {
2816 	u16 cons;
2817 
2818 	/* Tell compiler that status block fields can change. */
2819 	barrier();
2820 	cons = *bnapi->hw_tx_cons_ptr;
2821 	barrier();
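	/* The last BD slot of each ring page holds a pointer to the next
	 * page, so skip over it when the consumer index lands there.
	 */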
2822 	if (unlikely((cons & BNX2_MAX_TX_DESC_CNT) == BNX2_MAX_TX_DESC_CNT))
2823 		cons++;
2824 	return cons;
2825 }
2826 
2827 static int
2828 bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2829 {
2830 	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
2831 	u16 hw_cons, sw_cons, sw_ring_cons;
2832 	int tx_pkt = 0, index;
2833 	unsigned int tx_bytes = 0;
2834 	struct netdev_queue *txq;
2835 
2836 	index = (bnapi - bp->bnx2_napi);
2837 	txq = netdev_get_tx_queue(bp->dev, index);
2838 
2839 	hw_cons = bnx2_get_hw_tx_cons(bnapi);
2840 	sw_cons = txr->tx_cons;
2841 
2842 	while (sw_cons != hw_cons) {
2843 		struct bnx2_sw_tx_bd *tx_buf;
2844 		struct sk_buff *skb;
2845 		int i, last;
2846 
2847 		sw_ring_cons = BNX2_TX_RING_IDX(sw_cons);
2848 
2849 		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
2850 		skb = tx_buf->skb;
2851 
		/* prefetch skb_end_pointer() to speed up skb_shinfo(skb) */
2853 		prefetch(&skb->end);
2854 
2855 		/* partial BD completions possible with TSO packets */
2856 		if (tx_buf->is_gso) {
2857 			u16 last_idx, last_ring_idx;
2858 
2859 			last_idx = sw_cons + tx_buf->nr_frags + 1;
2860 			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
2861 			if (unlikely(last_ring_idx >= BNX2_MAX_TX_DESC_CNT)) {
2862 				last_idx++;
2863 			}
2864 			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2865 				break;
2866 			}
2867 		}
2868 
2869 		dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
2870 			skb_headlen(skb), PCI_DMA_TODEVICE);
2871 
2872 		tx_buf->skb = NULL;
2873 		last = tx_buf->nr_frags;
2874 
2875 		for (i = 0; i < last; i++) {
2876 			struct bnx2_sw_tx_bd *tx_buf;
2877 
2878 			sw_cons = BNX2_NEXT_TX_BD(sw_cons);
2879 
2880 			tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(sw_cons)];
2881 			dma_unmap_page(&bp->pdev->dev,
2882 				dma_unmap_addr(tx_buf, mapping),
2883 				skb_frag_size(&skb_shinfo(skb)->frags[i]),
2884 				PCI_DMA_TODEVICE);
2885 		}
2886 
2887 		sw_cons = BNX2_NEXT_TX_BD(sw_cons);
2888 
2889 		tx_bytes += skb->len;
2890 		dev_kfree_skb_any(skb);
2891 		tx_pkt++;
2892 		if (tx_pkt == budget)
2893 			break;
2894 
2895 		if (hw_cons == sw_cons)
2896 			hw_cons = bnx2_get_hw_tx_cons(bnapi);
2897 	}
2898 
2899 	netdev_tx_completed_queue(txq, tx_pkt, tx_bytes);
2900 	txr->hw_tx_cons = hw_cons;
2901 	txr->tx_cons = sw_cons;
2902 
2903 	/* Need to make the tx_cons update visible to bnx2_start_xmit()
2904 	 * before checking for netif_tx_queue_stopped().  Without the
2905 	 * memory barrier, there is a small possibility that bnx2_start_xmit()
2906 	 * will miss it and cause the queue to be stopped forever.
2907 	 */
2908 	smp_mb();
2909 
2910 	if (unlikely(netif_tx_queue_stopped(txq)) &&
2911 		     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
2912 		__netif_tx_lock(txq, smp_processor_id());
2913 		if ((netif_tx_queue_stopped(txq)) &&
2914 		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
2915 			netif_tx_wake_queue(txq);
2916 		__netif_tx_unlock(txq);
2917 	}
2918 
2919 	return tx_pkt;
2920 }
2921 
2922 static void
2923 bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2924 			struct sk_buff *skb, int count)
2925 {
2926 	struct bnx2_sw_pg *cons_rx_pg, *prod_rx_pg;
2927 	struct bnx2_rx_bd *cons_bd, *prod_bd;
2928 	int i;
2929 	u16 hw_prod, prod;
2930 	u16 cons = rxr->rx_pg_cons;
2931 
2932 	cons_rx_pg = &rxr->rx_pg_ring[cons];
2933 
2934 	/* The caller was unable to allocate a new page to replace the
2935 	 * last one in the frags array, so we need to recycle that page
2936 	 * and then free the skb.
2937 	 */
2938 	if (skb) {
2939 		struct page *page;
2940 		struct skb_shared_info *shinfo;
2941 
2942 		shinfo = skb_shinfo(skb);
2943 		shinfo->nr_frags--;
2944 		page = skb_frag_page(&shinfo->frags[shinfo->nr_frags]);
2945 		__skb_frag_set_page(&shinfo->frags[shinfo->nr_frags], NULL);
2946 
2947 		cons_rx_pg->page = page;
2948 		dev_kfree_skb(skb);
2949 	}
2950 
2951 	hw_prod = rxr->rx_pg_prod;
2952 
2953 	for (i = 0; i < count; i++) {
2954 		prod = BNX2_RX_PG_RING_IDX(hw_prod);
2955 
2956 		prod_rx_pg = &rxr->rx_pg_ring[prod];
2957 		cons_rx_pg = &rxr->rx_pg_ring[cons];
2958 		cons_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(cons)]
2959 						[BNX2_RX_IDX(cons)];
2960 		prod_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(prod)]
2961 						[BNX2_RX_IDX(prod)];
2962 
2963 		if (prod != cons) {
2964 			prod_rx_pg->page = cons_rx_pg->page;
2965 			cons_rx_pg->page = NULL;
2966 			dma_unmap_addr_set(prod_rx_pg, mapping,
2967 				dma_unmap_addr(cons_rx_pg, mapping));
2968 
2969 			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2970 			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2971 
2972 		}
2973 		cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(cons));
2974 		hw_prod = BNX2_NEXT_RX_BD(hw_prod);
2975 	}
2976 	rxr->rx_pg_prod = hw_prod;
2977 	rxr->rx_pg_cons = cons;
2978 }
2979 
2980 static inline void
2981 bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2982 		   u8 *data, u16 cons, u16 prod)
2983 {
2984 	struct bnx2_sw_bd *cons_rx_buf, *prod_rx_buf;
2985 	struct bnx2_rx_bd *cons_bd, *prod_bd;
2986 
2987 	cons_rx_buf = &rxr->rx_buf_ring[cons];
2988 	prod_rx_buf = &rxr->rx_buf_ring[prod];
2989 
2990 	dma_sync_single_for_device(&bp->pdev->dev,
2991 		dma_unmap_addr(cons_rx_buf, mapping),
2992 		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2993 
2994 	rxr->rx_prod_bseq += bp->rx_buf_use_size;
2995 
2996 	prod_rx_buf->data = data;
2997 
2998 	if (cons == prod)
2999 		return;
3000 
3001 	dma_unmap_addr_set(prod_rx_buf, mapping,
3002 			dma_unmap_addr(cons_rx_buf, mapping));
3003 
3004 	cons_bd = &rxr->rx_desc_ring[BNX2_RX_RING(cons)][BNX2_RX_IDX(cons)];
3005 	prod_bd = &rxr->rx_desc_ring[BNX2_RX_RING(prod)][BNX2_RX_IDX(prod)];
3006 	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
3007 	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
3008 }
3009 
3010 static struct sk_buff *
3011 bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u8 *data,
3012 	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
3013 	    u32 ring_idx)
3014 {
3015 	int err;
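	/* ring_idx packs the consumer index in the upper 16 bits and the
	 * producer index in the lower 16 bits (see bnx2_rx_int()).
	 */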
3016 	u16 prod = ring_idx & 0xffff;
3017 	struct sk_buff *skb;
3018 
3019 	err = bnx2_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
3020 	if (unlikely(err)) {
3021 		bnx2_reuse_rx_data(bp, rxr, data, (u16) (ring_idx >> 16), prod);
3022 error:
3023 		if (hdr_len) {
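			/* len excludes the 4-byte CRC; add it back when
			 * computing how many pages were consumed.
			 */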
3024 			unsigned int raw_len = len + 4;
3025 			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
3026 
3027 			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3028 		}
3029 		return NULL;
3030 	}
3031 
3032 	dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
3033 			 PCI_DMA_FROMDEVICE);
3034 	skb = build_skb(data, 0);
3035 	if (!skb) {
3036 		kfree(data);
3037 		goto error;
3038 	}
3039 	skb_reserve(skb, ((u8 *)get_l2_fhdr(data) - data) + BNX2_RX_OFFSET);
3040 	if (hdr_len == 0) {
3041 		skb_put(skb, len);
3042 		return skb;
3043 	} else {
3044 		unsigned int i, frag_len, frag_size, pages;
3045 		struct bnx2_sw_pg *rx_pg;
3046 		u16 pg_cons = rxr->rx_pg_cons;
3047 		u16 pg_prod = rxr->rx_pg_prod;
3048 
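		/* len excludes the trailing 4-byte CRC, but the hardware
		 * DMAs it, so add it back when sizing the page frags; it
		 * is trimmed from the last fragment below.
		 */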
3049 		frag_size = len + 4 - hdr_len;
3050 		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
3051 		skb_put(skb, hdr_len);
3052 
3053 		for (i = 0; i < pages; i++) {
3054 			dma_addr_t mapping_old;
3055 
3056 			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
3057 			if (unlikely(frag_len <= 4)) {
3058 				unsigned int tail = 4 - frag_len;
3059 
3060 				rxr->rx_pg_cons = pg_cons;
3061 				rxr->rx_pg_prod = pg_prod;
3062 				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
3063 							pages - i);
3064 				skb->len -= tail;
3065 				if (i == 0) {
3066 					skb->tail -= tail;
3067 				} else {
3068 					skb_frag_t *frag =
3069 						&skb_shinfo(skb)->frags[i - 1];
3070 					skb_frag_size_sub(frag, tail);
3071 					skb->data_len -= tail;
3072 				}
3073 				return skb;
3074 			}
3075 			rx_pg = &rxr->rx_pg_ring[pg_cons];
3076 
3077 			/* Don't unmap yet.  If we're unable to allocate a new
3078 			 * page, we need to recycle the page and the DMA addr.
3079 			 */
3080 			mapping_old = dma_unmap_addr(rx_pg, mapping);
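			/* Trim the trailing 4-byte CRC from the last page. */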
3081 			if (i == pages - 1)
3082 				frag_len -= 4;
3083 
3084 			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
3085 			rx_pg->page = NULL;
3086 
3087 			err = bnx2_alloc_rx_page(bp, rxr,
3088 						 BNX2_RX_PG_RING_IDX(pg_prod),
3089 						 GFP_ATOMIC);
3090 			if (unlikely(err)) {
3091 				rxr->rx_pg_cons = pg_cons;
3092 				rxr->rx_pg_prod = pg_prod;
3093 				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
3094 							pages - i);
3095 				return NULL;
3096 			}
3097 
3098 			dma_unmap_page(&bp->pdev->dev, mapping_old,
3099 				       PAGE_SIZE, PCI_DMA_FROMDEVICE);
3100 
3101 			frag_size -= frag_len;
3102 			skb->data_len += frag_len;
3103 			skb->truesize += PAGE_SIZE;
3104 			skb->len += frag_len;
3105 
3106 			pg_prod = BNX2_NEXT_RX_BD(pg_prod);
3107 			pg_cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(pg_cons));
3108 		}
3109 		rxr->rx_pg_prod = pg_prod;
3110 		rxr->rx_pg_cons = pg_cons;
3111 	}
3112 	return skb;
3113 }
3114 
3115 static inline u16
3116 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
3117 {
3118 	u16 cons;
3119 
3120 	/* Tell compiler that status block fields can change. */
3121 	barrier();
3122 	cons = *bnapi->hw_rx_cons_ptr;
3123 	barrier();
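	/* As in bnx2_get_hw_tx_cons(), skip the next-page pointer slot
	 * at the end of each ring page.
	 */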
3124 	if (unlikely((cons & BNX2_MAX_RX_DESC_CNT) == BNX2_MAX_RX_DESC_CNT))
3125 		cons++;
3126 	return cons;
3127 }
3128 
3129 static int
3130 bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3131 {
3132 	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3133 	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
3134 	struct l2_fhdr *rx_hdr;
3135 	int rx_pkt = 0, pg_ring_used = 0;
3136 
3137 	if (budget <= 0)
3138 		return rx_pkt;
3139 
3140 	hw_cons = bnx2_get_hw_rx_cons(bnapi);
3141 	sw_cons = rxr->rx_cons;
3142 	sw_prod = rxr->rx_prod;
3143 
3144 	/* Memory barrier necessary as speculative reads of the rx
3145 	 * buffer can be ahead of the index in the status block
3146 	 */
3147 	rmb();
3148 	while (sw_cons != hw_cons) {
3149 		unsigned int len, hdr_len;
3150 		u32 status;
3151 		struct bnx2_sw_bd *rx_buf, *next_rx_buf;
3152 		struct sk_buff *skb;
3153 		dma_addr_t dma_addr;
3154 		u8 *data;
3155 		u16 next_ring_idx;
3156 
3157 		sw_ring_cons = BNX2_RX_RING_IDX(sw_cons);
3158 		sw_ring_prod = BNX2_RX_RING_IDX(sw_prod);
3159 
3160 		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
3161 		data = rx_buf->data;
3162 		rx_buf->data = NULL;
3163 
3164 		rx_hdr = get_l2_fhdr(data);
3165 		prefetch(rx_hdr);
3166 
3167 		dma_addr = dma_unmap_addr(rx_buf, mapping);
3168 
3169 		dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
3170 			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
3171 			PCI_DMA_FROMDEVICE);
3172 
3173 		next_ring_idx = BNX2_RX_RING_IDX(BNX2_NEXT_RX_BD(sw_cons));
3174 		next_rx_buf = &rxr->rx_buf_ring[next_ring_idx];
3175 		prefetch(get_l2_fhdr(next_rx_buf->data));
3176 
3177 		len = rx_hdr->l2_fhdr_pkt_len;
3178 		status = rx_hdr->l2_fhdr_status;
3179 
3180 		hdr_len = 0;
3181 		if (status & L2_FHDR_STATUS_SPLIT) {
3182 			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
3183 			pg_ring_used = 1;
3184 		} else if (len > bp->rx_jumbo_thresh) {
3185 			hdr_len = bp->rx_jumbo_thresh;
3186 			pg_ring_used = 1;
3187 		}
3188 
3189 		if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
3190 				       L2_FHDR_ERRORS_PHY_DECODE |
3191 				       L2_FHDR_ERRORS_ALIGNMENT |
3192 				       L2_FHDR_ERRORS_TOO_SHORT |
3193 				       L2_FHDR_ERRORS_GIANT_FRAME))) {
3194 
3195 			bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
3196 					  sw_ring_prod);
3197 			if (pg_ring_used) {
3198 				int pages;
3199 
3200 				pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;
3201 
3202 				bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3203 			}
3204 			goto next_rx;
3205 		}
3206 
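		/* Strip the 4-byte frame CRC included in l2_fhdr_pkt_len. */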
3207 		len -= 4;
3208 
3209 		if (len <= bp->rx_copy_thresh) {
3210 			skb = netdev_alloc_skb(bp->dev, len + 6);
			if (!skb) {
3212 				bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
3213 						  sw_ring_prod);
3214 				goto next_rx;
3215 			}
3216 
3217 			/* aligned copy */
3218 			memcpy(skb->data,
3219 			       (u8 *)rx_hdr + BNX2_RX_OFFSET - 6,
3220 			       len + 6);
3221 			skb_reserve(skb, 6);
3222 			skb_put(skb, len);
3223 
3224 			bnx2_reuse_rx_data(bp, rxr, data,
3225 				sw_ring_cons, sw_ring_prod);
3226 
3227 		} else {
3228 			skb = bnx2_rx_skb(bp, rxr, data, len, hdr_len, dma_addr,
3229 					  (sw_ring_cons << 16) | sw_ring_prod);
3230 			if (!skb)
3231 				goto next_rx;
3232 		}
3233 		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
3234 		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
3235 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rx_hdr->l2_fhdr_vlan_tag);
3236 
3237 		skb->protocol = eth_type_trans(skb, bp->dev);
3238 
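		/* Drop frames longer than the MTU unless they carry a VLAN
		 * tag (ETH_P_8021Q), which accounts for the extra length.
		 */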
3239 		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != ETH_P_8021Q)) {
3241 
3242 			dev_kfree_skb(skb);
3243 			goto next_rx;
3244 
3245 		}
3246 
3247 		skb_checksum_none_assert(skb);
3248 		if ((bp->dev->features & NETIF_F_RXCSUM) &&
3249 			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
3250 			L2_FHDR_STATUS_UDP_DATAGRAM))) {
3251 
3252 			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
3253 					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
3254 				skb->ip_summed = CHECKSUM_UNNECESSARY;
3255 		}
3256 		if ((bp->dev->features & NETIF_F_RXHASH) &&
3257 		    ((status & L2_FHDR_STATUS_USE_RXHASH) ==
3258 		     L2_FHDR_STATUS_USE_RXHASH))
3259 			skb_set_hash(skb, rx_hdr->l2_fhdr_hash,
3260 				     PKT_HASH_TYPE_L3);
3261 
3262 		skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
3263 		napi_gro_receive(&bnapi->napi, skb);
3264 		rx_pkt++;
3265 
3266 next_rx:
3267 		sw_cons = BNX2_NEXT_RX_BD(sw_cons);
3268 		sw_prod = BNX2_NEXT_RX_BD(sw_prod);
3269 
		if (rx_pkt == budget)
3271 			break;
3272 
3273 		/* Refresh hw_cons to see if there is new work */
3274 		if (sw_cons == hw_cons) {
3275 			hw_cons = bnx2_get_hw_rx_cons(bnapi);
3276 			rmb();
3277 		}
3278 	}
3279 	rxr->rx_cons = sw_cons;
3280 	rxr->rx_prod = sw_prod;
3281 
3282 	if (pg_ring_used)
3283 		BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
3284 
3285 	BNX2_WR16(bp, rxr->rx_bidx_addr, sw_prod);
3286 
3287 	BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
3288 
3289 	mmiowb();
3290 
3291 	return rx_pkt;
3292 
3293 }
3294 
3295 /* MSI ISR - The only difference between this and the INTx ISR
3296  * is that the MSI interrupt is always serviced.
3297  */
3298 static irqreturn_t
3299 bnx2_msi(int irq, void *dev_instance)
3300 {
3301 	struct bnx2_napi *bnapi = dev_instance;
3302 	struct bnx2 *bp = bnapi->bp;
3303 
3304 	prefetch(bnapi->status_blk.msi);
3305 	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3306 		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3307 		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3308 
3309 	/* Return here if interrupt is disabled. */
3310 	if (unlikely(atomic_read(&bp->intr_sem) != 0))
3311 		return IRQ_HANDLED;
3312 
3313 	napi_schedule(&bnapi->napi);
3314 
3315 	return IRQ_HANDLED;
3316 }
3317 
3318 static irqreturn_t
3319 bnx2_msi_1shot(int irq, void *dev_instance)
3320 {
3321 	struct bnx2_napi *bnapi = dev_instance;
3322 	struct bnx2 *bp = bnapi->bp;
3323 
3324 	prefetch(bnapi->status_blk.msi);
3325 
3326 	/* Return here if interrupt is disabled. */
3327 	if (unlikely(atomic_read(&bp->intr_sem) != 0))
3328 		return IRQ_HANDLED;
3329 
3330 	napi_schedule(&bnapi->napi);
3331 
3332 	return IRQ_HANDLED;
3333 }
3334 
3335 static irqreturn_t
3336 bnx2_interrupt(int irq, void *dev_instance)
3337 {
3338 	struct bnx2_napi *bnapi = dev_instance;
3339 	struct bnx2 *bp = bnapi->bp;
3340 	struct status_block *sblk = bnapi->status_blk.msi;
3341 
3342 	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block that was posted prior to the
3344 	 * interrupt. Reading a register will flush the status block.
3345 	 * When using MSI, the MSI message will always complete after
3346 	 * the status block write.
3347 	 */
3348 	if ((sblk->status_idx == bnapi->last_status_idx) &&
3349 	    (BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS) &
3350 	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
3351 		return IRQ_NONE;
3352 
3353 	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3354 		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3355 		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3356 
3357 	/* Read back to deassert IRQ immediately to avoid too many
3358 	 * spurious interrupts.
3359 	 */
3360 	BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
3361 
3362 	/* Return here if interrupt is shared and is disabled. */
3363 	if (unlikely(atomic_read(&bp->intr_sem) != 0))
3364 		return IRQ_HANDLED;
3365 
3366 	if (napi_schedule_prep(&bnapi->napi)) {
3367 		bnapi->last_status_idx = sblk->status_idx;
3368 		__napi_schedule(&bnapi->napi);
3369 	}
3370 
3371 	return IRQ_HANDLED;
3372 }
3373 
3374 static inline int
3375 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3376 {
3377 	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3378 	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3379 
3380 	if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3381 	    (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3382 		return 1;
3383 	return 0;
3384 }
3385 
3386 #define STATUS_ATTN_EVENTS	(STATUS_ATTN_BITS_LINK_STATE | \
3387 				 STATUS_ATTN_BITS_TIMER_ABORT)
3388 
3389 static inline int
3390 bnx2_has_work(struct bnx2_napi *bnapi)
3391 {
3392 	struct status_block *sblk = bnapi->status_blk.msi;
3393 
3394 	if (bnx2_has_fast_work(bnapi))
3395 		return 1;
3396 
3397 #ifdef BCM_CNIC
3398 	if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3399 		return 1;
3400 #endif
3401 
3402 	if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3403 	    (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3404 		return 1;
3405 
3406 	return 0;
3407 }
3408 
3409 static void
3410 bnx2_chk_missed_msi(struct bnx2 *bp)
3411 {
3412 	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
3413 	u32 msi_ctrl;
3414 
3415 	if (bnx2_has_work(bnapi)) {
3416 		msi_ctrl = BNX2_RD(bp, BNX2_PCICFG_MSI_CONTROL);
3417 		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
3418 			return;
3419 
3420 		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
3421 			BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
3422 				~BNX2_PCICFG_MSI_CONTROL_ENABLE);
3423 			BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
3424 			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
3425 		}
3426 	}
3427 
3428 	bp->idle_chk_status_idx = bnapi->last_status_idx;
3429 }
3430 
3431 #ifdef BCM_CNIC
3432 static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
3433 {
3434 	struct cnic_ops *c_ops;
3435 
3436 	if (!bnapi->cnic_present)
3437 		return;
3438 
3439 	rcu_read_lock();
3440 	c_ops = rcu_dereference(bp->cnic_ops);
3441 	if (c_ops)
3442 		bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
3443 						      bnapi->status_blk.msi);
3444 	rcu_read_unlock();
3445 }
3446 #endif
3447 
3448 static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
3449 {
3450 	struct status_block *sblk = bnapi->status_blk.msi;
3451 	u32 status_attn_bits = sblk->status_attn_bits;
3452 	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
3453 
3454 	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
3455 	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
3456 
3457 		bnx2_phy_int(bp, bnapi);
3458 
3459 		/* This is needed to take care of transient status
3460 		 * during link changes.
3461 		 */
3462 		BNX2_WR(bp, BNX2_HC_COMMAND,
3463 			bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3464 		BNX2_RD(bp, BNX2_HC_COMMAND);
3465 	}
3466 }
3467 
3468 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3469 			  int work_done, int budget)
3470 {
3471 	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3472 	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3473 
3474 	if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3475 		bnx2_tx_int(bp, bnapi, 0);
3476 
3477 	if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3478 		work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3479 
3480 	return work_done;
3481 }
3482 
3483 static int bnx2_poll_msix(struct napi_struct *napi, int budget)
3484 {
3485 	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3486 	struct bnx2 *bp = bnapi->bp;
3487 	int work_done = 0;
3488 	struct status_block_msix *sblk = bnapi->status_blk.msix;
3489 
3490 	while (1) {
3491 		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3492 		if (unlikely(work_done >= budget))
3493 			break;
3494 
3495 		bnapi->last_status_idx = sblk->status_idx;
3496 		/* status idx must be read before checking for more work. */
3497 		rmb();
3498 		if (likely(!bnx2_has_fast_work(bnapi))) {
3499 
3500 			napi_complete(napi);
3501 			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
3502 				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3503 				bnapi->last_status_idx);
3504 			break;
3505 		}
3506 	}
3507 	return work_done;
3508 }
3509 
3510 static int bnx2_poll(struct napi_struct *napi, int budget)
3511 {
3512 	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3513 	struct bnx2 *bp = bnapi->bp;
3514 	int work_done = 0;
3515 	struct status_block *sblk = bnapi->status_blk.msi;
3516 
3517 	while (1) {
3518 		bnx2_poll_link(bp, bnapi);
3519 
3520 		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3521 
3522 #ifdef BCM_CNIC
3523 		bnx2_poll_cnic(bp, bnapi);
3524 #endif
3525 
3526 		/* bnapi->last_status_idx is used below to tell the hw how
3527 		 * much work has been processed, so we must read it before
3528 		 * checking for more work.
3529 		 */
3530 		bnapi->last_status_idx = sblk->status_idx;
3531 
3532 		if (unlikely(work_done >= budget))
3533 			break;
3534 
3535 		rmb();
3536 		if (likely(!bnx2_has_work(bnapi))) {
3537 			napi_complete(napi);
3538 			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
3539 				BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3540 					BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3541 					bnapi->last_status_idx);
3542 				break;
3543 			}
3544 			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3545 				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3546 				BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
3547 				bnapi->last_status_idx);
3548 
3549 			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3550 				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3551 				bnapi->last_status_idx);
3552 			break;
3553 		}
3554 	}
3555 
3556 	return work_done;
3557 }
3558 
3559 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
3560  * from set_multicast.
3561  */
3562 static void
3563 bnx2_set_rx_mode(struct net_device *dev)
3564 {
3565 	struct bnx2 *bp = netdev_priv(dev);
3566 	u32 rx_mode, sort_mode;
3567 	struct netdev_hw_addr *ha;
3568 	int i;
3569 
3570 	if (!netif_running(dev))
3571 		return;
3572 
3573 	spin_lock_bh(&bp->phy_lock);
3574 
3575 	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3576 				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3577 	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
3578 	if (!(dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
3579 	     (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
3580 		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3581 	if (dev->flags & IFF_PROMISC) {
3582 		/* Promiscuous mode. */
3583 		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3584 		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3585 			     BNX2_RPM_SORT_USER0_PROM_VLAN;
3586 	}
3587 	else if (dev->flags & IFF_ALLMULTI) {
3588 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3589 			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3590 				0xffffffff);
		}
3592 		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3593 	}
3594 	else {
3595 		/* Accept one or more multicast(s). */
3596 		u32 mc_filter[NUM_MC_HASH_REGISTERS];
3597 		u32 regidx;
3598 		u32 bit;
3599 		u32 crc;
3600 
3601 		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3602 
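		/* Hash each address with CRC32; the low byte of the CRC
		 * selects one bit in the 256-bit filter: the top 3 bits
		 * pick the register, the low 5 bits the bit within it.
		 */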
3603 		netdev_for_each_mc_addr(ha, dev) {
3604 			crc = ether_crc_le(ETH_ALEN, ha->addr);
3605 			bit = crc & 0xff;
3606 			regidx = (bit & 0xe0) >> 5;
3607 			bit &= 0x1f;
3608 			mc_filter[regidx] |= (1 << bit);
3609 		}
3610 
3611 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3612 			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3613 				mc_filter[i]);
3614 		}
3615 
3616 		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3617 	}
3618 
3619 	if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
3620 		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3621 		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3622 			     BNX2_RPM_SORT_USER0_PROM_VLAN;
3623 	} else if (!(dev->flags & IFF_PROMISC)) {
		/* Add all entries to the match filter list */
3625 		i = 0;
3626 		netdev_for_each_uc_addr(ha, dev) {
3627 			bnx2_set_mac_addr(bp, ha->addr,
3628 					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
3629 			sort_mode |= (1 <<
3630 				      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
3631 			i++;
3632 		}
3633 
3634 	}
3635 
3636 	if (rx_mode != bp->rx_mode) {
3637 		bp->rx_mode = rx_mode;
3638 		BNX2_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3639 	}
3640 
3641 	BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3642 	BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3643 	BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3644 
3645 	spin_unlock_bh(&bp->phy_lock);
3646 }
3647 
3648 static int
3649 check_fw_section(const struct firmware *fw,
3650 		 const struct bnx2_fw_file_section *section,
3651 		 u32 alignment, bool non_empty)
3652 {
3653 	u32 offset = be32_to_cpu(section->offset);
3654 	u32 len = be32_to_cpu(section->len);
3655 
3656 	if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3657 		return -EINVAL;
3658 	if ((non_empty && len == 0) || len > fw->size - offset ||
3659 	    len & (alignment - 1))
3660 		return -EINVAL;
3661 	return 0;
3662 }
3663 
3664 static int
3665 check_mips_fw_entry(const struct firmware *fw,
3666 		    const struct bnx2_mips_fw_file_entry *entry)
3667 {
3668 	if (check_fw_section(fw, &entry->text, 4, true) ||
3669 	    check_fw_section(fw, &entry->data, 4, false) ||
3670 	    check_fw_section(fw, &entry->rodata, 4, false))
3671 		return -EINVAL;
3672 	return 0;
3673 }
3674 
3675 static void bnx2_release_firmware(struct bnx2 *bp)
3676 {
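	/* The MIPS and RV2P images are only ever held together, so
	 * rv2p_firmware doubles as the "firmware loaded" flag (see
	 * bnx2_request_uncached_firmware()).
	 */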
3677 	if (bp->rv2p_firmware) {
3678 		release_firmware(bp->mips_firmware);
3679 		release_firmware(bp->rv2p_firmware);
3680 		bp->rv2p_firmware = NULL;
3681 	}
3682 }
3683 
3684 static int bnx2_request_uncached_firmware(struct bnx2 *bp)
3685 {
3686 	const char *mips_fw_file, *rv2p_fw_file;
3687 	const struct bnx2_mips_fw_file *mips_fw;
3688 	const struct bnx2_rv2p_fw_file *rv2p_fw;
3689 	int rc;
3690 
3691 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
3692 		mips_fw_file = FW_MIPS_FILE_09;
3693 		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A0) ||
3694 		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A1))
3695 			rv2p_fw_file = FW_RV2P_FILE_09_Ax;
3696 		else
3697 			rv2p_fw_file = FW_RV2P_FILE_09;
3698 	} else {
3699 		mips_fw_file = FW_MIPS_FILE_06;
3700 		rv2p_fw_file = FW_RV2P_FILE_06;
3701 	}
3702 
3703 	rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3704 	if (rc) {
3705 		pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
3706 		goto out;
3707 	}
3708 
3709 	rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3710 	if (rc) {
3711 		pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
3712 		goto err_release_mips_firmware;
3713 	}
3714 	mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3715 	rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3716 	if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3717 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3718 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3719 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3720 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3721 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3722 		pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
3723 		rc = -EINVAL;
3724 		goto err_release_firmware;
3725 	}
3726 	if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3727 	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3728 	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3729 		pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
3730 		rc = -EINVAL;
3731 		goto err_release_firmware;
3732 	}
3733 out:
3734 	return rc;
3735 
3736 err_release_firmware:
3737 	release_firmware(bp->rv2p_firmware);
3738 	bp->rv2p_firmware = NULL;
3739 err_release_mips_firmware:
3740 	release_firmware(bp->mips_firmware);
3741 	goto out;
3742 }
3743 
3744 static int bnx2_request_firmware(struct bnx2 *bp)
3745 {
3746 	return bp->rv2p_firmware ? 0 : bnx2_request_uncached_firmware(bp);
3747 }
3748 
3749 static u32
3750 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3751 {
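	/* Patch page-size-dependent constants into the RV2P image so the
	 * firmware matches the host BD page size.
	 */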
3752 	switch (idx) {
3753 	case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3754 		rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3755 		rv2p_code |= RV2P_BD_PAGE_SIZE;
3756 		break;
3757 	}
3758 	return rv2p_code;
3759 }
3760 
3761 static int
3762 load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
3763 	     const struct bnx2_rv2p_fw_file_entry *fw_entry)
3764 {
3765 	u32 rv2p_code_len, file_offset;
3766 	__be32 *rv2p_code;
3767 	int i;
3768 	u32 val, cmd, addr;
3769 
3770 	rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
3771 	file_offset = be32_to_cpu(fw_entry->rv2p.offset);
3772 
3773 	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3774 
3775 	if (rv2p_proc == RV2P_PROC1) {
3776 		cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3777 		addr = BNX2_RV2P_PROC1_ADDR_CMD;
3778 	} else {
3779 		cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3780 		addr = BNX2_RV2P_PROC2_ADDR_CMD;
3781 	}
3782 
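	/* Each RV2P instruction is 64 bits wide and is written as a
	 * high/low pair of 32-bit words; (i / 8) is the instruction index.
	 */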
3783 	for (i = 0; i < rv2p_code_len; i += 8) {
3784 		BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
3785 		rv2p_code++;
3786 		BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
3787 		rv2p_code++;
3788 
3789 		val = (i / 8) | cmd;
3790 		BNX2_WR(bp, addr, val);
3791 	}
3792 
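	/* Apply the fixup records: each nonzero entry names an instruction
	 * to rewrite, with the high word at loc - 1 and the low word at loc.
	 */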
3793 	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3794 	for (i = 0; i < 8; i++) {
3795 		u32 loc, code;
3796 
3797 		loc = be32_to_cpu(fw_entry->fixup[i]);
3798 		if (loc && ((loc * 4) < rv2p_code_len)) {
3799 			code = be32_to_cpu(*(rv2p_code + loc - 1));
3800 			BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
3801 			code = be32_to_cpu(*(rv2p_code + loc));
3802 			code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
3803 			BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, code);
3804 
3805 			val = (loc / 2) | cmd;
3806 			BNX2_WR(bp, addr, val);
3807 		}
3808 	}
3809 
3810 	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1)
		BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	else
		BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3817 
3818 	return 0;
3819 }
3820 
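/* Load firmware into one on-chip MIPS CPU: halt the CPU, copy the
 * text, data and read-only sections into its scratchpad through the
 * indirect register interface, clear the prefetch instruction, point
 * the PC at the image's start address, and finally clear the halt
 * bit to start execution.
 */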
3821 static int
3822 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3823 	    const struct bnx2_mips_fw_file_entry *fw_entry)
3824 {
3825 	u32 addr, len, file_offset;
3826 	__be32 *data;
3827 	u32 offset;
3828 	u32 val;
3829 
3830 	/* Halt the CPU. */
3831 	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3832 	val |= cpu_reg->mode_value_halt;
3833 	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3834 	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3835 
3836 	/* Load the Text area. */
3837 	addr = be32_to_cpu(fw_entry->text.addr);
3838 	len = be32_to_cpu(fw_entry->text.len);
3839 	file_offset = be32_to_cpu(fw_entry->text.offset);
3840 	data = (__be32 *)(bp->mips_firmware->data + file_offset);
3841 
3842 	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3843 	if (len) {
3844 		int j;
3845 
3846 		for (j = 0; j < (len / 4); j++, offset += 4)
3847 			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3848 	}
3849 
3850 	/* Load the Data area. */
3851 	addr = be32_to_cpu(fw_entry->data.addr);
3852 	len = be32_to_cpu(fw_entry->data.len);
3853 	file_offset = be32_to_cpu(fw_entry->data.offset);
3854 	data = (__be32 *)(bp->mips_firmware->data + file_offset);
3855 
3856 	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3857 	if (len) {
3858 		int j;
3859 
3860 		for (j = 0; j < (len / 4); j++, offset += 4)
3861 			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3862 	}
3863 
3864 	/* Load the Read-Only area. */
3865 	addr = be32_to_cpu(fw_entry->rodata.addr);
3866 	len = be32_to_cpu(fw_entry->rodata.len);
3867 	file_offset = be32_to_cpu(fw_entry->rodata.offset);
3868 	data = (__be32 *)(bp->mips_firmware->data + file_offset);
3869 
3870 	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3871 	if (len) {
3872 		int j;
3873 
3874 		for (j = 0; j < (len / 4); j++, offset += 4)
3875 			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3876 	}
3877 
3878 	/* Clear the pre-fetch instruction. */
3879 	bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3880 
3881 	val = be32_to_cpu(fw_entry->start_addr);
3882 	bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3883 
3884 	/* Start the CPU. */
3885 	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3886 	val &= ~cpu_reg->mode_value_halt;
3887 	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3888 	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3889 
3890 	return 0;
3891 }
3892 
3893 static int
3894 bnx2_init_cpus(struct bnx2 *bp)
3895 {
3896 	const struct bnx2_mips_fw_file *mips_fw =
3897 		(const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3898 	const struct bnx2_rv2p_fw_file *rv2p_fw =
3899 		(const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3900 	int rc;
3901 
3902 	/* Initialize the RV2P processor. */
3903 	load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3904 	load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3905 
3906 	/* Initialize the RX Processor. */
3907 	rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3908 	if (rc)
3909 		goto init_cpu_err;
3910 
3911 	/* Initialize the TX Processor. */
3912 	rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3913 	if (rc)
3914 		goto init_cpu_err;
3915 
3916 	/* Initialize the TX Patch-up Processor. */
3917 	rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3918 	if (rc)
3919 		goto init_cpu_err;
3920 
3921 	/* Initialize the Completion Processor. */
3922 	rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3923 	if (rc)
3924 		goto init_cpu_err;
3925 
3926 	/* Initialize the Command Processor. */
3927 	rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3928 
3929 init_cpu_err:
3930 	return rc;
3931 }
3932 
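/* Prepare the MAC for Wake-on-LAN.  On copper ports the PHY is
 * renegotiated advertising only 10/100 (presumably to cut power in
 * suspend), magic-packet and ACPI wakeup-frame reception is enabled,
 * all multicast is accepted, and the firmware is told which suspend
 * mode (WOL or no-WOL) was selected.
 */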
3933 static void
3934 bnx2_setup_wol(struct bnx2 *bp)
3935 {
3936 	int i;
3937 	u32 val, wol_msg;
3938 
3939 	if (bp->wol) {
3940 		u32 advertising;
3941 		u8 autoneg;
3942 
3943 		autoneg = bp->autoneg;
3944 		advertising = bp->advertising;
3945 
3946 		if (bp->phy_port == PORT_TP) {
3947 			bp->autoneg = AUTONEG_SPEED;
3948 			bp->advertising = ADVERTISED_10baseT_Half |
3949 				ADVERTISED_10baseT_Full |
3950 				ADVERTISED_100baseT_Half |
3951 				ADVERTISED_100baseT_Full |
3952 				ADVERTISED_Autoneg;
3953 		}
3954 
3955 		spin_lock_bh(&bp->phy_lock);
3956 		bnx2_setup_phy(bp, bp->phy_port);
3957 		spin_unlock_bh(&bp->phy_lock);
3958 
3959 		bp->autoneg = autoneg;
3960 		bp->advertising = advertising;
3961 
3962 		bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
3963 
3964 		val = BNX2_RD(bp, BNX2_EMAC_MODE);
3965 
3966 		/* Enable port mode. */
3967 		val &= ~BNX2_EMAC_MODE_PORT;
3968 		val |= BNX2_EMAC_MODE_MPKT_RCVD |
3969 		       BNX2_EMAC_MODE_ACPI_RCVD |
3970 		       BNX2_EMAC_MODE_MPKT;
3971 		if (bp->phy_port == PORT_TP) {
3972 			val |= BNX2_EMAC_MODE_PORT_MII;
3973 		} else {
3974 			val |= BNX2_EMAC_MODE_PORT_GMII;
3975 			if (bp->line_speed == SPEED_2500)
3976 				val |= BNX2_EMAC_MODE_25G_MODE;
3977 		}
3978 
3979 		BNX2_WR(bp, BNX2_EMAC_MODE, val);
3980 
3981 		/* receive all multicast */
3982 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3983 			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3984 				0xffffffff);
3985 		}
3986 		BNX2_WR(bp, BNX2_EMAC_RX_MODE, BNX2_EMAC_RX_MODE_SORT_MODE);
3987 
3988 		val = 1 | BNX2_RPM_SORT_USER0_BC_EN | BNX2_RPM_SORT_USER0_MC_EN;
3989 		BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3990 		BNX2_WR(bp, BNX2_RPM_SORT_USER0, val);
3991 		BNX2_WR(bp, BNX2_RPM_SORT_USER0, val | BNX2_RPM_SORT_USER0_ENA);
3992 
3993 		/* Need to enable EMAC and RPM for WOL. */
3994 		BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3995 			BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3996 			BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3997 			BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3998 
3999 		val = BNX2_RD(bp, BNX2_RPM_CONFIG);
4000 		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
4001 		BNX2_WR(bp, BNX2_RPM_CONFIG, val);
4002 
4003 		wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
4004 	} else {
		wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
4006 	}
4007 
4008 	if (!(bp->flags & BNX2_FLAG_NO_WOL)) {
4009 		u32 val;
4010 
4011 		wol_msg |= BNX2_DRV_MSG_DATA_WAIT3;
4012 		if (bp->fw_last_msg || BNX2_CHIP(bp) != BNX2_CHIP_5709) {
4013 			bnx2_fw_sync(bp, wol_msg, 1, 0);
4014 			return;
4015 		}
4016 		/* Tell firmware not to power down the PHY yet, otherwise
4017 		 * the chip will take a long time to respond to MMIO reads.
4018 		 */
4019 		val = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
4020 		bnx2_shmem_wr(bp, BNX2_PORT_FEATURE,
4021 			      val | BNX2_PORT_FEATURE_ASF_ENABLED);
4022 		bnx2_fw_sync(bp, wol_msg, 1, 0);
4023 		bnx2_shmem_wr(bp, BNX2_PORT_FEATURE, val);
4024 	}
}
4027 
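/* Move the device between D0 and D3hot.  Returning to D0 clears the
 * magic-packet/ACPI receive modes left over from WOL; entering D3hot
 * programs WOL first and then drops the power state, after which no
 * MMIO access may be made until the device is back in D0.
 */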
4028 static int
4029 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
4030 {
4031 	switch (state) {
4032 	case PCI_D0: {
4033 		u32 val;
4034 
4035 		pci_enable_wake(bp->pdev, PCI_D0, false);
4036 		pci_set_power_state(bp->pdev, PCI_D0);
4037 
4038 		val = BNX2_RD(bp, BNX2_EMAC_MODE);
4039 		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
4040 		val &= ~BNX2_EMAC_MODE_MPKT;
4041 		BNX2_WR(bp, BNX2_EMAC_MODE, val);
4042 
4043 		val = BNX2_RD(bp, BNX2_RPM_CONFIG);
4044 		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
4045 		BNX2_WR(bp, BNX2_RPM_CONFIG, val);
4046 		break;
4047 	}
4048 	case PCI_D3hot: {
4049 		bnx2_setup_wol(bp);
4050 		pci_wake_from_d3(bp->pdev, bp->wol);
4051 		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
4052 		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)) {
4053 
4054 			if (bp->wol)
4055 				pci_set_power_state(bp->pdev, PCI_D3hot);
4056 			break;
4057 
4058 		}
4059 		if (!bp->fw_last_msg && BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4060 			u32 val;
4061 
4062 			/* Tell firmware not to power down the PHY yet,
4063 			 * otherwise the other port may not respond to
4064 			 * MMIO reads.
4065 			 */
4066 			val = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
4067 			val &= ~BNX2_CONDITION_PM_STATE_MASK;
4068 			val |= BNX2_CONDITION_PM_STATE_UNPREP;
4069 			bnx2_shmem_wr(bp, BNX2_BC_STATE_CONDITION, val);
4070 		}
4071 		pci_set_power_state(bp->pdev, PCI_D3hot);
4072 
4073 		/* No more memory access after this point until
4074 		 * device is brought back to D0.
4075 		 */
4076 		break;
4077 	}
4078 	default:
4079 		return -EINVAL;
4080 	}
4081 	return 0;
4082 }
4083 
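/* NVRAM access is arbitrated between the driver and the firmware
 * through the software arbitration register.  Request arbiter slot 2
 * and poll (5 usec per iteration, NVRAM_TIMEOUT_COUNT iterations at
 * most) for the grant bit; -EBUSY means the arbiter never granted it.
 */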
4084 static int
4085 bnx2_acquire_nvram_lock(struct bnx2 *bp)
4086 {
4087 	u32 val;
4088 	int j;
4089 
4090 	/* Request access to the flash interface. */
4091 	BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4092 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4093 		val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4094 		if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4095 			break;
4096 
4097 		udelay(5);
4098 	}
4099 
4100 	if (j >= NVRAM_TIMEOUT_COUNT)
4101 		return -EBUSY;
4102 
4103 	return 0;
4104 }
4105 
4106 static int
4107 bnx2_release_nvram_lock(struct bnx2 *bp)
4108 {
4109 	int j;
4110 	u32 val;
4111 
4112 	/* Relinquish nvram interface. */
4113 	BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4114 
4115 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4116 		val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4117 		if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4118 			break;
4119 
4120 		udelay(5);
4121 	}
4122 
4123 	if (j >= NVRAM_TIMEOUT_COUNT)
4124 		return -EBUSY;
4125 
4126 	return 0;
4127 }
4128 
4129 
4130 static int
4131 bnx2_enable_nvram_write(struct bnx2 *bp)
4132 {
4133 	u32 val;
4134 
4135 	val = BNX2_RD(bp, BNX2_MISC_CFG);
4136 	BNX2_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4137 
4138 	if (bp->flash_info->flags & BNX2_NV_WREN) {
4139 		int j;
4140 
4141 		BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4142 		BNX2_WR(bp, BNX2_NVM_COMMAND,
4143 			BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4144 
4145 		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4146 			udelay(5);
4147 
4148 			val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4149 			if (val & BNX2_NVM_COMMAND_DONE)
4150 				break;
4151 		}
4152 
4153 		if (j >= NVRAM_TIMEOUT_COUNT)
4154 			return -EBUSY;
4155 	}
4156 	return 0;
4157 }
4158 
4159 static void
4160 bnx2_disable_nvram_write(struct bnx2 *bp)
4161 {
4162 	u32 val;
4163 
4164 	val = BNX2_RD(bp, BNX2_MISC_CFG);
4165 	BNX2_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4166 }
4167 
4168 
4169 static void
4170 bnx2_enable_nvram_access(struct bnx2 *bp)
4171 {
4172 	u32 val;
4173 
4174 	val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4175 	/* Enable both bits, even on read. */
4176 	BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4177 		val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4178 }
4179 
4180 static void
4181 bnx2_disable_nvram_access(struct bnx2 *bp)
4182 {
4183 	u32 val;
4184 
4185 	val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4186 	/* Disable both bits, even after read. */
4187 	BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4188 		val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4189 			BNX2_NVM_ACCESS_ENABLE_WR_EN));
4190 }
4191 
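/* Erase the flash page containing @offset.  Buffered flash parts
 * erase internally on write, so this is a no-op for them; otherwise
 * an ERASE command is issued and polled for completion.
 */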
4192 static int
4193 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
4194 {
4195 	u32 cmd;
4196 	int j;
4197 
4198 	if (bp->flash_info->flags & BNX2_NV_BUFFERED)
4199 		/* Buffered flash, no erase needed */
4200 		return 0;
4201 
4202 	/* Build an erase command */
4203 	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
4204 	      BNX2_NVM_COMMAND_DOIT;
4205 
4206 	/* Need to clear DONE bit separately. */
4207 	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4208 
	/* Address of the NVRAM page to erase. */
4210 	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4211 
4212 	/* Issue an erase command. */
4213 	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4214 
4215 	/* Wait for completion. */
4216 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4217 		u32 val;
4218 
4219 		udelay(5);
4220 
4221 		val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4222 		if (val & BNX2_NVM_COMMAND_DONE)
4223 			break;
4224 	}
4225 
4226 	if (j >= NVRAM_TIMEOUT_COUNT)
4227 		return -EBUSY;
4228 
4229 	return 0;
4230 }
4231 
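/* Read one 32-bit word from NVRAM.  For parts flagged
 * BNX2_NV_TRANSLATE the linear offset is first converted to the
 * device's page/byte addressing:
 *
 *	hw_off = ((off / page_size) << page_bits) + (off % page_size)
 *
 * For example, with an (illustrative) 264-byte page and 9 page bits,
 * offset 1000 maps to (3 << 9) + 208 = 1744.  The word read from
 * BNX2_NVM_READ is stored in ret_val in big-endian byte order.
 */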
4232 static int
4233 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
4234 {
4235 	u32 cmd;
4236 	int j;
4237 
4238 	/* Build the command word. */
4239 	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
4240 
	/* Translate the offset for buffered flash parts; not needed on the 5709. */
4242 	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4243 		offset = ((offset / bp->flash_info->page_size) <<
4244 			   bp->flash_info->page_bits) +
4245 			  (offset % bp->flash_info->page_size);
4246 	}
4247 
4248 	/* Need to clear DONE bit separately. */
4249 	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4250 
4251 	/* Address of the NVRAM to read from. */
4252 	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4253 
4254 	/* Issue a read command. */
4255 	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4256 
4257 	/* Wait for completion. */
4258 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4259 		u32 val;
4260 
4261 		udelay(5);
4262 
4263 		val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4264 		if (val & BNX2_NVM_COMMAND_DONE) {
4265 			__be32 v = cpu_to_be32(BNX2_RD(bp, BNX2_NVM_READ));
4266 			memcpy(ret_val, &v, 4);
4267 			break;
4268 		}
4269 	}
4270 	if (j >= NVRAM_TIMEOUT_COUNT)
4271 		return -EBUSY;
4272 
4273 	return 0;
4274 }
4275 
4276 
4277 static int
4278 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4279 {
4280 	u32 cmd;
4281 	__be32 val32;
4282 	int j;
4283 
4284 	/* Build the command word. */
4285 	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4286 
	/* Translate the offset for buffered flash parts; not needed on the 5709. */
4288 	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4289 		offset = ((offset / bp->flash_info->page_size) <<
4290 			  bp->flash_info->page_bits) +
4291 			 (offset % bp->flash_info->page_size);
4292 	}
4293 
4294 	/* Need to clear DONE bit separately. */
4295 	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4296 
4297 	memcpy(&val32, val, 4);
4298 
4299 	/* Write the data. */
4300 	BNX2_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4301 
4302 	/* Address of the NVRAM to write to. */
4303 	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4304 
4305 	/* Issue the write command. */
4306 	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4307 
4308 	/* Wait for completion. */
4309 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4310 		udelay(5);
4311 
4312 		if (BNX2_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4313 			break;
4314 	}
4315 	if (j >= NVRAM_TIMEOUT_COUNT)
4316 		return -EBUSY;
4317 
4318 	return 0;
4319 }
4320 
4321 static int
4322 bnx2_init_nvram(struct bnx2 *bp)
4323 {
4324 	u32 val;
4325 	int j, entry_count, rc = 0;
4326 	const struct flash_spec *flash;
4327 
4328 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4329 		bp->flash_info = &flash_5709;
4330 		goto get_flash_size;
4331 	}
4332 
4333 	/* Determine the selected interface. */
4334 	val = BNX2_RD(bp, BNX2_NVM_CFG1);
4335 
4336 	entry_count = ARRAY_SIZE(flash_table);
4337 
4338 	if (val & 0x40000000) {
4339 
4340 		/* Flash interface has been reconfigured */
4341 		for (j = 0, flash = &flash_table[0]; j < entry_count;
4342 		     j++, flash++) {
4343 			if ((val & FLASH_BACKUP_STRAP_MASK) ==
4344 			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
4345 				bp->flash_info = flash;
4346 				break;
4347 			}
4348 		}
	} else {
4351 		u32 mask;
		/* Not yet reconfigured */
4353 
4354 		if (val & (1 << 23))
4355 			mask = FLASH_BACKUP_STRAP_MASK;
4356 		else
4357 			mask = FLASH_STRAP_MASK;
4358 
4359 		for (j = 0, flash = &flash_table[0]; j < entry_count;
4360 			j++, flash++) {
4361 
4362 			if ((val & mask) == (flash->strapping & mask)) {
4363 				bp->flash_info = flash;
4364 
4365 				/* Request access to the flash interface. */
4366 				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4367 					return rc;
4368 
4369 				/* Enable access to flash interface */
4370 				bnx2_enable_nvram_access(bp);
4371 
4372 				/* Reconfigure the flash interface */
4373 				BNX2_WR(bp, BNX2_NVM_CFG1, flash->config1);
4374 				BNX2_WR(bp, BNX2_NVM_CFG2, flash->config2);
4375 				BNX2_WR(bp, BNX2_NVM_CFG3, flash->config3);
4376 				BNX2_WR(bp, BNX2_NVM_WRITE1, flash->write1);
4377 
4378 				/* Disable access to flash interface */
4379 				bnx2_disable_nvram_access(bp);
4380 				bnx2_release_nvram_lock(bp);
4381 
4382 				break;
4383 			}
4384 		}
4385 	} /* if (val & 0x40000000) */
4386 
4387 	if (j == entry_count) {
4388 		bp->flash_info = NULL;
4389 		pr_alert("Unknown flash/EEPROM type\n");
4390 		return -ENODEV;
4391 	}
4392 
4393 get_flash_size:
4394 	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
4395 	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
4396 	if (val)
4397 		bp->flash_size = val;
4398 	else
4399 		bp->flash_size = bp->flash_info->total_size;
4400 
4401 	return rc;
4402 }
4403 
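/* Read an arbitrary byte range from NVRAM.  The hardware only moves
 * aligned 32-bit words, so an unaligned head is handled by reading
 * the surrounding dword and copying pre_len bytes out of it, and an
 * unaligned tail by reading one extra dword and discarding 'extra'
 * bytes.  The FIRST and LAST command flags bracket the whole burst
 * for the flash part.
 */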
4404 static int
4405 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4406 		int buf_size)
4407 {
4408 	int rc = 0;
4409 	u32 cmd_flags, offset32, len32, extra;
4410 
4411 	if (buf_size == 0)
4412 		return 0;
4413 
4414 	/* Request access to the flash interface. */
4415 	if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4416 		return rc;
4417 
4418 	/* Enable access to flash interface */
4419 	bnx2_enable_nvram_access(bp);
4420 
4421 	len32 = buf_size;
4422 	offset32 = offset;
4423 	extra = 0;
4424 
4425 	cmd_flags = 0;
4426 
4427 	if (offset32 & 3) {
4428 		u8 buf[4];
4429 		u32 pre_len;
4430 
4431 		offset32 &= ~3;
4432 		pre_len = 4 - (offset & 3);
4433 
4434 		if (pre_len >= len32) {
4435 			pre_len = len32;
4436 			cmd_flags = BNX2_NVM_COMMAND_FIRST |
4437 				    BNX2_NVM_COMMAND_LAST;
		} else {
4440 			cmd_flags = BNX2_NVM_COMMAND_FIRST;
4441 		}
4442 
4443 		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4444 
4445 		if (rc)
4446 			return rc;
4447 
4448 		memcpy(ret_buf, buf + (offset & 3), pre_len);
4449 
4450 		offset32 += 4;
4451 		ret_buf += pre_len;
4452 		len32 -= pre_len;
4453 	}
4454 	if (len32 & 3) {
4455 		extra = 4 - (len32 & 3);
4456 		len32 = (len32 + 4) & ~3;
4457 	}
4458 
4459 	if (len32 == 4) {
4460 		u8 buf[4];
4461 
4462 		if (cmd_flags)
4463 			cmd_flags = BNX2_NVM_COMMAND_LAST;
4464 		else
4465 			cmd_flags = BNX2_NVM_COMMAND_FIRST |
4466 				    BNX2_NVM_COMMAND_LAST;
4467 
4468 		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4469 
4470 		memcpy(ret_buf, buf, 4 - extra);
	} else if (len32 > 0) {
4473 		u8 buf[4];
4474 
4475 		/* Read the first word. */
4476 		if (cmd_flags)
4477 			cmd_flags = 0;
4478 		else
4479 			cmd_flags = BNX2_NVM_COMMAND_FIRST;
4480 
4481 		rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4482 
4483 		/* Advance to the next dword. */
4484 		offset32 += 4;
4485 		ret_buf += 4;
4486 		len32 -= 4;
4487 
4488 		while (len32 > 4 && rc == 0) {
4489 			rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4490 
4491 			/* Advance to the next dword. */
4492 			offset32 += 4;
4493 			ret_buf += 4;
4494 			len32 -= 4;
4495 		}
4496 
4497 		if (rc)
4498 			return rc;
4499 
4500 		cmd_flags = BNX2_NVM_COMMAND_LAST;
4501 		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4502 
4503 		memcpy(ret_buf, buf, 4 - extra);
4504 	}
4505 
4506 	/* Disable access to flash interface */
4507 	bnx2_disable_nvram_access(bp);
4508 
4509 	bnx2_release_nvram_lock(bp);
4510 
4511 	return rc;
4512 }
4513 
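/* Write an arbitrary byte range to NVRAM.  Unaligned edges are read
 * back first so that only whole dwords are ever written.  On
 * unbuffered flash each page is updated read-modify-write: the page
 * is read into flash_buffer, erased, and rewritten with the new
 * bytes spliced in between the preserved head (page_start to
 * data_start) and tail (data_end to page_end).
 */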
4514 static int
4515 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4516 		int buf_size)
4517 {
4518 	u32 written, offset32, len32;
4519 	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4520 	int rc = 0;
4521 	int align_start, align_end;
4522 
4523 	buf = data_buf;
4524 	offset32 = offset;
4525 	len32 = buf_size;
4526 	align_start = align_end = 0;
4527 
4528 	if ((align_start = (offset32 & 3))) {
4529 		offset32 &= ~3;
4530 		len32 += align_start;
4531 		if (len32 < 4)
4532 			len32 = 4;
4533 		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4534 			return rc;
4535 	}
4536 
4537 	if (len32 & 3) {
4538 		align_end = 4 - (len32 & 3);
4539 		len32 += align_end;
4540 		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4541 			return rc;
4542 	}
4543 
4544 	if (align_start || align_end) {
4545 		align_buf = kmalloc(len32, GFP_KERNEL);
4546 		if (align_buf == NULL)
4547 			return -ENOMEM;
4548 		if (align_start) {
4549 			memcpy(align_buf, start, 4);
4550 		}
4551 		if (align_end) {
4552 			memcpy(align_buf + len32 - 4, end, 4);
4553 		}
4554 		memcpy(align_buf + align_start, data_buf, buf_size);
4555 		buf = align_buf;
4556 	}
4557 
4558 	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4559 		flash_buffer = kmalloc(264, GFP_KERNEL);
4560 		if (flash_buffer == NULL) {
4561 			rc = -ENOMEM;
4562 			goto nvram_write_end;
4563 		}
4564 	}
4565 
4566 	written = 0;
4567 	while ((written < len32) && (rc == 0)) {
4568 		u32 page_start, page_end, data_start, data_end;
4569 		u32 addr, cmd_flags;
4570 		int i;
4571 
		/* Find the page_start addr */
4573 		page_start = offset32 + written;
4574 		page_start -= (page_start % bp->flash_info->page_size);
4575 		/* Find the page_end addr */
4576 		page_end = page_start + bp->flash_info->page_size;
4577 		/* Find the data_start addr */
4578 		data_start = (written == 0) ? offset32 : page_start;
4579 		/* Find the data_end addr */
4580 		data_end = (page_end > offset32 + len32) ?
4581 			(offset32 + len32) : page_end;
4582 
4583 		/* Request access to the flash interface. */
4584 		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4585 			goto nvram_write_end;
4586 
4587 		/* Enable access to flash interface */
4588 		bnx2_enable_nvram_access(bp);
4589 
4590 		cmd_flags = BNX2_NVM_COMMAND_FIRST;
4591 		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4592 			int j;
4593 
			/* Read the whole page into the buffer
			 * (non-buffered flash only) */
4596 			for (j = 0; j < bp->flash_info->page_size; j += 4) {
4597 				if (j == (bp->flash_info->page_size - 4)) {
4598 					cmd_flags |= BNX2_NVM_COMMAND_LAST;
4599 				}
4600 				rc = bnx2_nvram_read_dword(bp,
4601 					page_start + j,
4602 					&flash_buffer[j],
4603 					cmd_flags);
4604 
4605 				if (rc)
4606 					goto nvram_write_end;
4607 
4608 				cmd_flags = 0;
4609 			}
4610 		}
4611 
4612 		/* Enable writes to flash interface (unlock write-protect) */
4613 		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4614 			goto nvram_write_end;
4615 
4616 		/* Loop to write back the buffer data from page_start to
4617 		 * data_start */
4618 		i = 0;
4619 		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4620 			/* Erase the page */
4621 			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4622 				goto nvram_write_end;
4623 
4624 			/* Re-enable the write again for the actual write */
4625 			bnx2_enable_nvram_write(bp);
4626 
4627 			for (addr = page_start; addr < data_start;
4628 				addr += 4, i += 4) {
4629 
4630 				rc = bnx2_nvram_write_dword(bp, addr,
4631 					&flash_buffer[i], cmd_flags);
4632 
4633 				if (rc != 0)
4634 					goto nvram_write_end;
4635 
4636 				cmd_flags = 0;
4637 			}
4638 		}
4639 
4640 		/* Loop to write the new data from data_start to data_end */
4641 		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4642 			if ((addr == page_end - 4) ||
4643 				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4644 				 (addr == data_end - 4))) {
4645 
4646 				cmd_flags |= BNX2_NVM_COMMAND_LAST;
4647 			}
4648 			rc = bnx2_nvram_write_dword(bp, addr, buf,
4649 				cmd_flags);
4650 
4651 			if (rc != 0)
4652 				goto nvram_write_end;
4653 
4654 			cmd_flags = 0;
4655 			buf += 4;
4656 		}
4657 
4658 		/* Loop to write back the buffer data from data_end
4659 		 * to page_end */
4660 		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4661 			for (addr = data_end; addr < page_end;
4662 				addr += 4, i += 4) {
4663 
				if (addr == page_end - 4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
4667 				rc = bnx2_nvram_write_dword(bp, addr,
4668 					&flash_buffer[i], cmd_flags);
4669 
4670 				if (rc != 0)
4671 					goto nvram_write_end;
4672 
4673 				cmd_flags = 0;
4674 			}
4675 		}
4676 
4677 		/* Disable writes to flash interface (lock write-protect) */
4678 		bnx2_disable_nvram_write(bp);
4679 
4680 		/* Disable access to flash interface */
4681 		bnx2_disable_nvram_access(bp);
4682 		bnx2_release_nvram_lock(bp);
4683 
4684 		/* Increment written */
4685 		written += data_end - data_start;
4686 	}
4687 
4688 nvram_write_end:
4689 	kfree(flash_buffer);
4690 	kfree(align_buf);
4691 	return rc;
4692 }
4693 
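/* Negotiate optional capabilities with the bootcode.  The shared
 * memory capability word is trusted only if it carries the expected
 * signature; VLAN keeping and remote PHY support are then latched
 * into bp->flags/bp->phy_flags and acknowledged back to the firmware
 * while the interface is running.
 */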
4694 static void
4695 bnx2_init_fw_cap(struct bnx2 *bp)
4696 {
4697 	u32 val, sig = 0;
4698 
4699 	bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4700 	bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4701 
4702 	if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4703 		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4704 
4705 	val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4706 	if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4707 		return;
4708 
4709 	if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4710 		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4711 		sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4712 	}
4713 
4714 	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4715 	    (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4716 		u32 link;
4717 
4718 		bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4719 
4720 		link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4721 		if (link & BNX2_LINK_STATUS_SERDES_LINK)
4722 			bp->phy_port = PORT_FIBRE;
4723 		else
4724 			bp->phy_port = PORT_TP;
4725 
4726 		sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4727 		       BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4728 	}
4729 
4730 	if (netif_running(bp->dev) && sig)
4731 		bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4732 }
4733 
4734 static void
4735 bnx2_setup_msix_tbl(struct bnx2 *bp)
4736 {
4737 	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
4738 
4739 	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
4740 	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
4741 }
4742 
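/* Reset the chip core.  DMA is quiesced first (MISC_ENABLE clear
 * bits on the 5706/5708, the NEW_CORE_CTL DMA enable bit on the
 * 5709), the firmware is handshaken with bnx2_fw_sync() both before
 * and after the reset, and a driver reset signature is left in
 * shared memory so the bootcode treats this as a soft reset.
 */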
4743 static int
4744 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4745 {
4746 	u32 val;
4747 	int i, rc = 0;
4748 	u8 old_port;
4749 
4750 	/* Wait for the current PCI transaction to complete before
4751 	 * issuing a reset. */
4752 	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
4753 	    (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
4754 		BNX2_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4755 			BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4756 			BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4757 			BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4758 			BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4759 		val = BNX2_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4760 		udelay(5);
4761 	} else {  /* 5709 */
4762 		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4763 		val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4764 		BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4765 		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4766 
4767 		for (i = 0; i < 100; i++) {
4768 			msleep(1);
4769 			val = BNX2_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
4770 			if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
4771 				break;
4772 		}
4773 	}
4774 
4775 	/* Wait for the firmware to tell us it is ok to issue a reset. */
4776 	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
4777 
4778 	/* Deposit a driver reset signature so the firmware knows that
4779 	 * this is a soft reset. */
4780 	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
4781 		      BNX2_DRV_RESET_SIGNATURE_MAGIC);
4782 
	/* Do a dummy read to force the chip to complete all current
	 * transactions before we issue the reset. */
4785 	val = BNX2_RD(bp, BNX2_MISC_ID);
4786 
4787 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4788 		BNX2_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4789 		BNX2_RD(bp, BNX2_MISC_COMMAND);
4790 		udelay(5);
4791 
4792 		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4793 		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4794 
4795 		BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4796 
4797 	} else {
4798 		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4799 		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4800 		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4801 
4802 		/* Chip reset. */
4803 		BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4804 
4805 		/* Reading back any register after chip reset will hang the
4806 		 * bus on 5706 A0 and A1.  The msleep below provides plenty
4807 		 * of margin for write posting.
4808 		 */
4809 		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
4810 		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1))
4811 			msleep(20);
4812 
		/* Reset takes approximately 30 usec */
4814 		for (i = 0; i < 10; i++) {
4815 			val = BNX2_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4816 			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4817 				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4818 				break;
4819 			udelay(10);
4820 		}
4821 
4822 		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4823 			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4824 			pr_err("Chip reset did not complete\n");
4825 			return -EBUSY;
4826 		}
4827 	}
4828 
4829 	/* Make sure byte swapping is properly configured. */
4830 	val = BNX2_RD(bp, BNX2_PCI_SWAP_DIAG0);
4831 	if (val != 0x01020304) {
4832 		pr_err("Chip not in correct endian mode\n");
4833 		return -ENODEV;
4834 	}
4835 
4836 	/* Wait for the firmware to finish its initialization. */
4837 	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
4838 	if (rc)
4839 		return rc;
4840 
4841 	spin_lock_bh(&bp->phy_lock);
4842 	old_port = bp->phy_port;
4843 	bnx2_init_fw_cap(bp);
4844 	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
4845 	    old_port != bp->phy_port)
4846 		bnx2_set_default_remote_link(bp);
4847 	spin_unlock_bh(&bp->phy_lock);
4848 
4849 	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
		/* Adjust the voltage regulator two steps lower.  The default
		 * value of this register is 0x0000000e. */
4852 		BNX2_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4853 
4854 		/* Remove bad rbuf memory from the free pool. */
4855 		rc = bnx2_alloc_bad_rbuf(bp);
4856 	}
4857 
4858 	if (bp->flags & BNX2_FLAG_USING_MSIX) {
4859 		bnx2_setup_msix_tbl(bp);
		/* Prevent MSIX table reads and writes from timing out */
4861 		BNX2_WR(bp, BNX2_MISC_ECO_HW_CTL,
4862 			BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
4863 	}
4864 
4865 	return rc;
4866 }
4867 
4868 static int
4869 bnx2_init_chip(struct bnx2 *bp)
4870 {
4871 	u32 val, mtu;
4872 	int rc, i;
4873 
4874 	/* Make sure the interrupt is not active. */
4875 	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4876 
4877 	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4878 	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4879 #ifdef __BIG_ENDIAN
4880 	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4881 #endif
4882 	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4883 	      DMA_READ_CHANS << 12 |
4884 	      DMA_WRITE_CHANS << 16;
4885 
4886 	val |= (0x2 << 20) | (1 << 11);
4887 
4888 	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4889 		val |= (1 << 23);
4890 
4891 	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) &&
4892 	    (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0) &&
4893 	    !(bp->flags & BNX2_FLAG_PCIX))
4894 		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4895 
4896 	BNX2_WR(bp, BNX2_DMA_CONFIG, val);
4897 
4898 	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
4899 		val = BNX2_RD(bp, BNX2_TDMA_CONFIG);
4900 		val |= BNX2_TDMA_CONFIG_ONE_DMA;
4901 		BNX2_WR(bp, BNX2_TDMA_CONFIG, val);
4902 	}
4903 
4904 	if (bp->flags & BNX2_FLAG_PCIX) {
4905 		u16 val16;
4906 
4907 		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4908 				     &val16);
4909 		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4910 				      val16 & ~PCI_X_CMD_ERO);
4911 	}
4912 
4913 	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4914 		BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4915 		BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4916 		BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4917 
4918 	/* Initialize context mapping and zero out the quick contexts.  The
4919 	 * context block must have already been enabled. */
4920 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4921 		rc = bnx2_init_5709_context(bp);
4922 		if (rc)
4923 			return rc;
	} else {
		bnx2_init_context(bp);
	}
4926 
4927 	if ((rc = bnx2_init_cpus(bp)) != 0)
4928 		return rc;
4929 
4930 	bnx2_init_nvram(bp);
4931 
4932 	bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4933 
4934 	val = BNX2_RD(bp, BNX2_MQ_CONFIG);
4935 	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4936 	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4937 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4938 		val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
4939 		if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
4940 			val |= BNX2_MQ_CONFIG_HALT_DIS;
4941 	}
4942 
4943 	BNX2_WR(bp, BNX2_MQ_CONFIG, val);
4944 
4945 	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4946 	BNX2_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4947 	BNX2_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4948 
4949 	val = (BNX2_PAGE_BITS - 8) << 24;
4950 	BNX2_WR(bp, BNX2_RV2P_CONFIG, val);
4951 
4952 	/* Configure page size. */
4953 	val = BNX2_RD(bp, BNX2_TBDR_CONFIG);
4954 	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4955 	val |= (BNX2_PAGE_BITS - 8) << 24 | 0x40;
4956 	BNX2_WR(bp, BNX2_TBDR_CONFIG, val);
4957 
4958 	val = bp->mac_addr[0] +
4959 	      (bp->mac_addr[1] << 8) +
4960 	      (bp->mac_addr[2] << 16) +
4961 	      bp->mac_addr[3] +
4962 	      (bp->mac_addr[4] << 8) +
4963 	      (bp->mac_addr[5] << 16);
4964 	BNX2_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4965 
4966 	/* Program the MTU.  Also include 4 bytes for CRC32. */
4967 	mtu = bp->dev->mtu;
4968 	val = mtu + ETH_HLEN + ETH_FCS_LEN;
4969 	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4970 		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4971 	BNX2_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4972 
4973 	if (mtu < 1500)
4974 		mtu = 1500;
4975 
4976 	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
4977 	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
4978 	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
4979 
4980 	memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
4981 	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4982 		bp->bnx2_napi[i].last_status_idx = 0;
4983 
4984 	bp->idle_chk_status_idx = 0xffff;
4985 
4986 	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4987 
4988 	/* Set up how to generate a link change interrupt. */
4989 	BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4990 
4991 	BNX2_WR(bp, BNX2_HC_STATUS_ADDR_L,
4992 		(u64) bp->status_blk_mapping & 0xffffffff);
4993 	BNX2_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4994 
4995 	BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4996 		(u64) bp->stats_blk_mapping & 0xffffffff);
4997 	BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4998 		(u64) bp->stats_blk_mapping >> 32);
4999 
5000 	BNX2_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
5001 		(bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
5002 
5003 	BNX2_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
5004 		(bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
5005 
5006 	BNX2_WR(bp, BNX2_HC_COMP_PROD_TRIP,
5007 		(bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
5008 
5009 	BNX2_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
5010 
5011 	BNX2_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
5012 
5013 	BNX2_WR(bp, BNX2_HC_COM_TICKS,
5014 		(bp->com_ticks_int << 16) | bp->com_ticks);
5015 
5016 	BNX2_WR(bp, BNX2_HC_CMD_TICKS,
5017 		(bp->cmd_ticks_int << 16) | bp->cmd_ticks);
5018 
5019 	if (bp->flags & BNX2_FLAG_BROKEN_STATS)
5020 		BNX2_WR(bp, BNX2_HC_STATS_TICKS, 0);
5021 	else
5022 		BNX2_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
5023 	BNX2_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */
5024 
	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) {
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	} else {
5028 		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
5029 		      BNX2_HC_CONFIG_COLLECT_STATS;
5030 	}
5031 
5032 	if (bp->flags & BNX2_FLAG_USING_MSIX) {
5033 		BNX2_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
5034 			BNX2_HC_MSIX_BIT_VECTOR_VAL);
5035 
5036 		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
5037 	}
5038 
5039 	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
5040 		val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;
5041 
5042 	BNX2_WR(bp, BNX2_HC_CONFIG, val);
5043 
5044 	if (bp->rx_ticks < 25)
5045 		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1);
5046 	else
5047 		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 0);
5048 
5049 	for (i = 1; i < bp->irq_nvecs; i++) {
5050 		u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
5051 			   BNX2_HC_SB_CONFIG_1;
5052 
5053 		BNX2_WR(bp, base,
5054 			BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
5055 			BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
5056 			BNX2_HC_SB_CONFIG_1_ONE_SHOT);
5057 
5058 		BNX2_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
5059 			(bp->tx_quick_cons_trip_int << 16) |
5060 			 bp->tx_quick_cons_trip);
5061 
5062 		BNX2_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
5063 			(bp->tx_ticks_int << 16) | bp->tx_ticks);
5064 
5065 		BNX2_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
5066 			(bp->rx_quick_cons_trip_int << 16) |
5067 			bp->rx_quick_cons_trip);
5068 
5069 		BNX2_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
5070 			(bp->rx_ticks_int << 16) | bp->rx_ticks);
5071 	}
5072 
5073 	/* Clear internal stats counters. */
5074 	BNX2_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
5075 
5076 	BNX2_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
5077 
5078 	/* Initialize the receive filter. */
5079 	bnx2_set_rx_mode(bp->dev);
5080 
5081 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5082 		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
5083 		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
5084 		BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
5085 	}
5086 	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
5087 			  1, 0);
5088 
5089 	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
5090 	BNX2_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
5091 
5092 	udelay(20);
5093 
5094 	bp->hc_cmd = BNX2_RD(bp, BNX2_HC_COMMAND);
5095 
5096 	return rc;
5097 }
5098 
5099 static void
5100 bnx2_clear_ring_states(struct bnx2 *bp)
5101 {
5102 	struct bnx2_napi *bnapi;
5103 	struct bnx2_tx_ring_info *txr;
5104 	struct bnx2_rx_ring_info *rxr;
5105 	int i;
5106 
5107 	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5108 		bnapi = &bp->bnx2_napi[i];
5109 		txr = &bnapi->tx_ring;
5110 		rxr = &bnapi->rx_ring;
5111 
5112 		txr->tx_cons = 0;
5113 		txr->hw_tx_cons = 0;
5114 		rxr->rx_prod_bseq = 0;
5115 		rxr->rx_prod = 0;
5116 		rxr->rx_cons = 0;
5117 		rxr->rx_pg_prod = 0;
5118 		rxr->rx_pg_cons = 0;
5119 	}
5120 }
5121 
5122 static void
5123 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5124 {
5125 	u32 val, offset0, offset1, offset2, offset3;
5126 	u32 cid_addr = GET_CID_ADDR(cid);
5127 
5128 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5129 		offset0 = BNX2_L2CTX_TYPE_XI;
5130 		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5131 		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5132 		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5133 	} else {
5134 		offset0 = BNX2_L2CTX_TYPE;
5135 		offset1 = BNX2_L2CTX_CMD_TYPE;
5136 		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5137 		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5138 	}
5139 	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
5140 	bnx2_ctx_wr(bp, cid_addr, offset0, val);
5141 
5142 	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5143 	bnx2_ctx_wr(bp, cid_addr, offset1, val);
5144 
5145 	val = (u64) txr->tx_desc_mapping >> 32;
5146 	bnx2_ctx_wr(bp, cid_addr, offset2, val);
5147 
5148 	val = (u64) txr->tx_desc_mapping & 0xffffffff;
5149 	bnx2_ctx_wr(bp, cid_addr, offset3, val);
5150 }
5151 
5152 static void
5153 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5154 {
5155 	struct bnx2_tx_bd *txbd;
	u32 cid;
5157 	struct bnx2_napi *bnapi;
5158 	struct bnx2_tx_ring_info *txr;
5159 
5160 	bnapi = &bp->bnx2_napi[ring_num];
5161 	txr = &bnapi->tx_ring;
5162 
5163 	if (ring_num == 0)
5164 		cid = TX_CID;
5165 	else
5166 		cid = TX_TSS_CID + ring_num - 1;
5167 
5168 	bp->tx_wake_thresh = bp->tx_ring_size / 2;
5169 
5170 	txbd = &txr->tx_desc_ring[BNX2_MAX_TX_DESC_CNT];
5171 
5172 	txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5173 	txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5174 
5175 	txr->tx_prod = 0;
5176 	txr->tx_prod_bseq = 0;
5177 
5178 	txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5179 	txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5180 
5181 	bnx2_init_tx_context(bp, cid, txr);
5182 }
5183 
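/* Initialize one or more pages of RX buffer descriptors.  Every BD
 * in a page gets the buffer size and START/END flags; the last BD of
 * each page is used as a link whose host address points at the next
 * page (and, from the final page, back to the first), forming the
 * circular chain the hardware follows.
 */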
5184 static void
5185 bnx2_init_rxbd_rings(struct bnx2_rx_bd *rx_ring[], dma_addr_t dma[],
5186 		     u32 buf_size, int num_rings)
5187 {
5188 	int i;
5189 	struct bnx2_rx_bd *rxbd;
5190 
5191 	for (i = 0; i < num_rings; i++) {
5192 		int j;
5193 
5194 		rxbd = &rx_ring[i][0];
5195 		for (j = 0; j < BNX2_MAX_RX_DESC_CNT; j++, rxbd++) {
5196 			rxbd->rx_bd_len = buf_size;
5197 			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5198 		}
5199 		if (i == (num_rings - 1))
5200 			j = 0;
5201 		else
5202 			j = i + 1;
5203 		rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5204 		rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5205 	}
5206 }
5207 
5208 static void
5209 bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
5210 {
5211 	int i;
5212 	u16 prod, ring_prod;
5213 	u32 cid, rx_cid_addr, val;
5214 	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
5215 	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5216 
5217 	if (ring_num == 0)
5218 		cid = RX_CID;
5219 	else
5220 		cid = RX_RSS_CID + ring_num - 1;
5221 
5222 	rx_cid_addr = GET_CID_ADDR(cid);
5223 
5224 	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
5225 			     bp->rx_buf_use_size, bp->rx_max_ring);
5226 
5227 	bnx2_init_rx_context(bp, cid);
5228 
5229 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5230 		val = BNX2_RD(bp, BNX2_MQ_MAP_L2_5);
5231 		BNX2_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
5232 	}
5233 
5234 	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
5235 	if (bp->rx_pg_ring_size) {
5236 		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
5237 				     rxr->rx_pg_desc_mapping,
5238 				     PAGE_SIZE, bp->rx_max_pg_ring);
5239 		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
5240 		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
5241 		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
5242 		       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);
5243 
5244 		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
5245 		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
5246 
5247 		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
5248 		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
5249 
5250 		if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5251 			BNX2_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
5252 	}
5253 
5254 	val = (u64) rxr->rx_desc_mapping[0] >> 32;
5255 	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
5256 
5257 	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
5258 	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
5259 
5260 	ring_prod = prod = rxr->rx_pg_prod;
5261 	for (i = 0; i < bp->rx_pg_ring_size; i++) {
5262 		if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
5263 			netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
5264 				    ring_num, i, bp->rx_pg_ring_size);
5265 			break;
5266 		}
5267 		prod = BNX2_NEXT_RX_BD(prod);
5268 		ring_prod = BNX2_RX_PG_RING_IDX(prod);
5269 	}
5270 	rxr->rx_pg_prod = prod;
5271 
5272 	ring_prod = prod = rxr->rx_prod;
5273 	for (i = 0; i < bp->rx_ring_size; i++) {
5274 		if (bnx2_alloc_rx_data(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
5275 			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
5276 				    ring_num, i, bp->rx_ring_size);
5277 			break;
5278 		}
5279 		prod = BNX2_NEXT_RX_BD(prod);
5280 		ring_prod = BNX2_RX_RING_IDX(prod);
5281 	}
5282 	rxr->rx_prod = prod;
5283 
5284 	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
5285 	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
5286 	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;
5287 
5288 	BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
5289 	BNX2_WR16(bp, rxr->rx_bidx_addr, prod);
5290 
5291 	BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
5292 }
5293 
5294 static void
5295 bnx2_init_all_rings(struct bnx2 *bp)
5296 {
5297 	int i;
5298 	u32 val;
5299 
5300 	bnx2_clear_ring_states(bp);
5301 
5302 	BNX2_WR(bp, BNX2_TSCH_TSS_CFG, 0);
5303 	for (i = 0; i < bp->num_tx_rings; i++)
5304 		bnx2_init_tx_ring(bp, i);
5305 
5306 	if (bp->num_tx_rings > 1)
5307 		BNX2_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
5308 			(TX_TSS_CID << 7));
5309 
5310 	BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
5311 	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);
5312 
5313 	for (i = 0; i < bp->num_rx_rings; i++)
5314 		bnx2_init_rx_ring(bp, i);
5315 
5316 	if (bp->num_rx_rings > 1) {
5317 		u32 tbl_32 = 0;
5318 
5319 		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
5320 			int shift = (i % 8) << 2;
5321 
5322 			tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift;
5323 			if ((i % 8) == 7) {
5324 				BNX2_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32);
5325 				BNX2_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) |
5326 					BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
5327 					BNX2_RLUP_RSS_COMMAND_WRITE |
5328 					BNX2_RLUP_RSS_COMMAND_HASH_MASK);
5329 				tbl_32 = 0;
5330 			}
5331 		}
5332 
5333 		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
5334 		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;
5335 
5336 		BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
5337 
5338 	}
5339 }
5340 
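/* Work out how many descriptor pages a ring of ring_size entries
 * needs, then round that count up to the next power of 2 (capped at
 * max_size, which is assumed to be a power of 2 itself) so that the
 * ring index masks work.  For example, a ring needing 3 pages with
 * max_size 16 yields 4.
 */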
5341 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5342 {
5343 	u32 max, num_rings = 1;
5344 
5345 	while (ring_size > BNX2_MAX_RX_DESC_CNT) {
5346 		ring_size -= BNX2_MAX_RX_DESC_CNT;
5347 		num_rings++;
5348 	}
	/* round num_rings up to the next power of 2, capped at max_size */
5350 	max = max_size;
5351 	while ((max & num_rings) == 0)
5352 		max >>= 1;
5353 
5354 	if (num_rings != max)
5355 		max <<= 1;
5356 
5357 	return max;
5358 }
5359 
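/* Size the RX rings for the current MTU.  If a full frame plus skb
 * overhead no longer fits in one page (and jumbo handling is not
 * broken on this chip), frames are split: the first rx_jumbo_thresh
 * bytes land in a normal ring buffer and the remainder in page-sized
 * buffers on the separate page ring.
 */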
5360 static void
5361 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
5362 {
5363 	u32 rx_size, rx_space, jumbo_size;
5364 
5365 	/* 8 for CRC and VLAN */
5366 	rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
5367 
5368 	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
5369 		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5370 
5371 	bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
5372 	bp->rx_pg_ring_size = 0;
5373 	bp->rx_max_pg_ring = 0;
5374 	bp->rx_max_pg_ring_idx = 0;
5375 	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
5376 		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
5377 
5378 		jumbo_size = size * pages;
5379 		if (jumbo_size > BNX2_MAX_TOTAL_RX_PG_DESC_CNT)
5380 			jumbo_size = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;
5381 
5382 		bp->rx_pg_ring_size = jumbo_size;
5383 		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
5384 							BNX2_MAX_RX_PG_RINGS);
5385 		bp->rx_max_pg_ring_idx =
5386 			(bp->rx_max_pg_ring * BNX2_RX_DESC_CNT) - 1;
5387 		rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
5388 		bp->rx_copy_thresh = 0;
5389 	}
5390 
5391 	bp->rx_buf_use_size = rx_size;
	/* hw alignment + build_skb() overhead */
5393 	bp->rx_buf_size = SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
5394 		NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5395 	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
5396 	bp->rx_ring_size = size;
5397 	bp->rx_max_ring = bnx2_find_max_ring(size, BNX2_MAX_RX_RINGS);
5398 	bp->rx_max_ring_idx = (bp->rx_max_ring * BNX2_RX_DESC_CNT) - 1;
5399 }
5400 
5401 static void
5402 bnx2_free_tx_skbs(struct bnx2 *bp)
5403 {
5404 	int i;
5405 
5406 	for (i = 0; i < bp->num_tx_rings; i++) {
5407 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5408 		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5409 		int j;
5410 
5411 		if (txr->tx_buf_ring == NULL)
5412 			continue;
5413 
5414 		for (j = 0; j < BNX2_TX_DESC_CNT; ) {
5415 			struct bnx2_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
5416 			struct sk_buff *skb = tx_buf->skb;
5417 			int k, last;
5418 
5419 			if (skb == NULL) {
5420 				j = BNX2_NEXT_TX_BD(j);
5421 				continue;
5422 			}
5423 
5424 			dma_unmap_single(&bp->pdev->dev,
5425 					 dma_unmap_addr(tx_buf, mapping),
5426 					 skb_headlen(skb),
5427 					 PCI_DMA_TODEVICE);
5428 
5429 			tx_buf->skb = NULL;
5430 
5431 			last = tx_buf->nr_frags;
5432 			j = BNX2_NEXT_TX_BD(j);
5433 			for (k = 0; k < last; k++, j = BNX2_NEXT_TX_BD(j)) {
5434 				tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(j)];
5435 				dma_unmap_page(&bp->pdev->dev,
5436 					dma_unmap_addr(tx_buf, mapping),
5437 					skb_frag_size(&skb_shinfo(skb)->frags[k]),
5438 					PCI_DMA_TODEVICE);
5439 			}
5440 			dev_kfree_skb(skb);
5441 		}
5442 		netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
5443 	}
5444 }
5445 
5446 static void
5447 bnx2_free_rx_skbs(struct bnx2 *bp)
5448 {
5449 	int i;
5450 
5451 	for (i = 0; i < bp->num_rx_rings; i++) {
5452 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5453 		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5454 		int j;
5455 
5456 		if (rxr->rx_buf_ring == NULL)
5457 			return;
5458 
5459 		for (j = 0; j < bp->rx_max_ring_idx; j++) {
5460 			struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5461 			u8 *data = rx_buf->data;
5462 
5463 			if (data == NULL)
5464 				continue;
5465 
5466 			dma_unmap_single(&bp->pdev->dev,
5467 					 dma_unmap_addr(rx_buf, mapping),
5468 					 bp->rx_buf_use_size,
5469 					 PCI_DMA_FROMDEVICE);
5470 
5471 			rx_buf->data = NULL;
5472 
5473 			kfree(data);
5474 		}
5475 		for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5476 			bnx2_free_rx_page(bp, rxr, j);
5477 	}
5478 }
5479 
5480 static void
5481 bnx2_free_skbs(struct bnx2 *bp)
5482 {
5483 	bnx2_free_tx_skbs(bp);
5484 	bnx2_free_rx_skbs(bp);
5485 }
5486 
5487 static int
5488 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5489 {
5490 	int rc;
5491 
5492 	rc = bnx2_reset_chip(bp, reset_code);
5493 	bnx2_free_skbs(bp);
5494 	if (rc)
5495 		return rc;
5496 
5497 	if ((rc = bnx2_init_chip(bp)) != 0)
5498 		return rc;
5499 
5500 	bnx2_init_all_rings(bp);
5501 	return 0;
5502 }
5503 
5504 static int
5505 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5506 {
5507 	int rc;
5508 
5509 	if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5510 		return rc;
5511 
5512 	spin_lock_bh(&bp->phy_lock);
5513 	bnx2_init_phy(bp, reset_phy);
5514 	bnx2_set_link(bp);
5515 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5516 		bnx2_remote_phy_event(bp);
5517 	spin_unlock_bh(&bp->phy_lock);
5518 	return 0;
5519 }
5520 
5521 static int
5522 bnx2_shutdown_chip(struct bnx2 *bp)
5523 {
5524 	u32 reset_code;
5525 
5526 	if (bp->flags & BNX2_FLAG_NO_WOL)
5527 		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5528 	else if (bp->wol)
5529 		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5530 	else
5531 		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5532 
5533 	return bnx2_reset_chip(bp, reset_code);
5534 }
5535 
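/* Self-test a table of registers.  For each entry, rw_mask marks
 * bits that must be writable: they are written as all 0s and then
 * all 1s and read back.  ro_mask marks read-only bits that must keep
 * their original value across both writes.  The saved register value
 * is restored whether the entry passes or fails.
 */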
5536 static int
5537 bnx2_test_registers(struct bnx2 *bp)
5538 {
5539 	int ret;
5540 	int i, is_5709;
5541 	static const struct {
5542 		u16   offset;
5543 		u16   flags;
5544 #define BNX2_FL_NOT_5709	1
5545 		u32   rw_mask;
5546 		u32   ro_mask;
5547 	} reg_tbl[] = {
5548 		{ 0x006c, 0, 0x00000000, 0x0000003f },
5549 		{ 0x0090, 0, 0xffffffff, 0x00000000 },
5550 		{ 0x0094, 0, 0x00000000, 0x00000000 },
5551 
5552 		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
5553 		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5554 		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5555 		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
5556 		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
5557 		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5558 		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
5559 		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5560 		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5561 
5562 		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5563 		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5564 		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5565 		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5566 		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5567 		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5568 
5569 		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5570 		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
5571 		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },
5572 
5573 		{ 0x1000, 0, 0x00000000, 0x00000001 },
5574 		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },
5575 
5576 		{ 0x1408, 0, 0x01c00800, 0x00000000 },
5577 		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
5578 		{ 0x14a8, 0, 0x00000000, 0x000001ff },
5579 		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
5580 		{ 0x14b0, 0, 0x00000002, 0x00000001 },
5581 		{ 0x14b8, 0, 0x00000000, 0x00000000 },
5582 		{ 0x14c0, 0, 0x00000000, 0x00000009 },
5583 		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
5584 		{ 0x14cc, 0, 0x00000000, 0x00000001 },
5585 		{ 0x14d0, 0, 0xffffffff, 0x00000000 },
5586 
5587 		{ 0x1800, 0, 0x00000000, 0x00000001 },
5588 		{ 0x1804, 0, 0x00000000, 0x00000003 },
5589 
5590 		{ 0x2800, 0, 0x00000000, 0x00000001 },
5591 		{ 0x2804, 0, 0x00000000, 0x00003f01 },
5592 		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
5593 		{ 0x2810, 0, 0xffff0000, 0x00000000 },
5594 		{ 0x2814, 0, 0xffff0000, 0x00000000 },
5595 		{ 0x2818, 0, 0xffff0000, 0x00000000 },
5596 		{ 0x281c, 0, 0xffff0000, 0x00000000 },
5597 		{ 0x2834, 0, 0xffffffff, 0x00000000 },
5598 		{ 0x2840, 0, 0x00000000, 0xffffffff },
5599 		{ 0x2844, 0, 0x00000000, 0xffffffff },
5600 		{ 0x2848, 0, 0xffffffff, 0x00000000 },
5601 		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },
5602 
5603 		{ 0x2c00, 0, 0x00000000, 0x00000011 },
5604 		{ 0x2c04, 0, 0x00000000, 0x00030007 },
5605 
5606 		{ 0x3c00, 0, 0x00000000, 0x00000001 },
5607 		{ 0x3c04, 0, 0x00000000, 0x00070000 },
5608 		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
5609 		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
5610 		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
5611 		{ 0x3c14, 0, 0x00000000, 0xffffffff },
5612 		{ 0x3c18, 0, 0x00000000, 0xffffffff },
5613 		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
5614 		{ 0x3c20, 0, 0xffffff00, 0x00000000 },
5615 
5616 		{ 0x5004, 0, 0x00000000, 0x0000007f },
5617 		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },
5618 
5619 		{ 0x5c00, 0, 0x00000000, 0x00000001 },
5620 		{ 0x5c04, 0, 0x00000000, 0x0003000f },
5621 		{ 0x5c08, 0, 0x00000003, 0x00000000 },
5622 		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
5623 		{ 0x5c10, 0, 0x00000000, 0xffffffff },
5624 		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
5625 		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
5626 		{ 0x5c88, 0, 0x00000000, 0x00077373 },
5627 		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },
5628 
5629 		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
5630 		{ 0x680c, 0, 0xffffffff, 0x00000000 },
5631 		{ 0x6810, 0, 0xffffffff, 0x00000000 },
5632 		{ 0x6814, 0, 0xffffffff, 0x00000000 },
5633 		{ 0x6818, 0, 0xffffffff, 0x00000000 },
5634 		{ 0x681c, 0, 0xffffffff, 0x00000000 },
5635 		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
5636 		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
5637 		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
5638 		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
5639 		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
5640 		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
5641 		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
5642 		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
5643 		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
5644 		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
5645 		{ 0x684c, 0, 0xffffffff, 0x00000000 },
5646 		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
5647 		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
5648 		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
5649 		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
5650 		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
5651 		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },
5652 
5653 		{ 0xffff, 0, 0x00000000, 0x00000000 },
5654 	};
5655 
5656 	ret = 0;
5657 	is_5709 = 0;
5658 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5659 		is_5709 = 1;
5660 
5661 	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
5662 		u32 offset, rw_mask, ro_mask, save_val, val;
5663 		u16 flags = reg_tbl[i].flags;
5664 
5665 		if (is_5709 && (flags & BNX2_FL_NOT_5709))
5666 			continue;
5667 
5668 		offset = (u32) reg_tbl[i].offset;
5669 		rw_mask = reg_tbl[i].rw_mask;
5670 		ro_mask = reg_tbl[i].ro_mask;
5671 
5672 		save_val = readl(bp->regview + offset);
5673 
5674 		writel(0, bp->regview + offset);
5675 
5676 		val = readl(bp->regview + offset);
5677 		if ((val & rw_mask) != 0) {
5678 			goto reg_test_err;
5679 		}
5680 
5681 		if ((val & ro_mask) != (save_val & ro_mask)) {
5682 			goto reg_test_err;
5683 		}
5684 
5685 		writel(0xffffffff, bp->regview + offset);
5686 
5687 		val = readl(bp->regview + offset);
5688 		if ((val & rw_mask) != rw_mask) {
5689 			goto reg_test_err;
5690 		}
5691 
5692 		if ((val & ro_mask) != (save_val & ro_mask)) {
5693 			goto reg_test_err;
5694 		}
5695 
5696 		writel(save_val, bp->regview + offset);
5697 		continue;
5698 
5699 reg_test_err:
5700 		writel(save_val, bp->regview + offset);
5701 		ret = -ENODEV;
5702 		break;
5703 	}
5704 	return ret;
5705 }
5706 
5707 static int
5708 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5709 {
	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
		0xaaaaaaaa, 0xaa55aa55, 0x55aa55aa };
5712 	int i;
5713 
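	/* Write each pattern to every word in the region through the
	 * indirect register interface, then read it back and compare.
	 */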
	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
5715 		u32 offset;
5716 
5717 		for (offset = 0; offset < size; offset += 4) {
5718 
5719 			bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5720 
5721 			if (bnx2_reg_rd_ind(bp, start + offset) !=
5722 				test_pattern[i]) {
5723 				return -ENODEV;
5724 			}
5725 		}
5726 	}
5727 	return 0;
5728 }
5729 
5730 static int
5731 bnx2_test_memory(struct bnx2 *bp)
5732 {
5733 	int ret = 0;
5734 	int i;
5735 	static struct mem_entry {
5736 		u32   offset;
5737 		u32   len;
5738 	} mem_tbl_5706[] = {
5739 		{ 0x60000,  0x4000 },
5740 		{ 0xa0000,  0x3000 },
5741 		{ 0xe0000,  0x4000 },
5742 		{ 0x120000, 0x4000 },
5743 		{ 0x1a0000, 0x4000 },
5744 		{ 0x160000, 0x4000 },
5745 		{ 0xffffffff, 0    },
5746 	},
5747 	mem_tbl_5709[] = {
5748 		{ 0x60000,  0x4000 },
5749 		{ 0xa0000,  0x3000 },
5750 		{ 0xe0000,  0x4000 },
5751 		{ 0x120000, 0x4000 },
5752 		{ 0x1a0000, 0x4000 },
5753 		{ 0xffffffff, 0    },
5754 	};
5755 	struct mem_entry *mem_tbl;
5756 
5757 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5758 		mem_tbl = mem_tbl_5709;
5759 	else
5760 		mem_tbl = mem_tbl_5706;
5761 
5762 	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5763 		if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5764 			mem_tbl[i].len)) != 0) {
5765 			return ret;
5766 		}
5767 	}
5768 
5769 	return ret;
5770 }
5771 
5772 #define BNX2_MAC_LOOPBACK	0
5773 #define BNX2_PHY_LOOPBACK	1
5774 
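/* Send one test frame on tx ring 0 with the chip in MAC or PHY loopback
 * and verify that it arrives on the rx ring with a clean l2_fhdr status,
 * the expected length, and an intact payload.
 */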
5775 static int
5776 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5777 {
5778 	unsigned int pkt_size, num_pkts, i;
5779 	struct sk_buff *skb;
5780 	u8 *data;
5781 	unsigned char *packet;
5782 	u16 rx_start_idx, rx_idx;
5783 	dma_addr_t map;
5784 	struct bnx2_tx_bd *txbd;
5785 	struct bnx2_sw_bd *rx_buf;
5786 	struct l2_fhdr *rx_hdr;
5787 	int ret = -ENODEV;
5788 	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
	struct bnx2_tx_ring_info *txr;
	struct bnx2_rx_ring_info *rxr;
5791 
5792 	tx_napi = bnapi;
5793 
5794 	txr = &tx_napi->tx_ring;
5795 	rxr = &bnapi->rx_ring;
5796 	if (loopback_mode == BNX2_MAC_LOOPBACK) {
5797 		bp->loopback = MAC_LOOPBACK;
5798 		bnx2_set_mac_loopback(bp);
5799 	}
5800 	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
5801 		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5802 			return 0;
5803 
5804 		bp->loopback = PHY_LOOPBACK;
5805 		bnx2_set_phy_loopback(bp);
5806 	}
5807 	else
5808 		return -EINVAL;
5809 
5810 	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
5811 	skb = netdev_alloc_skb(bp->dev, pkt_size);
5812 	if (!skb)
5813 		return -ENOMEM;
5814 	packet = skb_put(skb, pkt_size);
5815 	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
5816 	memset(packet + ETH_ALEN, 0x0, 8);
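	/* Fill the payload after the 14-byte Ethernet header with an
	 * incrementing byte pattern; it is verified on receive below.
	 */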
5817 	for (i = 14; i < pkt_size; i++)
5818 		packet[i] = (unsigned char) (i & 0xff);
5819 
	map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
			     DMA_TO_DEVICE);
5822 	if (dma_mapping_error(&bp->pdev->dev, map)) {
5823 		dev_kfree_skb(skb);
5824 		return -EIO;
5825 	}
5826 
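	/* Kick the host coalescing block (without raising an interrupt)
	 * so the status block rx consumer index is current before it is
	 * sampled below.
	 */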
5827 	BNX2_WR(bp, BNX2_HC_COMMAND,
5828 		bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5829 
5830 	BNX2_RD(bp, BNX2_HC_COMMAND);
5831 
5832 	udelay(5);
5833 	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
5834 
5835 	num_pkts = 0;
5836 
5837 	txbd = &txr->tx_desc_ring[BNX2_TX_RING_IDX(txr->tx_prod)];
5838 
5839 	txbd->tx_bd_haddr_hi = (u64) map >> 32;
5840 	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5841 	txbd->tx_bd_mss_nbytes = pkt_size;
5842 	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5843 
5844 	num_pkts++;
5845 	txr->tx_prod = BNX2_NEXT_TX_BD(txr->tx_prod);
5846 	txr->tx_prod_bseq += pkt_size;
5847 
5848 	BNX2_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
5849 	BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
5850 
5851 	udelay(100);
5852 
5853 	BNX2_WR(bp, BNX2_HC_COMMAND,
5854 		bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5855 
5856 	BNX2_RD(bp, BNX2_HC_COMMAND);
5857 
5858 	udelay(5);
5859 
	dma_unmap_single(&bp->pdev->dev, map, pkt_size, DMA_TO_DEVICE);
5861 	dev_kfree_skb(skb);
5862 
5863 	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
5864 		goto loopback_test_done;
5865 
5866 	rx_idx = bnx2_get_hw_rx_cons(bnapi);
5867 	if (rx_idx != rx_start_idx + num_pkts) {
5868 		goto loopback_test_done;
5869 	}
5870 
5871 	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
5872 	data = rx_buf->data;
5873 
5874 	rx_hdr = get_l2_fhdr(data);
5875 	data = (u8 *)rx_hdr + BNX2_RX_OFFSET;
5876 
	dma_sync_single_for_cpu(&bp->pdev->dev,
		dma_unmap_addr(rx_buf, mapping),
		bp->rx_buf_use_size, DMA_FROM_DEVICE);
5880 
5881 	if (rx_hdr->l2_fhdr_status &
5882 		(L2_FHDR_ERRORS_BAD_CRC |
5883 		L2_FHDR_ERRORS_PHY_DECODE |
5884 		L2_FHDR_ERRORS_ALIGNMENT |
5885 		L2_FHDR_ERRORS_TOO_SHORT |
5886 		L2_FHDR_ERRORS_GIANT_FRAME)) {
5887 
5888 		goto loopback_test_done;
5889 	}
5890 
5891 	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5892 		goto loopback_test_done;
5893 	}
5894 
5895 	for (i = 14; i < pkt_size; i++) {
5896 		if (*(data + i) != (unsigned char) (i & 0xff)) {
5897 			goto loopback_test_done;
5898 		}
5899 	}
5900 
5901 	ret = 0;
5902 
5903 loopback_test_done:
5904 	bp->loopback = 0;
5905 	return ret;
5906 }
5907 
5908 #define BNX2_MAC_LOOPBACK_FAILED	1
5909 #define BNX2_PHY_LOOPBACK_FAILED	2
5910 #define BNX2_LOOPBACK_FAILED		(BNX2_MAC_LOOPBACK_FAILED |	\
5911 					 BNX2_PHY_LOOPBACK_FAILED)
5912 
5913 static int
5914 bnx2_test_loopback(struct bnx2 *bp)
5915 {
5916 	int rc = 0;
5917 
5918 	if (!netif_running(bp->dev))
5919 		return BNX2_LOOPBACK_FAILED;
5920 
5921 	bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5922 	spin_lock_bh(&bp->phy_lock);
5923 	bnx2_init_phy(bp, 1);
5924 	spin_unlock_bh(&bp->phy_lock);
5925 	if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5926 		rc |= BNX2_MAC_LOOPBACK_FAILED;
5927 	if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5928 		rc |= BNX2_PHY_LOOPBACK_FAILED;
5929 	return rc;
5930 }
5931 
5932 #define NVRAM_SIZE 0x200
5933 #define CRC32_RESIDUAL 0xdebb20e3
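/* A CRC32 computed over a block that ends with its own little-endian CRC
 * yields this fixed residual, so each 0x100-byte half of the block read
 * below can be verified without locating the stored checksum.
 */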
5934 
5935 static int
5936 bnx2_test_nvram(struct bnx2 *bp)
5937 {
5938 	__be32 buf[NVRAM_SIZE / 4];
5939 	u8 *data = (u8 *) buf;
5940 	int rc = 0;
5941 	u32 magic, csum;
5942 
5943 	if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5944 		goto test_nvram_done;
5945 
	magic = be32_to_cpu(buf[0]);
5947 	if (magic != 0x669955aa) {
5948 		rc = -ENODEV;
5949 		goto test_nvram_done;
5950 	}
5951 
5952 	if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5953 		goto test_nvram_done;
5954 
5955 	csum = ether_crc_le(0x100, data);
5956 	if (csum != CRC32_RESIDUAL) {
5957 		rc = -ENODEV;
5958 		goto test_nvram_done;
5959 	}
5960 
5961 	csum = ether_crc_le(0x100, data + 0x100);
5962 	if (csum != CRC32_RESIDUAL) {
5963 		rc = -ENODEV;
5964 	}
5965 
5966 test_nvram_done:
5967 	return rc;
5968 }
5969 
5970 static int
5971 bnx2_test_link(struct bnx2 *bp)
5972 {
5973 	u32 bmsr;
5974 
5975 	if (!netif_running(bp->dev))
5976 		return -ENODEV;
5977 
5978 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5979 		if (bp->link_up)
5980 			return 0;
5981 		return -ENODEV;
5982 	}
5983 	spin_lock_bh(&bp->phy_lock);
5984 	bnx2_enable_bmsr1(bp);
5985 	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5986 	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5987 	bnx2_disable_bmsr1(bp);
5988 	spin_unlock_bh(&bp->phy_lock);
5989 
5990 	if (bmsr & BMSR_LSTATUS) {
5991 		return 0;
5992 	}
5993 	return -ENODEV;
5994 }
5995 
5996 static int
5997 bnx2_test_intr(struct bnx2 *bp)
5998 {
5999 	int i;
6000 	u16 status_idx;
6001 
6002 	if (!netif_running(bp->dev))
6003 		return -ENODEV;
6004 
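	/* Sample the current status block index, force an interrupt via
	 * COAL_NOW, then poll for up to ~100ms for the index to advance.
	 */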
6005 	status_idx = BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
6006 
6007 	/* This register is not touched during run-time. */
6008 	BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
6009 	BNX2_RD(bp, BNX2_HC_COMMAND);
6010 
6011 	for (i = 0; i < 10; i++) {
6012 		if ((BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
6013 			status_idx) {
6014 
6015 			break;
6016 		}
6017 
6018 		msleep_interruptible(10);
6019 	}
6020 	if (i < 10)
6021 		return 0;
6022 
6023 	return -ENODEV;
6024 }
6025 
/* Determine link state for parallel detection. */
6027 static int
6028 bnx2_5706_serdes_has_link(struct bnx2 *bp)
6029 {
6030 	u32 mode_ctl, an_dbg, exp;
6031 
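	/* Parallel detection: with no autonegotiating partner, declare
	 * link only if signal detect is up, the receiver is in sync, and
	 * no autoneg CONFIG code words are being received.
	 */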
6032 	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
6033 		return 0;
6034 
6035 	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
6036 	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
6037 
6038 	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
6039 		return 0;
6040 
6041 	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
6042 	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
6043 	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
6044 
6045 	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
6046 		return 0;
6047 
6048 	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
6049 	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
6050 	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
6051 
6052 	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
6053 		return 0;
6054 
6055 	return 1;
6056 }
6057 
6058 static void
6059 bnx2_5706_serdes_timer(struct bnx2 *bp)
6060 {
6061 	int check_link = 1;
6062 
6063 	spin_lock(&bp->phy_lock);
6064 	if (bp->serdes_an_pending) {
6065 		bp->serdes_an_pending--;
6066 		check_link = 0;
6067 	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6068 		u32 bmcr;
6069 
6070 		bp->current_interval = BNX2_TIMER_INTERVAL;
6071 
6072 		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6073 
6074 		if (bmcr & BMCR_ANENABLE) {
6075 			if (bnx2_5706_serdes_has_link(bp)) {
6076 				bmcr &= ~BMCR_ANENABLE;
6077 				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6078 				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
6079 				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
6080 			}
6081 		}
6082 	}
6083 	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
6084 		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
6085 		u32 phy2;
6086 
6087 		bnx2_write_phy(bp, 0x17, 0x0f01);
6088 		bnx2_read_phy(bp, 0x15, &phy2);
6089 		if (phy2 & 0x20) {
6090 			u32 bmcr;
6091 
6092 			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6093 			bmcr |= BMCR_ANENABLE;
6094 			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
6095 
6096 			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
6097 		}
6098 	} else
6099 		bp->current_interval = BNX2_TIMER_INTERVAL;
6100 
6101 	if (check_link) {
6102 		u32 val;
6103 
6104 		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
6105 		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6106 		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6107 
6108 		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
6109 			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
6110 				bnx2_5706s_force_link_dn(bp, 1);
6111 				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
6112 			} else
6113 				bnx2_set_link(bp);
6114 		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
6115 			bnx2_set_link(bp);
6116 	}
6117 	spin_unlock(&bp->phy_lock);
6118 }
6119 
6120 static void
6121 bnx2_5708_serdes_timer(struct bnx2 *bp)
6122 {
6123 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
6124 		return;
6125 
6126 	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
6127 		bp->serdes_an_pending = 0;
6128 		return;
6129 	}
6130 
6131 	spin_lock(&bp->phy_lock);
6132 	if (bp->serdes_an_pending)
6133 		bp->serdes_an_pending--;
6134 	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6135 		u32 bmcr;
6136 
6137 		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6138 		if (bmcr & BMCR_ANENABLE) {
6139 			bnx2_enable_forced_2g5(bp);
6140 			bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
6141 		} else {
6142 			bnx2_disable_forced_2g5(bp);
6143 			bp->serdes_an_pending = 2;
6144 			bp->current_interval = BNX2_TIMER_INTERVAL;
6145 		}
6146 
6147 	} else
6148 		bp->current_interval = BNX2_TIMER_INTERVAL;
6149 
6150 	spin_unlock(&bp->phy_lock);
6151 }
6152 
6153 static void
6154 bnx2_timer(unsigned long data)
6155 {
6156 	struct bnx2 *bp = (struct bnx2 *) data;
6157 
6158 	if (!netif_running(bp->dev))
6159 		return;
6160 
6161 	if (atomic_read(&bp->intr_sem) != 0)
6162 		goto bnx2_restart_timer;
6163 
6164 	if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
6165 	     BNX2_FLAG_USING_MSI)
6166 		bnx2_chk_missed_msi(bp);
6167 
6168 	bnx2_send_heart_beat(bp);
6169 
6170 	bp->stats_blk->stat_FwRxDrop =
6171 		bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
6172 
	/* Work around occasionally corrupted statistics counters. */
6174 	if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
6175 		BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
6176 			BNX2_HC_COMMAND_STATS_NOW);
6177 
6178 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6179 		if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
6180 			bnx2_5706_serdes_timer(bp);
6181 		else
6182 			bnx2_5708_serdes_timer(bp);
6183 	}
6184 
6185 bnx2_restart_timer:
6186 	mod_timer(&bp->timer, jiffies + bp->current_interval);
6187 }
6188 
6189 static int
6190 bnx2_request_irq(struct bnx2 *bp)
6191 {
6192 	unsigned long flags;
6193 	struct bnx2_irq *irq;
6194 	int rc = 0, i;
6195 
6196 	if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6197 		flags = 0;
6198 	else
6199 		flags = IRQF_SHARED;
6200 
6201 	for (i = 0; i < bp->irq_nvecs; i++) {
6202 		irq = &bp->irq_tbl[i];
6203 		rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6204 				 &bp->bnx2_napi[i]);
6205 		if (rc)
6206 			break;
6207 		irq->requested = 1;
6208 	}
6209 	return rc;
6210 }
6211 
6212 static void
6213 __bnx2_free_irq(struct bnx2 *bp)
6214 {
6215 	struct bnx2_irq *irq;
6216 	int i;
6217 
6218 	for (i = 0; i < bp->irq_nvecs; i++) {
6219 		irq = &bp->irq_tbl[i];
6220 		if (irq->requested)
6221 			free_irq(irq->vector, &bp->bnx2_napi[i]);
6222 		irq->requested = 0;
6223 	}
6224 }
6225 
6226 static void
6227 bnx2_free_irq(struct bnx2 *bp)
6228 {
6229 
6230 	__bnx2_free_irq(bp);
6231 	if (bp->flags & BNX2_FLAG_USING_MSI)
6232 		pci_disable_msi(bp->pdev);
6233 	else if (bp->flags & BNX2_FLAG_USING_MSIX)
6234 		pci_disable_msix(bp->pdev);
6235 
6236 	bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
6237 }
6238 
6239 static void
6240 bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
6241 {
6242 	int i, total_vecs;
6243 	struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
6244 	struct net_device *dev = bp->dev;
6245 	const int len = sizeof(bp->irq_tbl[0].name);
6246 
6247 	bnx2_setup_msix_tbl(bp);
6248 	BNX2_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
6249 	BNX2_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
6250 	BNX2_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
6251 
	/* Need to flush the previous three writes to ensure MSI-X
	 * is set up properly.
	 */
6254 	BNX2_RD(bp, BNX2_PCI_MSIX_CONTROL);
6255 
6256 	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
6257 		msix_ent[i].entry = i;
6258 		msix_ent[i].vector = 0;
6259 	}
6260 
6261 	total_vecs = msix_vecs;
6262 #ifdef BCM_CNIC
6263 	total_vecs++;
6264 #endif
6265 	total_vecs = pci_enable_msix_range(bp->pdev, msix_ent,
6266 					   BNX2_MIN_MSIX_VEC, total_vecs);
6267 	if (total_vecs < 0)
6268 		return;
6269 
6270 	msix_vecs = total_vecs;
6271 #ifdef BCM_CNIC
6272 	msix_vecs--;
6273 #endif
6274 	bp->irq_nvecs = msix_vecs;
6275 	bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
6276 	for (i = 0; i < total_vecs; i++) {
6277 		bp->irq_tbl[i].vector = msix_ent[i].vector;
6278 		snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
6279 		bp->irq_tbl[i].handler = bnx2_msi_1shot;
6280 	}
6281 }
6282 
6283 static int
6284 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
6285 {
6286 	int cpus = netif_get_num_default_rss_queues();
6287 	int msix_vecs;
6288 
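	/* Size the MSI-X vector count from the requested ring counts
	 * (defaulting to one per CPU plus one), capped at RX_MAX_RINGS.
	 */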
6289 	if (!bp->num_req_rx_rings)
6290 		msix_vecs = max(cpus + 1, bp->num_req_tx_rings);
6291 	else if (!bp->num_req_tx_rings)
6292 		msix_vecs = max(cpus, bp->num_req_rx_rings);
6293 	else
6294 		msix_vecs = max(bp->num_req_rx_rings, bp->num_req_tx_rings);
6295 
6296 	msix_vecs = min(msix_vecs, RX_MAX_RINGS);
6297 
6298 	bp->irq_tbl[0].handler = bnx2_interrupt;
6299 	strcpy(bp->irq_tbl[0].name, bp->dev->name);
6300 	bp->irq_nvecs = 1;
6301 	bp->irq_tbl[0].vector = bp->pdev->irq;
6302 
6303 	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
6304 		bnx2_enable_msix(bp, msix_vecs);
6305 
6306 	if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
6307 	    !(bp->flags & BNX2_FLAG_USING_MSIX)) {
6308 		if (pci_enable_msi(bp->pdev) == 0) {
6309 			bp->flags |= BNX2_FLAG_USING_MSI;
6310 			if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
6311 				bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
6312 				bp->irq_tbl[0].handler = bnx2_msi_1shot;
6313 			} else
6314 				bp->irq_tbl[0].handler = bnx2_msi;
6315 
6316 			bp->irq_tbl[0].vector = bp->pdev->irq;
6317 		}
6318 	}
6319 
6320 	if (!bp->num_req_tx_rings)
6321 		bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
6322 	else
6323 		bp->num_tx_rings = min(bp->irq_nvecs, bp->num_req_tx_rings);
6324 
6325 	if (!bp->num_req_rx_rings)
6326 		bp->num_rx_rings = bp->irq_nvecs;
6327 	else
6328 		bp->num_rx_rings = min(bp->irq_nvecs, bp->num_req_rx_rings);
6329 
6330 	netif_set_real_num_tx_queues(bp->dev, bp->num_tx_rings);
6331 
6332 	return netif_set_real_num_rx_queues(bp->dev, bp->num_rx_rings);
6333 }
6334 
6335 /* Called with rtnl_lock */
6336 static int
6337 bnx2_open(struct net_device *dev)
6338 {
6339 	struct bnx2 *bp = netdev_priv(dev);
6340 	int rc;
6341 
6342 	rc = bnx2_request_firmware(bp);
6343 	if (rc < 0)
6344 		goto out;
6345 
6346 	netif_carrier_off(dev);
6347 
6348 	bnx2_disable_int(bp);
6349 
6350 	rc = bnx2_setup_int_mode(bp, disable_msi);
6351 	if (rc)
6352 		goto open_err;
6353 	bnx2_init_napi(bp);
6354 	bnx2_napi_enable(bp);
6355 	rc = bnx2_alloc_mem(bp);
6356 	if (rc)
6357 		goto open_err;
6358 
6359 	rc = bnx2_request_irq(bp);
6360 	if (rc)
6361 		goto open_err;
6362 
6363 	rc = bnx2_init_nic(bp, 1);
6364 	if (rc)
6365 		goto open_err;
6366 
6367 	mod_timer(&bp->timer, jiffies + bp->current_interval);
6368 
6369 	atomic_set(&bp->intr_sem, 0);
6370 
6371 	memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));
6372 
6373 	bnx2_enable_int(bp);
6374 
6375 	if (bp->flags & BNX2_FLAG_USING_MSI) {
		/* Test MSI to make sure it is working.
		 * If the MSI test fails, fall back to INTx mode.
		 */
6379 		if (bnx2_test_intr(bp) != 0) {
6380 			netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");
6381 
6382 			bnx2_disable_int(bp);
6383 			bnx2_free_irq(bp);
6384 
6385 			bnx2_setup_int_mode(bp, 1);
6386 
6387 			rc = bnx2_init_nic(bp, 0);
6388 
6389 			if (!rc)
6390 				rc = bnx2_request_irq(bp);
6391 
6392 			if (rc) {
6393 				del_timer_sync(&bp->timer);
6394 				goto open_err;
6395 			}
6396 			bnx2_enable_int(bp);
6397 		}
6398 	}
6399 	if (bp->flags & BNX2_FLAG_USING_MSI)
6400 		netdev_info(dev, "using MSI\n");
6401 	else if (bp->flags & BNX2_FLAG_USING_MSIX)
6402 		netdev_info(dev, "using MSIX\n");
6403 
6404 	netif_tx_start_all_queues(dev);
6405 out:
6406 	return rc;
6407 
6408 open_err:
6409 	bnx2_napi_disable(bp);
6410 	bnx2_free_skbs(bp);
6411 	bnx2_free_irq(bp);
6412 	bnx2_free_mem(bp);
6413 	bnx2_del_napi(bp);
6414 	bnx2_release_firmware(bp);
6415 	goto out;
6416 }
6417 
6418 static void
6419 bnx2_reset_task(struct work_struct *work)
6420 {
6421 	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
6422 	int rc;
6423 	u16 pcicmd;
6424 
6425 	rtnl_lock();
6426 	if (!netif_running(bp->dev)) {
6427 		rtnl_unlock();
6428 		return;
6429 	}
6430 
6431 	bnx2_netif_stop(bp, true);
6432 
6433 	pci_read_config_word(bp->pdev, PCI_COMMAND, &pcicmd);
6434 	if (!(pcicmd & PCI_COMMAND_MEMORY)) {
		/* in case the PCI block has been reset */
6436 		pci_restore_state(bp->pdev);
6437 		pci_save_state(bp->pdev);
6438 	}
6439 	rc = bnx2_init_nic(bp, 1);
6440 	if (rc) {
6441 		netdev_err(bp->dev, "failed to reset NIC, closing\n");
6442 		bnx2_napi_enable(bp);
6443 		dev_close(bp->dev);
6444 		rtnl_unlock();
6445 		return;
6446 	}
6447 
6448 	atomic_set(&bp->intr_sem, 1);
6449 	bnx2_netif_start(bp, true);
6450 	rtnl_unlock();
6451 }
6452 
6453 #define BNX2_FTQ_ENTRY(ftq) { __stringify(ftq##FTQ_CTL), BNX2_##ftq##FTQ_CTL }
6454 
6455 static void
6456 bnx2_dump_ftq(struct bnx2 *bp)
6457 {
6458 	int i;
6459 	u32 reg, bdidx, cid, valid;
6460 	struct net_device *dev = bp->dev;
6461 	static const struct ftq_reg {
6462 		char *name;
6463 		u32 off;
6464 	} ftq_arr[] = {
6465 		BNX2_FTQ_ENTRY(RV2P_P),
6466 		BNX2_FTQ_ENTRY(RV2P_T),
6467 		BNX2_FTQ_ENTRY(RV2P_M),
6468 		BNX2_FTQ_ENTRY(TBDR_),
6469 		BNX2_FTQ_ENTRY(TDMA_),
6470 		BNX2_FTQ_ENTRY(TXP_),
6472 		BNX2_FTQ_ENTRY(TPAT_),
6473 		BNX2_FTQ_ENTRY(RXP_C),
6474 		BNX2_FTQ_ENTRY(RXP_),
6475 		BNX2_FTQ_ENTRY(COM_COMXQ_),
6476 		BNX2_FTQ_ENTRY(COM_COMTQ_),
6477 		BNX2_FTQ_ENTRY(COM_COMQ_),
6478 		BNX2_FTQ_ENTRY(CP_CPQ_),
6479 	};
6480 
6481 	netdev_err(dev, "<--- start FTQ dump --->\n");
6482 	for (i = 0; i < ARRAY_SIZE(ftq_arr); i++)
6483 		netdev_err(dev, "%s %08x\n", ftq_arr[i].name,
6484 			   bnx2_reg_rd_ind(bp, ftq_arr[i].off));
6485 
6486 	netdev_err(dev, "CPU states:\n");
6487 	for (reg = BNX2_TXP_CPU_MODE; reg <= BNX2_CP_CPU_MODE; reg += 0x40000)
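		/* pc (reg + 0x1c) is read twice, presumably so that two
		 * differing values show the CPU is still advancing.
		 */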
6488 		netdev_err(dev, "%06x mode %x state %x evt_mask %x pc %x pc %x instr %x\n",
6489 			   reg, bnx2_reg_rd_ind(bp, reg),
6490 			   bnx2_reg_rd_ind(bp, reg + 4),
6491 			   bnx2_reg_rd_ind(bp, reg + 8),
6492 			   bnx2_reg_rd_ind(bp, reg + 0x1c),
6493 			   bnx2_reg_rd_ind(bp, reg + 0x1c),
6494 			   bnx2_reg_rd_ind(bp, reg + 0x20));
6495 
6496 	netdev_err(dev, "<--- end FTQ dump --->\n");
6497 	netdev_err(dev, "<--- start TBDC dump --->\n");
6498 	netdev_err(dev, "TBDC free cnt: %ld\n",
6499 		   BNX2_RD(bp, BNX2_TBDC_STATUS) & BNX2_TBDC_STATUS_FREE_CNT);
6500 	netdev_err(dev, "LINE     CID  BIDX   CMD  VALIDS\n");
6501 	for (i = 0; i < 0x20; i++) {
6502 		int j = 0;
6503 
6504 		BNX2_WR(bp, BNX2_TBDC_BD_ADDR, i);
6505 		BNX2_WR(bp, BNX2_TBDC_CAM_OPCODE,
6506 			BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_READ);
6507 		BNX2_WR(bp, BNX2_TBDC_COMMAND, BNX2_TBDC_COMMAND_CMD_REG_ARB);
6508 		while ((BNX2_RD(bp, BNX2_TBDC_COMMAND) &
6509 			BNX2_TBDC_COMMAND_CMD_REG_ARB) && j < 100)
6510 			j++;
6511 
6512 		cid = BNX2_RD(bp, BNX2_TBDC_CID);
6513 		bdidx = BNX2_RD(bp, BNX2_TBDC_BIDX);
6514 		valid = BNX2_RD(bp, BNX2_TBDC_CAM_OPCODE);
6515 		netdev_err(dev, "%02x    %06x  %04lx   %02x    [%x]\n",
6516 			   i, cid, bdidx & BNX2_TBDC_BDIDX_BDIDX,
6517 			   bdidx >> 24, (valid >> 8) & 0x0ff);
6518 	}
6519 	netdev_err(dev, "<--- end TBDC dump --->\n");
6520 }
6521 
6522 static void
6523 bnx2_dump_state(struct bnx2 *bp)
6524 {
6525 	struct net_device *dev = bp->dev;
6526 	u32 val1, val2;
6527 
6528 	pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1);
6529 	netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n",
6530 		   atomic_read(&bp->intr_sem), val1);
6531 	pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1);
6532 	pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2);
6533 	netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2);
6534 	netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
6535 		   BNX2_RD(bp, BNX2_EMAC_TX_STATUS),
6536 		   BNX2_RD(bp, BNX2_EMAC_RX_STATUS));
6537 	netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
6538 		   BNX2_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
6539 	netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
6540 		   BNX2_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
6541 	if (bp->flags & BNX2_FLAG_USING_MSIX)
6542 		netdev_err(dev, "DEBUG: PBA[%08x]\n",
6543 			   BNX2_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
6544 }
6545 
6546 static void
6547 bnx2_tx_timeout(struct net_device *dev)
6548 {
6549 	struct bnx2 *bp = netdev_priv(dev);
6550 
6551 	bnx2_dump_ftq(bp);
6552 	bnx2_dump_state(bp);
6553 	bnx2_dump_mcp_state(bp);
6554 
	/* This allows the netif to be shut down gracefully before resetting */
6556 	schedule_work(&bp->reset_task);
6557 }
6558 
6559 /* Called with netif_tx_lock.
6560  * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
6561  * netif_wake_queue().
6562  */
6563 static netdev_tx_t
6564 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6565 {
6566 	struct bnx2 *bp = netdev_priv(dev);
6567 	dma_addr_t mapping;
6568 	struct bnx2_tx_bd *txbd;
6569 	struct bnx2_sw_tx_bd *tx_buf;
6570 	u32 len, vlan_tag_flags, last_frag, mss;
6571 	u16 prod, ring_prod;
6572 	int i;
6573 	struct bnx2_napi *bnapi;
6574 	struct bnx2_tx_ring_info *txr;
6575 	struct netdev_queue *txq;
6576 
	/* Determine which tx ring this skb will be placed on */
6578 	i = skb_get_queue_mapping(skb);
6579 	bnapi = &bp->bnx2_napi[i];
6580 	txr = &bnapi->tx_ring;
6581 	txq = netdev_get_tx_queue(dev, i);
6582 
6583 	if (unlikely(bnx2_tx_avail(bp, txr) <
6584 	    (skb_shinfo(skb)->nr_frags + 1))) {
6585 		netif_tx_stop_queue(txq);
6586 		netdev_err(dev, "BUG! Tx ring full when queue awake!\n");
6587 
6588 		return NETDEV_TX_BUSY;
6589 	}
6590 	len = skb_headlen(skb);
6591 	prod = txr->tx_prod;
6592 	ring_prod = BNX2_TX_RING_IDX(prod);
6593 
6594 	vlan_tag_flags = 0;
6595 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
6596 		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
6597 	}
6598 
6599 	if (vlan_tx_tag_present(skb)) {
6600 		vlan_tag_flags |=
6601 			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
6602 	}
6603 
6604 	if ((mss = skb_shinfo(skb)->gso_size)) {
6605 		u32 tcp_opt_len;
6606 		struct iphdr *iph;
6607 
6608 		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
6609 
6610 		tcp_opt_len = tcp_optlen(skb);
6611 
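		/* The BD carries the combined IP + TCP option length in
		 * 32-bit words.  For IPv6, any extension-header offset is
		 * additionally encoded in 8-byte units, with its bits
		 * scattered across the flags and mss fields.
		 */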
6612 		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
6613 			u32 tcp_off = skb_transport_offset(skb) -
6614 				      sizeof(struct ipv6hdr) - ETH_HLEN;
6615 
6616 			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
6617 					  TX_BD_FLAGS_SW_FLAGS;
6618 			if (likely(tcp_off == 0))
6619 				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
6620 			else {
6621 				tcp_off >>= 3;
6622 				vlan_tag_flags |= ((tcp_off & 0x3) <<
6623 						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
6624 						  ((tcp_off & 0x10) <<
6625 						   TX_BD_FLAGS_TCP6_OFF4_SHL);
6626 				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
6627 			}
6628 		} else {
6629 			iph = ip_hdr(skb);
6630 			if (tcp_opt_len || (iph->ihl > 5)) {
6631 				vlan_tag_flags |= ((iph->ihl - 5) +
6632 						   (tcp_opt_len >> 2)) << 8;
6633 			}
6634 		}
6635 	} else
6636 		mss = 0;
6637 
	mapping = dma_map_single(&bp->pdev->dev, skb->data, len, DMA_TO_DEVICE);
6639 	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
6640 		dev_kfree_skb_any(skb);
6641 		return NETDEV_TX_OK;
6642 	}
6643 
6644 	tx_buf = &txr->tx_buf_ring[ring_prod];
6645 	tx_buf->skb = skb;
6646 	dma_unmap_addr_set(tx_buf, mapping, mapping);
6647 
6648 	txbd = &txr->tx_desc_ring[ring_prod];
6649 
6650 	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6651 	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6652 	txbd->tx_bd_mss_nbytes = len | (mss << 16);
6653 	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
6654 
6655 	last_frag = skb_shinfo(skb)->nr_frags;
6656 	tx_buf->nr_frags = last_frag;
6657 	tx_buf->is_gso = skb_is_gso(skb);
6658 
6659 	for (i = 0; i < last_frag; i++) {
6660 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6661 
6662 		prod = BNX2_NEXT_TX_BD(prod);
6663 		ring_prod = BNX2_TX_RING_IDX(prod);
6664 		txbd = &txr->tx_desc_ring[ring_prod];
6665 
6666 		len = skb_frag_size(frag);
6667 		mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, len,
6668 					   DMA_TO_DEVICE);
6669 		if (dma_mapping_error(&bp->pdev->dev, mapping))
6670 			goto dma_error;
6671 		dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
6672 				   mapping);
6673 
6674 		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6675 		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6676 		txbd->tx_bd_mss_nbytes = len | (mss << 16);
6677 		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
6678 
6679 	}
6680 	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
6681 
6682 	/* Sync BD data before updating TX mailbox */
6683 	wmb();
6684 
6685 	netdev_tx_sent_queue(txq, skb->len);
6686 
6687 	prod = BNX2_NEXT_TX_BD(prod);
6688 	txr->tx_prod_bseq += skb->len;
6689 
6690 	BNX2_WR16(bp, txr->tx_bidx_addr, prod);
6691 	BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
6692 
6693 	mmiowb();
6694 
6695 	txr->tx_prod = prod;
6696 
6697 	if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
6698 		netif_tx_stop_queue(txq);
6699 
6700 		/* netif_tx_stop_queue() must be done before checking
6701 		 * tx index in bnx2_tx_avail() below, because in
6702 		 * bnx2_tx_int(), we update tx index before checking for
6703 		 * netif_tx_queue_stopped().
6704 		 */
6705 		smp_mb();
6706 		if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
6707 			netif_tx_wake_queue(txq);
6708 	}
6709 
6710 	return NETDEV_TX_OK;
6711 dma_error:
6712 	/* save value of frag that failed */
6713 	last_frag = i;
6714 
6715 	/* start back at beginning and unmap skb */
6716 	prod = txr->tx_prod;
6717 	ring_prod = BNX2_TX_RING_IDX(prod);
6718 	tx_buf = &txr->tx_buf_ring[ring_prod];
6719 	tx_buf->skb = NULL;
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
			 skb_headlen(skb), DMA_TO_DEVICE);
6722 
6723 	/* unmap remaining mapped pages */
6724 	for (i = 0; i < last_frag; i++) {
6725 		prod = BNX2_NEXT_TX_BD(prod);
6726 		ring_prod = BNX2_TX_RING_IDX(prod);
6727 		tx_buf = &txr->tx_buf_ring[ring_prod];
		dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
			       DMA_TO_DEVICE);
6731 	}
6732 
6733 	dev_kfree_skb_any(skb);
6734 	return NETDEV_TX_OK;
6735 }
6736 
6737 /* Called with rtnl_lock */
6738 static int
6739 bnx2_close(struct net_device *dev)
6740 {
6741 	struct bnx2 *bp = netdev_priv(dev);
6742 
6743 	bnx2_disable_int_sync(bp);
6744 	bnx2_napi_disable(bp);
6745 	netif_tx_disable(dev);
6746 	del_timer_sync(&bp->timer);
6747 	bnx2_shutdown_chip(bp);
6748 	bnx2_free_irq(bp);
6749 	bnx2_free_skbs(bp);
6750 	bnx2_free_mem(bp);
6751 	bnx2_del_napi(bp);
6752 	bp->link_up = 0;
6753 	netif_carrier_off(bp->dev);
6754 	return 0;
6755 }
6756 
6757 static void
6758 bnx2_save_stats(struct bnx2 *bp)
6759 {
6760 	u32 *hw_stats = (u32 *) bp->stats_blk;
6761 	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
6762 	int i;
6763 
	/* The first 10 counters are 64-bit (stored as hi/lo u32 pairs);
	 * fold the hardware values into temp_stats, carrying lo overflow
	 * into hi.
	 */
6765 	for (i = 0; i < 20; i += 2) {
6766 		u32 hi;
6767 		u64 lo;
6768 
6769 		hi = temp_stats[i] + hw_stats[i];
6770 		lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
6771 		if (lo > 0xffffffff)
6772 			hi++;
6773 		temp_stats[i] = hi;
6774 		temp_stats[i + 1] = lo & 0xffffffff;
6775 	}
6776 
6777 	for ( ; i < sizeof(struct statistics_block) / 4; i++)
6778 		temp_stats[i] += hw_stats[i];
6779 }
6780 
6781 #define GET_64BIT_NET_STATS64(ctr)		\
6782 	(((u64) (ctr##_hi) << 32) + (u64) (ctr##_lo))
6783 
6784 #define GET_64BIT_NET_STATS(ctr)				\
6785 	GET_64BIT_NET_STATS64(bp->stats_blk->ctr) +		\
6786 	GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)
6787 
6788 #define GET_32BIT_NET_STATS(ctr)				\
6789 	(unsigned long) (bp->stats_blk->ctr +			\
6790 			 bp->temp_stats_blk->ctr)
6791 
6792 static struct rtnl_link_stats64 *
6793 bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
6794 {
6795 	struct bnx2 *bp = netdev_priv(dev);
6796 
6797 	if (bp->stats_blk == NULL)
6798 		return net_stats;
6799 
6800 	net_stats->rx_packets =
6801 		GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
6802 		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
6803 		GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);
6804 
6805 	net_stats->tx_packets =
6806 		GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
6807 		GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
6808 		GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);
6809 
6810 	net_stats->rx_bytes =
6811 		GET_64BIT_NET_STATS(stat_IfHCInOctets);
6812 
6813 	net_stats->tx_bytes =
6814 		GET_64BIT_NET_STATS(stat_IfHCOutOctets);
6815 
6816 	net_stats->multicast =
6817 		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts);
6818 
6819 	net_stats->collisions =
6820 		GET_32BIT_NET_STATS(stat_EtherStatsCollisions);
6821 
6822 	net_stats->rx_length_errors =
6823 		GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
6824 		GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);
6825 
6826 	net_stats->rx_over_errors =
6827 		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6828 		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);
6829 
6830 	net_stats->rx_frame_errors =
6831 		GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);
6832 
6833 	net_stats->rx_crc_errors =
6834 		GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);
6835 
6836 	net_stats->rx_errors = net_stats->rx_length_errors +
6837 		net_stats->rx_over_errors + net_stats->rx_frame_errors +
6838 		net_stats->rx_crc_errors;
6839 
6840 	net_stats->tx_aborted_errors =
6841 		GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
6842 		GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);
6843 
6844 	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
6845 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
6846 		net_stats->tx_carrier_errors = 0;
6847 	else {
6848 		net_stats->tx_carrier_errors =
6849 			GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
6850 	}
6851 
6852 	net_stats->tx_errors =
6853 		GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
6854 		net_stats->tx_aborted_errors +
6855 		net_stats->tx_carrier_errors;
6856 
6857 	net_stats->rx_missed_errors =
6858 		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6859 		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
6860 		GET_32BIT_NET_STATS(stat_FwRxDrop);
6861 
6862 	return net_stats;
6863 }
6864 
6865 /* All ethtool functions called with rtnl_lock */
6866 
6867 static int
6868 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6869 {
6870 	struct bnx2 *bp = netdev_priv(dev);
6871 	int support_serdes = 0, support_copper = 0;
6872 
6873 	cmd->supported = SUPPORTED_Autoneg;
6874 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6875 		support_serdes = 1;
6876 		support_copper = 1;
6877 	} else if (bp->phy_port == PORT_FIBRE)
6878 		support_serdes = 1;
6879 	else
6880 		support_copper = 1;
6881 
6882 	if (support_serdes) {
6883 		cmd->supported |= SUPPORTED_1000baseT_Full |
6884 			SUPPORTED_FIBRE;
6885 		if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6886 			cmd->supported |= SUPPORTED_2500baseX_Full;
6887 
6888 	}
6889 	if (support_copper) {
6890 		cmd->supported |= SUPPORTED_10baseT_Half |
6891 			SUPPORTED_10baseT_Full |
6892 			SUPPORTED_100baseT_Half |
6893 			SUPPORTED_100baseT_Full |
6894 			SUPPORTED_1000baseT_Full |
6895 			SUPPORTED_TP;
6896 
6897 	}
6898 
6899 	spin_lock_bh(&bp->phy_lock);
6900 	cmd->port = bp->phy_port;
6901 	cmd->advertising = bp->advertising;
6902 
6903 	if (bp->autoneg & AUTONEG_SPEED) {
6904 		cmd->autoneg = AUTONEG_ENABLE;
6905 	} else {
6906 		cmd->autoneg = AUTONEG_DISABLE;
6907 	}
6908 
6909 	if (netif_carrier_ok(dev)) {
6910 		ethtool_cmd_speed_set(cmd, bp->line_speed);
6911 		cmd->duplex = bp->duplex;
6912 		if (!(bp->phy_flags & BNX2_PHY_FLAG_SERDES)) {
6913 			if (bp->phy_flags & BNX2_PHY_FLAG_MDIX)
6914 				cmd->eth_tp_mdix = ETH_TP_MDI_X;
6915 			else
6916 				cmd->eth_tp_mdix = ETH_TP_MDI;
6917 		}
6918 	}
6919 	else {
6920 		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
6921 		cmd->duplex = DUPLEX_UNKNOWN;
6922 	}
6923 	spin_unlock_bh(&bp->phy_lock);
6924 
6925 	cmd->transceiver = XCVR_INTERNAL;
6926 	cmd->phy_address = bp->phy_addr;
6927 
6928 	return 0;
6929 }
6930 
6931 static int
6932 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6933 {
6934 	struct bnx2 *bp = netdev_priv(dev);
6935 	u8 autoneg = bp->autoneg;
6936 	u8 req_duplex = bp->req_duplex;
6937 	u16 req_line_speed = bp->req_line_speed;
6938 	u32 advertising = bp->advertising;
6939 	int err = -EINVAL;
6940 
6941 	spin_lock_bh(&bp->phy_lock);
6942 
6943 	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
6944 		goto err_out_unlock;
6945 
6946 	if (cmd->port != bp->phy_port &&
6947 	    !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
6948 		goto err_out_unlock;
6949 
6950 	/* If device is down, we can store the settings only if the user
6951 	 * is setting the currently active port.
6952 	 */
6953 	if (!netif_running(dev) && cmd->port != bp->phy_port)
6954 		goto err_out_unlock;
6955 
6956 	if (cmd->autoneg == AUTONEG_ENABLE) {
6957 		autoneg |= AUTONEG_SPEED;
6958 
6959 		advertising = cmd->advertising;
6960 		if (cmd->port == PORT_TP) {
6961 			advertising &= ETHTOOL_ALL_COPPER_SPEED;
6962 			if (!advertising)
6963 				advertising = ETHTOOL_ALL_COPPER_SPEED;
6964 		} else {
6965 			advertising &= ETHTOOL_ALL_FIBRE_SPEED;
6966 			if (!advertising)
6967 				advertising = ETHTOOL_ALL_FIBRE_SPEED;
6968 		}
6969 		advertising |= ADVERTISED_Autoneg;
6970 	}
6971 	else {
6972 		u32 speed = ethtool_cmd_speed(cmd);
6973 		if (cmd->port == PORT_FIBRE) {
6974 			if ((speed != SPEED_1000 &&
6975 			     speed != SPEED_2500) ||
6976 			    (cmd->duplex != DUPLEX_FULL))
6977 				goto err_out_unlock;
6978 
6979 			if (speed == SPEED_2500 &&
6980 			    !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
6981 				goto err_out_unlock;
6982 		} else if (speed == SPEED_1000 || speed == SPEED_2500)
6983 			goto err_out_unlock;
6984 
6985 		autoneg &= ~AUTONEG_SPEED;
6986 		req_line_speed = speed;
6987 		req_duplex = cmd->duplex;
6988 		advertising = 0;
6989 	}
6990 
6991 	bp->autoneg = autoneg;
6992 	bp->advertising = advertising;
6993 	bp->req_line_speed = req_line_speed;
6994 	bp->req_duplex = req_duplex;
6995 
6996 	err = 0;
6997 	/* If device is down, the new settings will be picked up when it is
6998 	 * brought up.
6999 	 */
7000 	if (netif_running(dev))
7001 		err = bnx2_setup_phy(bp, cmd->port);
7002 
7003 err_out_unlock:
7004 	spin_unlock_bh(&bp->phy_lock);
7005 
7006 	return err;
7007 }
7008 
7009 static void
7010 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7011 {
7012 	struct bnx2 *bp = netdev_priv(dev);
7013 
7014 	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
7015 	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
7016 	strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
7017 	strlcpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
7018 }
7019 
7020 #define BNX2_REGDUMP_LEN		(32 * 1024)
7021 
7022 static int
7023 bnx2_get_regs_len(struct net_device *dev)
7024 {
7025 	return BNX2_REGDUMP_LEN;
7026 }
7027 
7028 static void
7029 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
7030 {
7031 	u32 *p = _p, i, offset;
7032 	u8 *orig_p = _p;
7033 	struct bnx2 *bp = netdev_priv(dev);
7034 	static const u32 reg_boundaries[] = {
7035 		0x0000, 0x0098, 0x0400, 0x045c,
7036 		0x0800, 0x0880, 0x0c00, 0x0c10,
7037 		0x0c30, 0x0d08, 0x1000, 0x101c,
7038 		0x1040, 0x1048, 0x1080, 0x10a4,
7039 		0x1400, 0x1490, 0x1498, 0x14f0,
7040 		0x1500, 0x155c, 0x1580, 0x15dc,
7041 		0x1600, 0x1658, 0x1680, 0x16d8,
7042 		0x1800, 0x1820, 0x1840, 0x1854,
7043 		0x1880, 0x1894, 0x1900, 0x1984,
7044 		0x1c00, 0x1c0c, 0x1c40, 0x1c54,
7045 		0x1c80, 0x1c94, 0x1d00, 0x1d84,
7046 		0x2000, 0x2030, 0x23c0, 0x2400,
7047 		0x2800, 0x2820, 0x2830, 0x2850,
7048 		0x2b40, 0x2c10, 0x2fc0, 0x3058,
7049 		0x3c00, 0x3c94, 0x4000, 0x4010,
7050 		0x4080, 0x4090, 0x43c0, 0x4458,
7051 		0x4c00, 0x4c18, 0x4c40, 0x4c54,
7052 		0x4fc0, 0x5010, 0x53c0, 0x5444,
7053 		0x5c00, 0x5c18, 0x5c80, 0x5c90,
7054 		0x5fc0, 0x6000, 0x6400, 0x6428,
7055 		0x6800, 0x6848, 0x684c, 0x6860,
7056 		0x6888, 0x6910, 0x8000
7057 	};
7058 
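	/* reg_boundaries[] lists [start, end) pairs of readable register
	 * ranges; the walk below dumps each range and leaves the holes
	 * between ranges zeroed in the output buffer.
	 */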
7059 	regs->version = 0;
7060 
7061 	memset(p, 0, BNX2_REGDUMP_LEN);
7062 
7063 	if (!netif_running(bp->dev))
7064 		return;
7065 
7066 	i = 0;
7067 	offset = reg_boundaries[0];
7068 	p += offset;
7069 	while (offset < BNX2_REGDUMP_LEN) {
7070 		*p++ = BNX2_RD(bp, offset);
7071 		offset += 4;
7072 		if (offset == reg_boundaries[i + 1]) {
7073 			offset = reg_boundaries[i + 2];
7074 			p = (u32 *) (orig_p + offset);
7075 			i += 2;
7076 		}
7077 	}
7078 }
7079 
7080 static void
7081 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7082 {
7083 	struct bnx2 *bp = netdev_priv(dev);
7084 
7085 	if (bp->flags & BNX2_FLAG_NO_WOL) {
7086 		wol->supported = 0;
7087 		wol->wolopts = 0;
7088 	}
7089 	else {
7090 		wol->supported = WAKE_MAGIC;
7091 		if (bp->wol)
7092 			wol->wolopts = WAKE_MAGIC;
7093 		else
7094 			wol->wolopts = 0;
7095 	}
7096 	memset(&wol->sopass, 0, sizeof(wol->sopass));
7097 }
7098 
7099 static int
7100 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7101 {
7102 	struct bnx2 *bp = netdev_priv(dev);
7103 
7104 	if (wol->wolopts & ~WAKE_MAGIC)
7105 		return -EINVAL;
7106 
7107 	if (wol->wolopts & WAKE_MAGIC) {
7108 		if (bp->flags & BNX2_FLAG_NO_WOL)
7109 			return -EINVAL;
7110 
7111 		bp->wol = 1;
7112 	}
7113 	else {
7114 		bp->wol = 0;
7115 	}
7116 
7117 	device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
7118 
7119 	return 0;
7120 }
7121 
7122 static int
7123 bnx2_nway_reset(struct net_device *dev)
7124 {
7125 	struct bnx2 *bp = netdev_priv(dev);
7126 	u32 bmcr;
7127 
7128 	if (!netif_running(dev))
7129 		return -EAGAIN;
7130 
7131 	if (!(bp->autoneg & AUTONEG_SPEED)) {
7132 		return -EINVAL;
7133 	}
7134 
7135 	spin_lock_bh(&bp->phy_lock);
7136 
7137 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
7138 		int rc;
7139 
7140 		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
7141 		spin_unlock_bh(&bp->phy_lock);
7142 		return rc;
7143 	}
7144 
7145 	/* Force a link down visible on the other side */
7146 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
7147 		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
7148 		spin_unlock_bh(&bp->phy_lock);
7149 
7150 		msleep(20);
7151 
7152 		spin_lock_bh(&bp->phy_lock);
7153 
7154 		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
7155 		bp->serdes_an_pending = 1;
7156 		mod_timer(&bp->timer, jiffies + bp->current_interval);
7157 	}
7158 
7159 	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
7160 	bmcr &= ~BMCR_LOOPBACK;
7161 	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
7162 
7163 	spin_unlock_bh(&bp->phy_lock);
7164 
7165 	return 0;
7166 }
7167 
7168 static u32
7169 bnx2_get_link(struct net_device *dev)
7170 {
7171 	struct bnx2 *bp = netdev_priv(dev);
7172 
7173 	return bp->link_up;
7174 }
7175 
7176 static int
7177 bnx2_get_eeprom_len(struct net_device *dev)
7178 {
7179 	struct bnx2 *bp = netdev_priv(dev);
7180 
7181 	if (bp->flash_info == NULL)
7182 		return 0;
7183 
7184 	return (int) bp->flash_size;
7185 }
7186 
7187 static int
7188 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7189 		u8 *eebuf)
7190 {
7191 	struct bnx2 *bp = netdev_priv(dev);
7192 	int rc;
7193 
7194 	/* parameters already validated in ethtool_get_eeprom */
7195 
7196 	rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
7197 
7198 	return rc;
7199 }
7200 
7201 static int
7202 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7203 		u8 *eebuf)
7204 {
7205 	struct bnx2 *bp = netdev_priv(dev);
7206 	int rc;
7207 
7208 	/* parameters already validated in ethtool_set_eeprom */
7209 
7210 	rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
7211 
7212 	return rc;
7213 }
7214 
7215 static int
7216 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7217 {
7218 	struct bnx2 *bp = netdev_priv(dev);
7219 
7220 	memset(coal, 0, sizeof(struct ethtool_coalesce));
7221 
7222 	coal->rx_coalesce_usecs = bp->rx_ticks;
7223 	coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
7224 	coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
7225 	coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
7226 
7227 	coal->tx_coalesce_usecs = bp->tx_ticks;
7228 	coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
7229 	coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
7230 	coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
7231 
7232 	coal->stats_block_coalesce_usecs = bp->stats_ticks;
7233 
7234 	return 0;
7235 }
7236 
7237 static int
7238 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7239 {
7240 	struct bnx2 *bp = netdev_priv(dev);
7241 
7242 	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
7243 	if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
7244 
7245 	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
7246 	if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
7247 
7248 	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
7249 	if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
7250 
7251 	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
7252 	if (bp->rx_quick_cons_trip_int > 0xff)
7253 		bp->rx_quick_cons_trip_int = 0xff;
7254 
7255 	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
7256 	if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
7257 
7258 	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
7259 	if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
7260 
7261 	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
7262 	if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
7263 
7264 	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
	if (bp->tx_quick_cons_trip_int > 0xff)
		bp->tx_quick_cons_trip_int = 0xff;
7267 
7268 	bp->stats_ticks = coal->stats_block_coalesce_usecs;
7269 	if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
7270 		if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
7271 			bp->stats_ticks = USEC_PER_SEC;
7272 	}
7273 	if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
7274 		bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7275 	bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7276 
7277 	if (netif_running(bp->dev)) {
7278 		bnx2_netif_stop(bp, true);
7279 		bnx2_init_nic(bp, 0);
7280 		bnx2_netif_start(bp, true);
7281 	}
7282 
7283 	return 0;
7284 }
7285 
7286 static void
7287 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7288 {
7289 	struct bnx2 *bp = netdev_priv(dev);
7290 
7291 	ering->rx_max_pending = BNX2_MAX_TOTAL_RX_DESC_CNT;
7292 	ering->rx_jumbo_max_pending = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;
7293 
7294 	ering->rx_pending = bp->rx_ring_size;
7295 	ering->rx_jumbo_pending = bp->rx_pg_ring_size;
7296 
7297 	ering->tx_max_pending = BNX2_MAX_TX_DESC_CNT;
7298 	ering->tx_pending = bp->tx_ring_size;
7299 }
7300 
7301 static int
7302 bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx, bool reset_irq)
7303 {
7304 	if (netif_running(bp->dev)) {
7305 		/* Reset will erase chipset stats; save them */
7306 		bnx2_save_stats(bp);
7307 
7308 		bnx2_netif_stop(bp, true);
7309 		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
7310 		if (reset_irq) {
7311 			bnx2_free_irq(bp);
7312 			bnx2_del_napi(bp);
7313 		} else {
7314 			__bnx2_free_irq(bp);
7315 		}
7316 		bnx2_free_skbs(bp);
7317 		bnx2_free_mem(bp);
7318 	}
7319 
7320 	bnx2_set_rx_ring_size(bp, rx);
7321 	bp->tx_ring_size = tx;
7322 
7323 	if (netif_running(bp->dev)) {
7324 		int rc = 0;
7325 
7326 		if (reset_irq) {
7327 			rc = bnx2_setup_int_mode(bp, disable_msi);
7328 			bnx2_init_napi(bp);
7329 		}
7330 
7331 		if (!rc)
7332 			rc = bnx2_alloc_mem(bp);
7333 
7334 		if (!rc)
7335 			rc = bnx2_request_irq(bp);
7336 
7337 		if (!rc)
7338 			rc = bnx2_init_nic(bp, 0);
7339 
7340 		if (rc) {
7341 			bnx2_napi_enable(bp);
7342 			dev_close(bp->dev);
7343 			return rc;
7344 		}
7345 #ifdef BCM_CNIC
7346 		mutex_lock(&bp->cnic_lock);
7347 		/* Let cnic know about the new status block. */
7348 		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
7349 			bnx2_setup_cnic_irq_info(bp);
7350 		mutex_unlock(&bp->cnic_lock);
7351 #endif
7352 		bnx2_netif_start(bp, true);
7353 	}
7354 	return 0;
7355 }
7356 
7357 static int
7358 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7359 {
7360 	struct bnx2 *bp = netdev_priv(dev);
7361 	int rc;
7362 
7363 	if ((ering->rx_pending > BNX2_MAX_TOTAL_RX_DESC_CNT) ||
7364 		(ering->tx_pending > BNX2_MAX_TX_DESC_CNT) ||
7365 		(ering->tx_pending <= MAX_SKB_FRAGS)) {
7366 
7367 		return -EINVAL;
7368 	}
7369 	rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending,
7370 				   false);
7371 	return rc;
7372 }
7373 
7374 static void
7375 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7376 {
7377 	struct bnx2 *bp = netdev_priv(dev);
7378 
7379 	epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7380 	epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7381 	epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7382 }
7383 
7384 static int
7385 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7386 {
7387 	struct bnx2 *bp = netdev_priv(dev);
7388 
7389 	bp->req_flow_ctrl = 0;
7390 	if (epause->rx_pause)
7391 		bp->req_flow_ctrl |= FLOW_CTRL_RX;
7392 	if (epause->tx_pause)
7393 		bp->req_flow_ctrl |= FLOW_CTRL_TX;
7394 
7395 	if (epause->autoneg) {
7396 		bp->autoneg |= AUTONEG_FLOW_CTRL;
7397 	}
7398 	else {
7399 		bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7400 	}
7401 
7402 	if (netif_running(dev)) {
7403 		spin_lock_bh(&bp->phy_lock);
7404 		bnx2_setup_phy(bp, bp->phy_port);
7405 		spin_unlock_bh(&bp->phy_lock);
7406 	}
7407 
7408 	return 0;
7409 }
7410 
7411 static struct {
7412 	char string[ETH_GSTRING_LEN];
7413 } bnx2_stats_str_arr[] = {
7414 	{ "rx_bytes" },
7415 	{ "rx_error_bytes" },
7416 	{ "tx_bytes" },
7417 	{ "tx_error_bytes" },
7418 	{ "rx_ucast_packets" },
7419 	{ "rx_mcast_packets" },
7420 	{ "rx_bcast_packets" },
7421 	{ "tx_ucast_packets" },
7422 	{ "tx_mcast_packets" },
7423 	{ "tx_bcast_packets" },
7424 	{ "tx_mac_errors" },
7425 	{ "tx_carrier_errors" },
7426 	{ "rx_crc_errors" },
7427 	{ "rx_align_errors" },
7428 	{ "tx_single_collisions" },
7429 	{ "tx_multi_collisions" },
7430 	{ "tx_deferred" },
7431 	{ "tx_excess_collisions" },
7432 	{ "tx_late_collisions" },
7433 	{ "tx_total_collisions" },
7434 	{ "rx_fragments" },
7435 	{ "rx_jabbers" },
7436 	{ "rx_undersize_packets" },
7437 	{ "rx_oversize_packets" },
7438 	{ "rx_64_byte_packets" },
7439 	{ "rx_65_to_127_byte_packets" },
7440 	{ "rx_128_to_255_byte_packets" },
7441 	{ "rx_256_to_511_byte_packets" },
7442 	{ "rx_512_to_1023_byte_packets" },
7443 	{ "rx_1024_to_1522_byte_packets" },
7444 	{ "rx_1523_to_9022_byte_packets" },
7445 	{ "tx_64_byte_packets" },
7446 	{ "tx_65_to_127_byte_packets" },
7447 	{ "tx_128_to_255_byte_packets" },
7448 	{ "tx_256_to_511_byte_packets" },
7449 	{ "tx_512_to_1023_byte_packets" },
7450 	{ "tx_1024_to_1522_byte_packets" },
7451 	{ "tx_1523_to_9022_byte_packets" },
7452 	{ "rx_xon_frames" },
7453 	{ "rx_xoff_frames" },
7454 	{ "tx_xon_frames" },
7455 	{ "tx_xoff_frames" },
7456 	{ "rx_mac_ctrl_frames" },
7457 	{ "rx_filtered_packets" },
7458 	{ "rx_ftq_discards" },
7459 	{ "rx_discards" },
7460 	{ "rx_fw_discards" },
7461 };
7462 
7463 #define BNX2_NUM_STATS ARRAY_SIZE(bnx2_stats_str_arr)
7464 
7465 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
7466 
7467 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
7468     STATS_OFFSET32(stat_IfHCInOctets_hi),
7469     STATS_OFFSET32(stat_IfHCInBadOctets_hi),
7470     STATS_OFFSET32(stat_IfHCOutOctets_hi),
7471     STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
7472     STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
7473     STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
7474     STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
7475     STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
7476     STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
7477     STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
7478     STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
7479     STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
7480     STATS_OFFSET32(stat_Dot3StatsFCSErrors),
7481     STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
7482     STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
7483     STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
7484     STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
7485     STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
7486     STATS_OFFSET32(stat_Dot3StatsLateCollisions),
7487     STATS_OFFSET32(stat_EtherStatsCollisions),
7488     STATS_OFFSET32(stat_EtherStatsFragments),
7489     STATS_OFFSET32(stat_EtherStatsJabbers),
7490     STATS_OFFSET32(stat_EtherStatsUndersizePkts),
7491     STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
7492     STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
7493     STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
7494     STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
7495     STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
7496     STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
7497     STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
7498     STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
7499     STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
7500     STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
7501     STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
7502     STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
7503     STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
7504     STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
7505     STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
7506     STATS_OFFSET32(stat_XonPauseFramesReceived),
7507     STATS_OFFSET32(stat_XoffPauseFramesReceived),
7508     STATS_OFFSET32(stat_OutXonSent),
7509     STATS_OFFSET32(stat_OutXoffSent),
7510     STATS_OFFSET32(stat_MacControlFramesReceived),
7511     STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
7512     STATS_OFFSET32(stat_IfInFTQDiscards),
7513     STATS_OFFSET32(stat_IfInMBUFDiscards),
7514     STATS_OFFSET32(stat_FwRxDrop),
7515 };
7516 
/* stat_IfHCInBadOctets is skipped on all chips, and
 * stat_Dot3StatsCarrierSenseErrors is additionally skipped on 5706 A0-A2
 * and 5708 A0, because of errata (the zero-length entries below).
 */
7520 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
7521 	8,0,8,8,8,8,8,8,8,8,
7522 	4,0,4,4,4,4,4,4,4,4,
7523 	4,4,4,4,4,4,4,4,4,4,
7524 	4,4,4,4,4,4,4,4,4,4,
7525 	4,4,4,4,4,4,4,
7526 };
7527 
7528 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
7529 	8,0,8,8,8,8,8,8,8,8,
7530 	4,4,4,4,4,4,4,4,4,4,
7531 	4,4,4,4,4,4,4,4,4,4,
7532 	4,4,4,4,4,4,4,4,4,4,
7533 	4,4,4,4,4,4,4,
7534 };
7535 
7536 #define BNX2_NUM_TESTS 6
7537 
7538 static struct {
7539 	char string[ETH_GSTRING_LEN];
7540 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
7541 	{ "register_test (offline)" },
7542 	{ "memory_test (offline)" },
7543 	{ "loopback_test (offline)" },
7544 	{ "nvram_test (online)" },
7545 	{ "interrupt_test (online)" },
7546 	{ "link_test (online)" },
7547 };
7548 
7549 static int
7550 bnx2_get_sset_count(struct net_device *dev, int sset)
7551 {
7552 	switch (sset) {
7553 	case ETH_SS_TEST:
7554 		return BNX2_NUM_TESTS;
7555 	case ETH_SS_STATS:
7556 		return BNX2_NUM_STATS;
7557 	default:
7558 		return -EOPNOTSUPP;
7559 	}
7560 }
7561 
7562 static void
7563 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
7564 {
7565 	struct bnx2 *bp = netdev_priv(dev);
7566 
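	/* buf[] follows bnx2_tests_str_arr order: 0 register, 1 memory,
	 * 2 loopback, 3 nvram, 4 interrupt, 5 link.
	 */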
7567 	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
7568 	if (etest->flags & ETH_TEST_FL_OFFLINE) {
7569 		int i;
7570 
7571 		bnx2_netif_stop(bp, true);
7572 		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
7573 		bnx2_free_skbs(bp);
7574 
7575 		if (bnx2_test_registers(bp) != 0) {
7576 			buf[0] = 1;
7577 			etest->flags |= ETH_TEST_FL_FAILED;
7578 		}
7579 		if (bnx2_test_memory(bp) != 0) {
7580 			buf[1] = 1;
7581 			etest->flags |= ETH_TEST_FL_FAILED;
7582 		}
7583 		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
7584 			etest->flags |= ETH_TEST_FL_FAILED;
7585 
7586 		if (!netif_running(bp->dev))
7587 			bnx2_shutdown_chip(bp);
7588 		else {
7589 			bnx2_init_nic(bp, 1);
7590 			bnx2_netif_start(bp, true);
7591 		}
7592 
7593 		/* wait for link up */
7594 		for (i = 0; i < 7; i++) {
7595 			if (bp->link_up)
7596 				break;
7597 			msleep_interruptible(1000);
7598 		}
7599 	}
7600 
7601 	if (bnx2_test_nvram(bp) != 0) {
7602 		buf[3] = 1;
7603 		etest->flags |= ETH_TEST_FL_FAILED;
7604 	}
7605 	if (bnx2_test_intr(bp) != 0) {
7606 		buf[4] = 1;
7607 		etest->flags |= ETH_TEST_FL_FAILED;
7608 	}
7609 
7610 	if (bnx2_test_link(bp) != 0) {
7611 		buf[5] = 1;
7612 		etest->flags |= ETH_TEST_FL_FAILED;
7613 	}
7615 }
7616 
7617 static void
7618 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7619 {
7620 	switch (stringset) {
7621 	case ETH_SS_STATS:
7622 		memcpy(buf, bnx2_stats_str_arr,
7623 			sizeof(bnx2_stats_str_arr));
7624 		break;
7625 	case ETH_SS_TEST:
7626 		memcpy(buf, bnx2_tests_str_arr,
7627 			sizeof(bnx2_tests_str_arr));
7628 		break;
7629 	}
7630 }
7631 
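/* Report the accumulated counters.  Each value is the sum of the live
 * counter in the stats block and the matching entry in temp_stats_blk,
 * where counters are preserved across chip resets.  The per-chip len
 * arrays above give each counter's width; 64-bit counters are stored
 * as two 32-bit words, high word first.
 */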
7632 static void
7633 bnx2_get_ethtool_stats(struct net_device *dev,
7634 		struct ethtool_stats *stats, u64 *buf)
7635 {
7636 	struct bnx2 *bp = netdev_priv(dev);
7637 	int i;
7638 	u32 *hw_stats = (u32 *) bp->stats_blk;
7639 	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
7640 	u8 *stats_len_arr = NULL;
7641 
7642 	if (hw_stats == NULL) {
7643 		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
7644 		return;
7645 	}
7646 
7647 	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
7648 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) ||
7649 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A2) ||
7650 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
7651 		stats_len_arr = bnx2_5706_stats_len_arr;
7652 	else
7653 		stats_len_arr = bnx2_5708_stats_len_arr;
7654 
7655 	for (i = 0; i < BNX2_NUM_STATS; i++) {
7656 		unsigned long offset;
7657 
7658 		if (stats_len_arr[i] == 0) {
7659 			/* skip this counter */
7660 			buf[i] = 0;
7661 			continue;
7662 		}
7663 
7664 		offset = bnx2_stats_offset_arr[i];
7665 		if (stats_len_arr[i] == 4) {
7666 			/* 4-byte counter */
7667 			buf[i] = (u64) *(hw_stats + offset) +
7668 				 *(temp_stats + offset);
7669 			continue;
7670 		}
7671 		/* 8-byte counter */
7672 		buf[i] = (((u64) *(hw_stats + offset)) << 32) +
7673 			 *(hw_stats + offset + 1) +
7674 			 (((u64) *(temp_stats + offset)) << 32) +
7675 			 *(temp_stats + offset + 1);
7676 	}
7677 }
7678 
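/* Blink the port LED for ethtool identification.  ETHTOOL_ID_ACTIVE
 * switches the LED to MAC control and returns 1 so the core cycles us
 * on/off once per second; ETHTOOL_ID_INACTIVE restores the saved LED
 * mode.
 */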
7679 static int
7680 bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
7681 {
7682 	struct bnx2 *bp = netdev_priv(dev);
7683 
7684 	switch (state) {
7685 	case ETHTOOL_ID_ACTIVE:
7686 		bp->leds_save = BNX2_RD(bp, BNX2_MISC_CFG);
7687 		BNX2_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
7688 		return 1;	/* cycle on/off once per second */
7689 
7690 	case ETHTOOL_ID_ON:
7691 		BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
7692 			BNX2_EMAC_LED_1000MB_OVERRIDE |
7693 			BNX2_EMAC_LED_100MB_OVERRIDE |
7694 			BNX2_EMAC_LED_10MB_OVERRIDE |
7695 			BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
7696 			BNX2_EMAC_LED_TRAFFIC);
7697 		break;
7698 
7699 	case ETHTOOL_ID_OFF:
7700 		BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
7701 		break;
7702 
7703 	case ETHTOOL_ID_INACTIVE:
7704 		BNX2_WR(bp, BNX2_EMAC_LED, 0);
7705 		BNX2_WR(bp, BNX2_MISC_CFG, bp->leds_save);
7706 		break;
7707 	}
7708 
7709 	return 0;
7710 }
7711 
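/* When the hardware/firmware combination cannot leave VLAN tags in
 * place (BNX2_FLAG_CAN_KEEP_VLAN unset), RX tag stripping must stay
 * enabled, so force the feature on.
 */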
7712 static netdev_features_t
7713 bnx2_fix_features(struct net_device *dev, netdev_features_t features)
7714 {
7715 	struct bnx2 *bp = netdev_priv(dev);
7716 
7717 	if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
7718 		features |= NETIF_F_HW_VLAN_CTAG_RX;
7719 
7720 	return features;
7721 }
7722 
7723 static int
7724 bnx2_set_features(struct net_device *dev, netdev_features_t features)
7725 {
7726 	struct bnx2 *bp = netdev_priv(dev);
7727 
7728 	/* TSO with VLAN tag won't work with current firmware */
7729 	if (features & NETIF_F_HW_VLAN_CTAG_TX)
7730 		dev->vlan_features |= (dev->hw_features & NETIF_F_ALL_TSO);
7731 	else
7732 		dev->vlan_features &= ~NETIF_F_ALL_TSO;
7733 
7734 	if ((!!(features & NETIF_F_HW_VLAN_CTAG_RX) !=
7735 	    !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) &&
7736 	    netif_running(dev)) {
7737 		bnx2_netif_stop(bp, false);
7738 		dev->features = features;
7739 		bnx2_set_rx_mode(dev);
7740 		bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
7741 		bnx2_netif_start(bp, false);
7742 		return 1;
7743 	}
7744 
7745 	return 0;
7746 }
7747 
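/* More than one RX/TX ring is only possible with MSI-X, so advertise
 * a single ring unless the chip is MSI-X capable and MSI has not been
 * disabled by the module parameter.
 */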
7748 static void bnx2_get_channels(struct net_device *dev,
7749 			      struct ethtool_channels *channels)
7750 {
7751 	struct bnx2 *bp = netdev_priv(dev);
7752 	u32 max_rx_rings = 1;
7753 	u32 max_tx_rings = 1;
7754 
7755 	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7756 		max_rx_rings = RX_MAX_RINGS;
7757 		max_tx_rings = TX_MAX_RINGS;
7758 	}
7759 
7760 	channels->max_rx = max_rx_rings;
7761 	channels->max_tx = max_tx_rings;
7762 	channels->max_other = 0;
7763 	channels->max_combined = 0;
7764 	channels->rx_count = bp->num_rx_rings;
7765 	channels->tx_count = bp->num_tx_rings;
7766 	channels->other_count = 0;
7767 	channels->combined_count = 0;
7768 }
7769 
7770 static int bnx2_set_channels(struct net_device *dev,
7771 			      struct ethtool_channels *channels)
7772 {
7773 	struct bnx2 *bp = netdev_priv(dev);
7774 	u32 max_rx_rings = 1;
7775 	u32 max_tx_rings = 1;
7776 	int rc = 0;
7777 
7778 	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7779 		max_rx_rings = RX_MAX_RINGS;
7780 		max_tx_rings = TX_MAX_RINGS;
7781 	}
7782 	if (channels->rx_count > max_rx_rings ||
7783 	    channels->tx_count > max_tx_rings)
7784 		return -EINVAL;
7785 
7786 	bp->num_req_rx_rings = channels->rx_count;
7787 	bp->num_req_tx_rings = channels->tx_count;
7788 
7789 	if (netif_running(dev))
7790 		rc = bnx2_change_ring_size(bp, bp->rx_ring_size,
7791 					   bp->tx_ring_size, true);
7792 
7793 	return rc;
7794 }
7795 
7796 static const struct ethtool_ops bnx2_ethtool_ops = {
7797 	.get_settings		= bnx2_get_settings,
7798 	.set_settings		= bnx2_set_settings,
7799 	.get_drvinfo		= bnx2_get_drvinfo,
7800 	.get_regs_len		= bnx2_get_regs_len,
7801 	.get_regs		= bnx2_get_regs,
7802 	.get_wol		= bnx2_get_wol,
7803 	.set_wol		= bnx2_set_wol,
7804 	.nway_reset		= bnx2_nway_reset,
7805 	.get_link		= bnx2_get_link,
7806 	.get_eeprom_len		= bnx2_get_eeprom_len,
7807 	.get_eeprom		= bnx2_get_eeprom,
7808 	.set_eeprom		= bnx2_set_eeprom,
7809 	.get_coalesce		= bnx2_get_coalesce,
7810 	.set_coalesce		= bnx2_set_coalesce,
7811 	.get_ringparam		= bnx2_get_ringparam,
7812 	.set_ringparam		= bnx2_set_ringparam,
7813 	.get_pauseparam		= bnx2_get_pauseparam,
7814 	.set_pauseparam		= bnx2_set_pauseparam,
7815 	.self_test		= bnx2_self_test,
7816 	.get_strings		= bnx2_get_strings,
7817 	.set_phys_id		= bnx2_set_phys_id,
7818 	.get_ethtool_stats	= bnx2_get_ethtool_stats,
7819 	.get_sset_count		= bnx2_get_sset_count,
7820 	.get_channels		= bnx2_get_channels,
7821 	.set_channels		= bnx2_set_channels,
7822 };
7823 
7824 /* Called with rtnl_lock */
7825 static int
7826 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7827 {
7828 	struct mii_ioctl_data *data = if_mii(ifr);
7829 	struct bnx2 *bp = netdev_priv(dev);
7830 	int err;
7831 
7832 	switch (cmd) {
7833 	case SIOCGMIIPHY:
7834 		data->phy_id = bp->phy_addr;
7835 
7836 		/* fallthru */
7837 	case SIOCGMIIREG: {
7838 		u32 mii_regval;
7839 
7840 		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7841 			return -EOPNOTSUPP;
7842 
7843 		if (!netif_running(dev))
7844 			return -EAGAIN;
7845 
7846 		spin_lock_bh(&bp->phy_lock);
7847 		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
7848 		spin_unlock_bh(&bp->phy_lock);
7849 
7850 		data->val_out = mii_regval;
7851 
7852 		return err;
7853 	}
7854 
7855 	case SIOCSMIIREG:
7856 		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7857 			return -EOPNOTSUPP;
7858 
7859 		if (!netif_running(dev))
7860 			return -EAGAIN;
7861 
7862 		spin_lock_bh(&bp->phy_lock);
7863 		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
7864 		spin_unlock_bh(&bp->phy_lock);
7865 
7866 		return err;
7867 
7868 	default:
7869 		/* do nothing */
7870 		break;
7871 	}
7872 	return -EOPNOTSUPP;
7873 }
7874 
7875 /* Called with rtnl_lock */
7876 static int
7877 bnx2_change_mac_addr(struct net_device *dev, void *p)
7878 {
7879 	struct sockaddr *addr = p;
7880 	struct bnx2 *bp = netdev_priv(dev);
7881 
7882 	if (!is_valid_ether_addr(addr->sa_data))
7883 		return -EADDRNOTAVAIL;
7884 
7885 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7886 	if (netif_running(dev))
7887 		bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7888 
7889 	return 0;
7890 }
7891 
7892 /* Called with rtnl_lock */
7893 static int
7894 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7895 {
7896 	struct bnx2 *bp = netdev_priv(dev);
7897 
7898 	if ((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE ||
7899 	    (new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE)
7900 		return -EINVAL;
7901 
7902 	dev->mtu = new_mtu;
7903 	return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size,
7904 				     false);
7905 }
7906 
7907 #ifdef CONFIG_NET_POLL_CONTROLLER
7908 static void
7909 poll_bnx2(struct net_device *dev)
7910 {
7911 	struct bnx2 *bp = netdev_priv(dev);
7912 	int i;
7913 
7914 	for (i = 0; i < bp->irq_nvecs; i++) {
7915 		struct bnx2_irq *irq = &bp->irq_tbl[i];
7916 
7917 		disable_irq(irq->vector);
7918 		irq->handler(irq->vector, &bp->bnx2_napi[i]);
7919 		enable_irq(irq->vector);
7920 	}
7921 }
7922 #endif
7923 
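/* Determine the media type of a 5709 dual-media part.  The bond ID
 * identifies single-media packages (C = copper, S = SerDes); otherwise
 * the PHY strap value (override or hardware strap) selects the media
 * per PCI function.
 */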
7924 static void
7925 bnx2_get_5709_media(struct bnx2 *bp)
7926 {
7927 	u32 val = BNX2_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7928 	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7929 	u32 strap;
7930 
7931 	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7932 		return;
7933 	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7934 		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7935 		return;
7936 	}
7937 
7938 	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7939 		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7940 	else
7941 		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7942 
7943 	if (bp->func == 0) {
7944 		switch (strap) {
7945 		case 0x4:
7946 		case 0x5:
7947 		case 0x6:
7948 			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7949 			return;
7950 		}
7951 	} else {
7952 		switch (strap) {
7953 		case 0x1:
7954 		case 0x2:
7955 		case 0x4:
7956 			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7957 			return;
7958 		}
7959 	}
7960 }
7961 
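/* Derive the PCI/PCI-X bus speed from the clock-detect status bits so
 * it can be reported at probe time by bnx2_bus_string().
 */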
7962 static void
7963 bnx2_get_pci_speed(struct bnx2 *bp)
7964 {
7965 	u32 reg;
7966 
7967 	reg = BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS);
7968 	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
7969 		u32 clkreg;
7970 
7971 		bp->flags |= BNX2_FLAG_PCIX;
7972 
7973 		clkreg = BNX2_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
7974 
7975 		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
7976 		switch (clkreg) {
7977 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
7978 			bp->bus_speed_mhz = 133;
7979 			break;
7980 
7981 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
7982 			bp->bus_speed_mhz = 100;
7983 			break;
7984 
7985 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
7986 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
7987 			bp->bus_speed_mhz = 66;
7988 			break;
7989 
7990 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
7991 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
7992 			bp->bus_speed_mhz = 50;
7993 			break;
7994 
7995 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
7996 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
7997 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
7998 			bp->bus_speed_mhz = 33;
7999 			break;
8000 		}
8001 	} else {
8003 		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
8004 			bp->bus_speed_mhz = 66;
8005 		else
8006 			bp->bus_speed_mhz = 33;
8007 	}
8008 
8009 	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
8010 		bp->flags |= BNX2_FLAG_PCI_32BIT;
8012 }
8013 
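/* Look for a manufacturer-specific firmware version string in the VPD
 * image kept in NVRAM.  The "V0" vendor keyword is only trusted when
 * the MFR_ID keyword matches "1028" (Dell), and the result seeds
 * bp->fw_version.
 */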
8014 static void
8015 bnx2_read_vpd_fw_ver(struct bnx2 *bp)
8016 {
8017 	int rc, i, j;
8018 	u8 *data;
8019 	unsigned int block_end, rosize, len;
8020 
8021 #define BNX2_VPD_NVRAM_OFFSET	0x300
8022 #define BNX2_VPD_LEN		128
8023 #define BNX2_MAX_VER_SLEN	30
8024 
8025 	data = kmalloc(256, GFP_KERNEL);
8026 	if (!data)
8027 		return;
8028 
8029 	rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
8030 			     BNX2_VPD_LEN);
8031 	if (rc)
8032 		goto vpd_done;
8033 
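	/* bnx2_nvram_read() returned the image into the upper half of the
	 * buffer as 32-bit words in the opposite byte order, so reverse
	 * each word into the lower half before parsing it as VPD.
	 */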
8034 	for (i = 0; i < BNX2_VPD_LEN; i += 4) {
8035 		data[i] = data[i + BNX2_VPD_LEN + 3];
8036 		data[i + 1] = data[i + BNX2_VPD_LEN + 2];
8037 		data[i + 2] = data[i + BNX2_VPD_LEN + 1];
8038 		data[i + 3] = data[i + BNX2_VPD_LEN];
8039 	}
8040 
8041 	i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
8042 	if (i < 0)
8043 		goto vpd_done;
8044 
8045 	rosize = pci_vpd_lrdt_size(&data[i]);
8046 	i += PCI_VPD_LRDT_TAG_SIZE;
8047 	block_end = i + rosize;
8048 
8049 	if (block_end > BNX2_VPD_LEN)
8050 		goto vpd_done;
8051 
8052 	j = pci_vpd_find_info_keyword(data, i, rosize,
8053 				      PCI_VPD_RO_KEYWORD_MFR_ID);
8054 	if (j < 0)
8055 		goto vpd_done;
8056 
8057 	len = pci_vpd_info_field_size(&data[j]);
8058 
8059 	j += PCI_VPD_INFO_FLD_HDR_SIZE;
8060 	if (j + len > block_end || len != 4 ||
8061 	    memcmp(&data[j], "1028", 4))
8062 		goto vpd_done;
8063 
8064 	j = pci_vpd_find_info_keyword(data, i, rosize,
8065 				      PCI_VPD_RO_KEYWORD_VENDOR0);
8066 	if (j < 0)
8067 		goto vpd_done;
8068 
8069 	len = pci_vpd_info_field_size(&data[j]);
8070 
8071 	j += PCI_VPD_INFO_FLD_HDR_SIZE;
8072 	if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
8073 		goto vpd_done;
8074 
8075 	memcpy(bp->fw_version, &data[j], len);
8076 	bp->fw_version[len] = ' ';
8077 
8078 vpd_done:
8079 	kfree(data);
8080 }
8081 
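/* One-time board setup at probe: enable the PCI device, map BAR0,
 * identify the chip and bus, detect the media type, read the permanent
 * MAC address and firmware versions from shared memory, and set the
 * default ring sizes and interrupt coalescing parameters.
 */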
8082 static int
8083 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
8084 {
8085 	struct bnx2 *bp;
8086 	int rc, i, j;
8087 	u32 reg;
8088 	u64 dma_mask, persist_dma_mask;
8089 	int err;
8090 
8091 	SET_NETDEV_DEV(dev, &pdev->dev);
8092 	bp = netdev_priv(dev);
8093 
8094 	bp->flags = 0;
8095 	bp->phy_flags = 0;
8096 
8097 	bp->temp_stats_blk =
8098 		kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
8099 
8100 	if (bp->temp_stats_blk == NULL) {
8101 		rc = -ENOMEM;
8102 		goto err_out;
8103 	}
8104 
8105 	/* enable device (incl. PCI PM wakeup), and bus-mastering */
8106 	rc = pci_enable_device(pdev);
8107 	if (rc) {
8108 		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
8109 		goto err_out;
8110 	}
8111 
8112 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
8113 		dev_err(&pdev->dev,
8114 			"Cannot find PCI device base address, aborting\n");
8115 		rc = -ENODEV;
8116 		goto err_out_disable;
8117 	}
8118 
8119 	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
8120 	if (rc) {
8121 		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
8122 		goto err_out_disable;
8123 	}
8124 
8125 	pci_set_master(pdev);
8126 
8127 	bp->pm_cap = pdev->pm_cap;
8128 	if (bp->pm_cap == 0) {
8129 		dev_err(&pdev->dev,
8130 			"Cannot find power management capability, aborting\n");
8131 		rc = -EIO;
8132 		goto err_out_release;
8133 	}
8134 
8135 	bp->dev = dev;
8136 	bp->pdev = pdev;
8137 
8138 	spin_lock_init(&bp->phy_lock);
8139 	spin_lock_init(&bp->indirect_lock);
8140 #ifdef BCM_CNIC
8141 	mutex_init(&bp->cnic_lock);
8142 #endif
8143 	INIT_WORK(&bp->reset_task, bnx2_reset_task);
8144 
8145 	bp->regview = pci_iomap(pdev, 0, MB_GET_CID_ADDR(TX_TSS_CID +
8146 							 TX_MAX_TSS_RINGS + 1));
8147 	if (!bp->regview) {
8148 		dev_err(&pdev->dev, "Cannot map register space, aborting\n");
8149 		rc = -ENOMEM;
8150 		goto err_out_release;
8151 	}
8152 
8153 	/* Configure byte swap and enable write to the reg_window registers.
8154 	 * Rely on the CPU to do target byte swapping on big-endian systems;
8155 	 * the chip's target access swapping will not swap all accesses.
8156 	 */
8157 	BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG,
8158 		BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
8159 		BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
8160 
8161 	bp->chip_id = BNX2_RD(bp, BNX2_MISC_ID);
8162 
8163 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
8164 		if (!pci_is_pcie(pdev)) {
8165 			dev_err(&pdev->dev, "Not PCIE, aborting\n");
8166 			rc = -EIO;
8167 			goto err_out_unmap;
8168 		}
8169 		bp->flags |= BNX2_FLAG_PCIE;
8170 		if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
8171 			bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
8172 
8173 		/* AER (Advanced Error Reporting) hooks */
8174 		err = pci_enable_pcie_error_reporting(pdev);
8175 		if (!err)
8176 			bp->flags |= BNX2_FLAG_AER_ENABLED;
8177 
8178 	} else {
8179 		bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
8180 		if (bp->pcix_cap == 0) {
8181 			dev_err(&pdev->dev,
8182 				"Cannot find PCIX capability, aborting\n");
8183 			rc = -EIO;
8184 			goto err_out_unmap;
8185 		}
8186 		bp->flags |= BNX2_FLAG_BROKEN_STATS;
8187 	}
8188 
8189 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
8190 	    BNX2_CHIP_REV(bp) != BNX2_CHIP_REV_Ax) {
8191 		if (pdev->msix_cap)
8192 			bp->flags |= BNX2_FLAG_MSIX_CAP;
8193 	}
8194 
8195 	if (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0 &&
8196 	    BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A1) {
8197 		if (pdev->msi_cap)
8198 			bp->flags |= BNX2_FLAG_MSI_CAP;
8199 	}
8200 
8201 	/* 5708 cannot support DMA addresses > 40-bit.  */
8202 	if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
8203 		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
8204 	else
8205 		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
8206 
8207 	/* Configure DMA attributes. */
8208 	if (pci_set_dma_mask(pdev, dma_mask) == 0) {
8209 		dev->features |= NETIF_F_HIGHDMA;
8210 		rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
8211 		if (rc) {
8212 			dev_err(&pdev->dev,
8213 				"pci_set_consistent_dma_mask failed, aborting\n");
8214 			goto err_out_unmap;
8215 		}
8216 	} else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
8217 		dev_err(&pdev->dev, "System does not support DMA, aborting\n");
8218 		goto err_out_unmap;
8219 	}
8220 
8221 	if (!(bp->flags & BNX2_FLAG_PCIE))
8222 		bnx2_get_pci_speed(bp);
8223 
8224 	/* 5706A0 may falsely detect SERR and PERR. */
8225 	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
8226 		reg = BNX2_RD(bp, PCI_COMMAND);
8227 		reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
8228 		BNX2_WR(bp, PCI_COMMAND, reg);
8229 	} else if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) &&
8230 		   !(bp->flags & BNX2_FLAG_PCIX)) {
8231 		dev_err(&pdev->dev,
8232 			"5706 A1 can only be used in a PCIX bus, aborting\n");
8233 		rc = -EPERM;	/* rc would otherwise still be 0 here */
8234 		goto err_out_unmap;
8235 	}
8236 
8237 	bnx2_init_nvram(bp);
8238 
8239 	reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
8240 
8241 	if (bnx2_reg_rd_ind(bp, BNX2_MCP_TOE_ID) & BNX2_MCP_TOE_ID_FUNCTION_ID)
8242 		bp->func = 1;
8243 
8244 	if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
8245 	    BNX2_SHM_HDR_SIGNATURE_SIG) {
8246 		u32 off = bp->func << 2;
8247 
8248 		bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
8249 	} else
8250 		bp->shmem_base = HOST_VIEW_SHMEM_BASE;
8251 
8252 	/* Get the permanent MAC address.  First we need to make sure the
8253 	 * firmware is actually running.
8254 	 */
8255 	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
8256 
8257 	if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
8258 	    BNX2_DEV_INFO_SIGNATURE_MAGIC) {
8259 		dev_err(&pdev->dev, "Firmware not running, aborting\n");
8260 		rc = -ENODEV;
8261 		goto err_out_unmap;
8262 	}
8263 
8264 	bnx2_read_vpd_fw_ver(bp);
8265 
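	/* Append the bootcode version, "bc X.Y.Z", decoded from the top
	 * three bytes of BNX2_DEV_INFO_BC_REV with leading zeros
	 * suppressed.
	 */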
8266 	j = strlen(bp->fw_version);
8267 	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
8268 	for (i = 0; i < 3 && j < 24; i++) {
8269 		u8 num, k, skip0;
8270 
8271 		if (i == 0) {
8272 			bp->fw_version[j++] = 'b';
8273 			bp->fw_version[j++] = 'c';
8274 			bp->fw_version[j++] = ' ';
8275 		}
8276 		num = (u8) (reg >> (24 - (i * 8)));
8277 		for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
8278 			if (num >= k || !skip0 || k == 1) {
8279 				bp->fw_version[j++] = (num / k) + '0';
8280 				skip0 = 0;
8281 			}
8282 		}
8283 		if (i != 2)
8284 			bp->fw_version[j++] = '.';
8285 	}
8286 	reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
8287 	if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
8288 		bp->wol = 1;
8289 
8290 	if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
8291 		bp->flags |= BNX2_FLAG_ASF_ENABLE;
8292 
8293 		for (i = 0; i < 30; i++) {
8294 			reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8295 			if (reg & BNX2_CONDITION_MFW_RUN_MASK)
8296 				break;
8297 			msleep(10);
8298 		}
8299 	}
8300 	reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8301 	reg &= BNX2_CONDITION_MFW_RUN_MASK;
8302 	if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
8303 	    reg != BNX2_CONDITION_MFW_RUN_NONE) {
8304 		u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
8305 
8306 		if (j < 32)
8307 			bp->fw_version[j++] = ' ';
8308 		for (i = 0; i < 3 && j < 28; i++) {
8309 			reg = bnx2_reg_rd_ind(bp, addr + i * 4);
8310 			reg = be32_to_cpu(reg);
8311 			memcpy(&bp->fw_version[j], &reg, 4);
8312 			j += 4;
8313 		}
8314 	}
8315 
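	/* The permanent MAC address is kept in shared memory as two
	 * words: the upper word holds the first two bytes, the lower
	 * word the remaining four, most-significant byte first.
	 */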
8316 	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
8317 	bp->mac_addr[0] = (u8) (reg >> 8);
8318 	bp->mac_addr[1] = (u8) reg;
8319 
8320 	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
8321 	bp->mac_addr[2] = (u8) (reg >> 24);
8322 	bp->mac_addr[3] = (u8) (reg >> 16);
8323 	bp->mac_addr[4] = (u8) (reg >> 8);
8324 	bp->mac_addr[5] = (u8) reg;
8325 
8326 	bp->tx_ring_size = BNX2_MAX_TX_DESC_CNT;
8327 	bnx2_set_rx_ring_size(bp, 255);
8328 
8329 	bp->tx_quick_cons_trip_int = 2;
8330 	bp->tx_quick_cons_trip = 20;
8331 	bp->tx_ticks_int = 18;
8332 	bp->tx_ticks = 80;
8333 
8334 	bp->rx_quick_cons_trip_int = 2;
8335 	bp->rx_quick_cons_trip = 12;
8336 	bp->rx_ticks_int = 18;
8337 	bp->rx_ticks = 18;
8338 
8339 	bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
8340 
8341 	bp->current_interval = BNX2_TIMER_INTERVAL;
8342 
8343 	bp->phy_addr = 1;
8344 
8345 	/* Determine the media type; WoL is disabled below for SerDes parts. */
8346 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
8347 		bnx2_get_5709_media(bp);
8348 	else if (BNX2_CHIP_BOND(bp) & BNX2_CHIP_BOND_SERDES_BIT)
8349 		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
8350 
8351 	bp->phy_port = PORT_TP;
8352 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
8353 		bp->phy_port = PORT_FIBRE;
8354 		reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
8355 		if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
8356 			bp->flags |= BNX2_FLAG_NO_WOL;
8357 			bp->wol = 0;
8358 		}
8359 		if (BNX2_CHIP(bp) == BNX2_CHIP_5706) {
8360 			/* Don't do parallel detect on this board because of
8361 			 * some board problems.  The link will not go down
8362 			 * if we do parallel detect.
8363 			 */
8364 			if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
8365 			    pdev->subsystem_device == 0x310c)
8366 				bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
8367 		} else {
8368 			bp->phy_addr = 2;
8369 			if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
8370 				bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
8371 		}
8372 	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5706 ||
8373 		   BNX2_CHIP(bp) == BNX2_CHIP_5708)
8374 		bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
8375 	else if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
8376 		 (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax ||
8377 		  BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Bx))
8378 		bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
8379 
8380 	bnx2_init_fw_cap(bp);
8381 
8382 	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
8383 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
8384 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1) ||
8385 	    !(BNX2_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
8386 		bp->flags |= BNX2_FLAG_NO_WOL;
8387 		bp->wol = 0;
8388 	}
8389 
8390 	if (bp->flags & BNX2_FLAG_NO_WOL)
8391 		device_set_wakeup_capable(&bp->pdev->dev, false);
8392 	else
8393 		device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
8394 
8395 	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
8396 		bp->tx_quick_cons_trip_int =
8397 			bp->tx_quick_cons_trip;
8398 		bp->tx_ticks_int = bp->tx_ticks;
8399 		bp->rx_quick_cons_trip_int =
8400 			bp->rx_quick_cons_trip;
8401 		bp->rx_ticks_int = bp->rx_ticks;
8402 		bp->comp_prod_trip_int = bp->comp_prod_trip;
8403 		bp->com_ticks_int = bp->com_ticks;
8404 		bp->cmd_ticks_int = bp->cmd_ticks;
8405 	}
8406 
8407 	/* Disable MSI on 5706 if AMD 8132 bridge is found.
8408 	 *
8409 	 * MSI is defined to be a 32-bit write.  The 5706 does 64-bit MSI writes
8410 	 * with byte enables disabled on the unused 32-bit word.  This is legal
8411 	 * but causes problems on the AMD 8132 which will eventually stop
8412 	 * responding after a while.
8413 	 *
8414 	 * AMD believes this incompatibility is unique to the 5706, and
8415 	 * prefers to locally disable MSI rather than globally disabling it.
8416 	 */
8417 	if (BNX2_CHIP(bp) == BNX2_CHIP_5706 && disable_msi == 0) {
8418 		struct pci_dev *amd_8132 = NULL;
8419 
8420 		while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
8421 						  PCI_DEVICE_ID_AMD_8132_BRIDGE,
8422 						  amd_8132))) {
8423 
8424 			if (amd_8132->revision >= 0x10 &&
8425 			    amd_8132->revision <= 0x13) {
8426 				disable_msi = 1;
8427 				pci_dev_put(amd_8132);
8428 				break;
8429 			}
8430 		}
8431 	}
8432 
8433 	bnx2_set_default_link(bp);
8434 	bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
8435 
8436 	init_timer(&bp->timer);
8437 	bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
8438 	bp->timer.data = (unsigned long) bp;
8439 	bp->timer.function = bnx2_timer;
8440 
8441 #ifdef BCM_CNIC
8442 	if (bnx2_shmem_rd(bp, BNX2_ISCSI_INITIATOR) & BNX2_ISCSI_INITIATOR_EN)
8443 		bp->cnic_eth_dev.max_iscsi_conn =
8444 			(bnx2_shmem_rd(bp, BNX2_ISCSI_MAX_CONN) &
8445 			 BNX2_ISCSI_MAX_CONN_MASK) >> BNX2_ISCSI_MAX_CONN_SHIFT;
8446 	bp->cnic_probe = bnx2_cnic_probe;
8447 #endif
8448 	pci_save_state(pdev);
8449 
8450 	return 0;
8451 
8452 err_out_unmap:
8453 	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
8454 		pci_disable_pcie_error_reporting(pdev);
8455 		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
8456 	}
8457 
8458 	pci_iounmap(pdev, bp->regview);
8459 	bp->regview = NULL;
8460 
8461 err_out_release:
8462 	pci_release_regions(pdev);
8463 
8464 err_out_disable:
8465 	pci_disable_device(pdev);
8466 
8467 err_out:
8468 	return rc;
8469 }
8470 
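/* Format a human-readable bus description, e.g. "PCI-X 64-bit 133MHz"
 * or "PCI Express", for the probe banner.  str must be large enough
 * for the longest form (the caller passes a 40-byte buffer).
 */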
8471 static char *
8472 bnx2_bus_string(struct bnx2 *bp, char *str)
8473 {
8474 	char *s = str;
8475 
8476 	if (bp->flags & BNX2_FLAG_PCIE) {
8477 		s += sprintf(s, "PCI Express");
8478 	} else {
8479 		s += sprintf(s, "PCI");
8480 		if (bp->flags & BNX2_FLAG_PCIX)
8481 			s += sprintf(s, "-X");
8482 		if (bp->flags & BNX2_FLAG_PCI_32BIT)
8483 			s += sprintf(s, " 32-bit");
8484 		else
8485 			s += sprintf(s, " 64-bit");
8486 		s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
8487 	}
8488 	return str;
8489 }
8490 
8491 static void
8492 bnx2_del_napi(struct bnx2 *bp)
8493 {
8494 	int i;
8495 
8496 	for (i = 0; i < bp->irq_nvecs; i++)
8497 		netif_napi_del(&bp->bnx2_napi[i].napi);
8498 }
8499 
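/* Register one NAPI context per interrupt vector: vector 0 uses
 * bnx2_poll(), which also handles link and status-block events, and
 * the remaining MSI-X vectors use bnx2_poll_msix().
 */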
8500 static void
8501 bnx2_init_napi(struct bnx2 *bp)
8502 {
8503 	int i;
8504 
8505 	for (i = 0; i < bp->irq_nvecs; i++) {
8506 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8507 		int (*poll)(struct napi_struct *, int);
8508 
8509 		if (i == 0)
8510 			poll = bnx2_poll;
8511 		else
8512 			poll = bnx2_poll_msix;
8513 
8514 		netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
8515 		bnapi->bp = bp;
8516 	}
8517 }
8518 
8519 static const struct net_device_ops bnx2_netdev_ops = {
8520 	.ndo_open		= bnx2_open,
8521 	.ndo_start_xmit		= bnx2_start_xmit,
8522 	.ndo_stop		= bnx2_close,
8523 	.ndo_get_stats64	= bnx2_get_stats64,
8524 	.ndo_set_rx_mode	= bnx2_set_rx_mode,
8525 	.ndo_do_ioctl		= bnx2_ioctl,
8526 	.ndo_validate_addr	= eth_validate_addr,
8527 	.ndo_set_mac_address	= bnx2_change_mac_addr,
8528 	.ndo_change_mtu		= bnx2_change_mtu,
8529 	.ndo_fix_features	= bnx2_fix_features,
8530 	.ndo_set_features	= bnx2_set_features,
8531 	.ndo_tx_timeout		= bnx2_tx_timeout,
8532 #ifdef CONFIG_NET_POLL_CONTROLLER
8533 	.ndo_poll_controller	= poll_bnx2,
8534 #endif
8535 };
8536 
8537 static int
8538 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8539 {
8540 	static int version_printed;
8541 	struct net_device *dev;
8542 	struct bnx2 *bp;
8543 	int rc;
8544 	char str[40];
8545 
8546 	if (version_printed++ == 0)
8547 		pr_info("%s", version);
8548 
8549 	/* dev zeroed in init_etherdev */
8550 	dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
8551 	if (!dev)
8552 		return -ENOMEM;
8553 
8554 	rc = bnx2_init_board(pdev, dev);
8555 	if (rc < 0)
8556 		goto err_free;
8557 
8558 	dev->netdev_ops = &bnx2_netdev_ops;
8559 	dev->watchdog_timeo = TX_TIMEOUT;
8560 	dev->ethtool_ops = &bnx2_ethtool_ops;
8561 
8562 	bp = netdev_priv(dev);
8563 
8564 	pci_set_drvdata(pdev, dev);
8565 
8566 	memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN);
8567 
8568 	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
8569 		NETIF_F_TSO | NETIF_F_TSO_ECN |
8570 		NETIF_F_RXHASH | NETIF_F_RXCSUM;
8571 
8572 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
8573 		dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
8574 
8575 	dev->vlan_features = dev->hw_features;
8576 	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
8577 	dev->features |= dev->hw_features;
8578 	dev->priv_flags |= IFF_UNICAST_FLT;
8579 
8580 	if ((rc = register_netdev(dev))) {
8581 		dev_err(&pdev->dev, "Cannot register net device\n");
8582 		goto error;
8583 	}
8584 
8585 	netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, "
8586 		    "node addr %pM\n", board_info[ent->driver_data].name,
8587 		    ((BNX2_CHIP_ID(bp) & 0xf000) >> 12) + 'A',
8588 		    ((BNX2_CHIP_ID(bp) & 0x0ff0) >> 4),
8589 		    bnx2_bus_string(bp, str), (long)pci_resource_start(pdev, 0),
8590 		    pdev->irq, dev->dev_addr);
8591 
8592 	return 0;
8593 
8594 error:
8595 	pci_iounmap(pdev, bp->regview);
8596 	pci_release_regions(pdev);
8597 	pci_disable_device(pdev);
8598 err_free:
8599 	free_netdev(dev);
8600 	return rc;
8601 }
8602 
8603 static void
8604 bnx2_remove_one(struct pci_dev *pdev)
8605 {
8606 	struct net_device *dev = pci_get_drvdata(pdev);
8607 	struct bnx2 *bp = netdev_priv(dev);
8608 
8609 	unregister_netdev(dev);
8610 
8611 	del_timer_sync(&bp->timer);
8612 	cancel_work_sync(&bp->reset_task);
8613 
8614 	pci_iounmap(bp->pdev, bp->regview);
8615 
8616 	kfree(bp->temp_stats_blk);
8617 
8618 	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
8619 		pci_disable_pcie_error_reporting(pdev);
8620 		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
8621 	}
8622 
8623 	bnx2_release_firmware(bp);
8624 
8625 	free_netdev(dev);
8626 
8627 	pci_release_regions(pdev);
8628 	pci_disable_device(pdev);
8629 }
8630 
8631 #ifdef CONFIG_PM_SLEEP
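/* System sleep hooks: suspend quiesces the NIC, frees its IRQ and
 * buffers, and sets up wake-on-LAN; resume brings the device back to
 * D0 and restarts it if the interface was running.
 */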
8632 static int
8633 bnx2_suspend(struct device *device)
8634 {
8635 	struct pci_dev *pdev = to_pci_dev(device);
8636 	struct net_device *dev = pci_get_drvdata(pdev);
8637 	struct bnx2 *bp = netdev_priv(dev);
8638 
8639 	if (netif_running(dev)) {
8640 		cancel_work_sync(&bp->reset_task);
8641 		bnx2_netif_stop(bp, true);
8642 		netif_device_detach(dev);
8643 		del_timer_sync(&bp->timer);
8644 		bnx2_shutdown_chip(bp);
8645 		__bnx2_free_irq(bp);
8646 		bnx2_free_skbs(bp);
8647 	}
8648 	bnx2_setup_wol(bp);
8649 	return 0;
8650 }
8651 
8652 static int
8653 bnx2_resume(struct device *device)
8654 {
8655 	struct pci_dev *pdev = to_pci_dev(device);
8656 	struct net_device *dev = pci_get_drvdata(pdev);
8657 	struct bnx2 *bp = netdev_priv(dev);
8658 
8659 	if (!netif_running(dev))
8660 		return 0;
8661 
8662 	bnx2_set_power_state(bp, PCI_D0);
8663 	netif_device_attach(dev);
8664 	bnx2_request_irq(bp);
8665 	bnx2_init_nic(bp, 1);
8666 	bnx2_netif_start(bp, true);
8667 	return 0;
8668 }
8669 
8670 static SIMPLE_DEV_PM_OPS(bnx2_pm_ops, bnx2_suspend, bnx2_resume);
8671 #define BNX2_PM_OPS (&bnx2_pm_ops)
8672 
8673 #else
8674 
8675 #define BNX2_PM_OPS NULL
8676 
8677 #endif /* CONFIG_PM_SLEEP */

8678 /**
8679  * bnx2_io_error_detected - called when PCI error is detected
8680  * @pdev: Pointer to PCI device
8681  * @state: The current pci connection state
8682  *
8683  * This function is called after a PCI bus error affecting
8684  * this device has been detected.
8685  */
8686 static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
8687 					       pci_channel_state_t state)
8688 {
8689 	struct net_device *dev = pci_get_drvdata(pdev);
8690 	struct bnx2 *bp = netdev_priv(dev);
8691 
8692 	rtnl_lock();
8693 	netif_device_detach(dev);
8694 
8695 	if (state == pci_channel_io_perm_failure) {
8696 		rtnl_unlock();
8697 		return PCI_ERS_RESULT_DISCONNECT;
8698 	}
8699 
8700 	if (netif_running(dev)) {
8701 		bnx2_netif_stop(bp, true);
8702 		del_timer_sync(&bp->timer);
8703 		bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
8704 	}
8705 
8706 	pci_disable_device(pdev);
8707 	rtnl_unlock();
8708 
8709 	/* Request a slot reset. */
8710 	return PCI_ERS_RESULT_NEED_RESET;
8711 }
8712 
8713 /**
8714  * bnx2_io_slot_reset - called after the pci bus has been reset.
8715  * @pdev: Pointer to PCI device
8716  *
8717  * Restart the card from scratch, as if from a cold-boot.
8718  */
8719 static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
8720 {
8721 	struct net_device *dev = pci_get_drvdata(pdev);
8722 	struct bnx2 *bp = netdev_priv(dev);
8723 	pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
8724 	int err = 0;
8725 
8726 	rtnl_lock();
8727 	if (pci_enable_device(pdev)) {
8728 		dev_err(&pdev->dev,
8729 			"Cannot re-enable PCI device after reset\n");
8730 	} else {
8731 		pci_set_master(pdev);
8732 		pci_restore_state(pdev);
8733 		pci_save_state(pdev);
8734 
8735 		if (netif_running(dev))
8736 			err = bnx2_init_nic(bp, 1);
8737 
8738 		if (!err)
8739 			result = PCI_ERS_RESULT_RECOVERED;
8740 	}
8741 
8742 	if (result != PCI_ERS_RESULT_RECOVERED && netif_running(dev)) {
8743 		bnx2_napi_enable(bp);
8744 		dev_close(dev);
8745 	}
8746 	rtnl_unlock();
8747 
8748 	if (!(bp->flags & BNX2_FLAG_AER_ENABLED))
8749 		return result;
8750 
8751 	err = pci_cleanup_aer_uncorrect_error_status(pdev);
8752 	if (err) {
8753 		dev_err(&pdev->dev,
8754 			"pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
8755 			 err); /* non-fatal, continue */
8756 	}
8757 
8758 	return result;
8759 }
8760 
8761 /**
8762  * bnx2_io_resume - called when traffic can start flowing again.
8763  * @pdev: Pointer to PCI device
8764  *
8765  * This callback is called when the error recovery driver tells us that
8766  * it's OK to resume normal operation.
8767  */
8768 static void bnx2_io_resume(struct pci_dev *pdev)
8769 {
8770 	struct net_device *dev = pci_get_drvdata(pdev);
8771 	struct bnx2 *bp = netdev_priv(dev);
8772 
8773 	rtnl_lock();
8774 	if (netif_running(dev))
8775 		bnx2_netif_start(bp, true);
8776 
8777 	netif_device_attach(dev);
8778 	rtnl_unlock();
8779 }
8780 
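/* Shutdown hook: close the device and, if the system is powering off,
 * drop to D3hot so that wake-on-LAN, if enabled, can still function.
 */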
8781 static void bnx2_shutdown(struct pci_dev *pdev)
8782 {
8783 	struct net_device *dev = pci_get_drvdata(pdev);
8784 	struct bnx2 *bp;
8785 
8786 	if (!dev)
8787 		return;
8788 
8789 	bp = netdev_priv(dev);
8790 	if (!bp)
8791 		return;
8792 
8793 	rtnl_lock();
8794 	if (netif_running(dev))
8795 		dev_close(bp->dev);
8796 
8797 	if (system_state == SYSTEM_POWER_OFF)
8798 		bnx2_set_power_state(bp, PCI_D3hot);
8799 
8800 	rtnl_unlock();
8801 }
8802 
8803 static const struct pci_error_handlers bnx2_err_handler = {
8804 	.error_detected	= bnx2_io_error_detected,
8805 	.slot_reset	= bnx2_io_slot_reset,
8806 	.resume		= bnx2_io_resume,
8807 };
8808 
8809 static struct pci_driver bnx2_pci_driver = {
8810 	.name		= DRV_MODULE_NAME,
8811 	.id_table	= bnx2_pci_tbl,
8812 	.probe		= bnx2_init_one,
8813 	.remove		= bnx2_remove_one,
8814 	.driver.pm	= BNX2_PM_OPS,
8815 	.err_handler	= &bnx2_err_handler,
8816 	.shutdown	= bnx2_shutdown,
8817 };
8818 
8819 module_pci_driver(bnx2_pci_driver);
8820