1 /* bnx2.c: QLogic bnx2 network driver.
2  *
3  * Copyright (c) 2004-2014 Broadcom Corporation
4  * Copyright (c) 2014-2015 QLogic Corporation
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation.
9  *
10  * Written by: Michael Chan  (mchan@broadcom.com)
11  */
12 
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14 
15 #include <linux/module.h>
16 #include <linux/moduleparam.h>
17 
18 #include <linux/stringify.h>
19 #include <linux/kernel.h>
20 #include <linux/timer.h>
21 #include <linux/errno.h>
22 #include <linux/ioport.h>
23 #include <linux/slab.h>
24 #include <linux/vmalloc.h>
25 #include <linux/interrupt.h>
26 #include <linux/pci.h>
27 #include <linux/netdevice.h>
28 #include <linux/etherdevice.h>
29 #include <linux/skbuff.h>
30 #include <linux/dma-mapping.h>
31 #include <linux/bitops.h>
32 #include <asm/io.h>
33 #include <asm/irq.h>
34 #include <linux/delay.h>
35 #include <asm/byteorder.h>
36 #include <asm/page.h>
37 #include <linux/time.h>
38 #include <linux/ethtool.h>
39 #include <linux/mii.h>
40 #include <linux/if.h>
41 #include <linux/if_vlan.h>
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/firmware.h>
50 #include <linux/log2.h>
51 #include <linux/aer.h>
52 #include <linux/crash_dump.h>
53 
54 #if IS_ENABLED(CONFIG_CNIC)
55 #define BCM_CNIC 1
56 #include "cnic_if.h"
57 #endif
58 #include "bnx2.h"
59 #include "bnx2_fw.h"
60 
61 #define DRV_MODULE_NAME		"bnx2"
62 #define DRV_MODULE_VERSION	"2.2.6"
63 #define DRV_MODULE_RELDATE	"January 29, 2014"
64 #define FW_MIPS_FILE_06		"bnx2/bnx2-mips-06-6.2.3.fw"
65 #define FW_RV2P_FILE_06		"bnx2/bnx2-rv2p-06-6.0.15.fw"
66 #define FW_MIPS_FILE_09		"bnx2/bnx2-mips-09-6.2.1b.fw"
67 #define FW_RV2P_FILE_09_Ax	"bnx2/bnx2-rv2p-09ax-6.0.17.fw"
68 #define FW_RV2P_FILE_09		"bnx2/bnx2-rv2p-09-6.0.17.fw"
69 
70 #define RUN_AT(x) (jiffies + (x))
71 
72 /* Time in jiffies before concluding the transmitter is hung. */
73 #define TX_TIMEOUT  (5*HZ)
74 
75 static char version[] =
76 	"QLogic " DRV_MODULE_NAME " Gigabit Ethernet Driver v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
77 
78 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
79 MODULE_DESCRIPTION("QLogic BCM5706/5708/5709/5716 Driver");
80 MODULE_LICENSE("GPL");
81 MODULE_VERSION(DRV_MODULE_VERSION);
82 MODULE_FIRMWARE(FW_MIPS_FILE_06);
83 MODULE_FIRMWARE(FW_RV2P_FILE_06);
84 MODULE_FIRMWARE(FW_MIPS_FILE_09);
85 MODULE_FIRMWARE(FW_RV2P_FILE_09);
86 MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);
87 
88 static int disable_msi = 0;
89 
90 module_param(disable_msi, int, 0444);
91 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
92 
93 typedef enum {
94 	BCM5706 = 0,
95 	NC370T,
96 	NC370I,
97 	BCM5706S,
98 	NC370F,
99 	BCM5708,
100 	BCM5708S,
101 	BCM5709,
102 	BCM5709S,
103 	BCM5716,
104 	BCM5716S,
105 } board_t;
106 
107 /* indexed by board_t, above */
108 static struct {
109 	char *name;
110 } board_info[] = {
111 	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
112 	{ "HP NC370T Multifunction Gigabit Server Adapter" },
113 	{ "HP NC370i Multifunction Gigabit Server Adapter" },
114 	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
115 	{ "HP NC370F Multifunction Gigabit Server Adapter" },
116 	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
117 	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
118 	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
119 	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
120 	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
121 	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
122 };
123 
124 static const struct pci_device_id bnx2_pci_tbl[] = {
125 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
126 	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
127 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
128 	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
129 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
130 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
131 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
132 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
133 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
134 	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
135 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
136 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
137 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
138 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
139 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
140 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
141 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
142 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
143 	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
144 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
145 	{ PCI_VENDOR_ID_BROADCOM, 0x163c,
146 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
147 	{ 0, }
148 };
149 
150 static const struct flash_spec flash_table[] =
151 {
152 #define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
153 #define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
154 	/* Slow EEPROM */
155 	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
156 	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
157 	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
158 	 "EEPROM - slow"},
159 	/* Expansion entry 0001 */
160 	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
161 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
162 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
163 	 "Entry 0001"},
164 	/* Saifun SA25F010 (non-buffered flash) */
165 	/* strap, cfg1, & write1 need updates */
166 	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
167 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
168 	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
169 	 "Non-buffered flash (128kB)"},
170 	/* Saifun SA25F020 (non-buffered flash) */
171 	/* strap, cfg1, & write1 need updates */
172 	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
173 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
174 	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
175 	 "Non-buffered flash (256kB)"},
176 	/* Expansion entry 0100 */
177 	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
178 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
179 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
180 	 "Entry 0100"},
181 	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
182 	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
183 	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
184 	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
185 	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
186 	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
187 	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
188 	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
189 	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
190 	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
191 	/* Saifun SA25F005 (non-buffered flash) */
192 	/* strap, cfg1, & write1 need updates */
193 	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
194 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
195 	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
196 	 "Non-buffered flash (64kB)"},
197 	/* Fast EEPROM */
198 	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
199 	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
200 	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
201 	 "EEPROM - fast"},
202 	/* Expansion entry 1001 */
203 	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
204 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
205 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
206 	 "Entry 1001"},
207 	/* Expansion entry 1010 */
208 	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
209 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
210 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
211 	 "Entry 1010"},
212 	/* ATMEL AT45DB011B (buffered flash) */
213 	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
214 	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
215 	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
216 	 "Buffered flash (128kB)"},
217 	/* Expansion entry 1100 */
218 	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
219 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
220 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
221 	 "Entry 1100"},
222 	/* Expansion entry 1101 */
223 	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
224 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
225 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
226 	 "Entry 1101"},
227 	/* Atmel Expansion entry 1110 */
228 	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
229 	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
230 	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
231 	 "Entry 1110 (Atmel)"},
232 	/* ATMEL AT45DB021B (buffered flash) */
233 	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
234 	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
235 	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
236 	 "Buffered flash (256kB)"},
237 };
238 
239 static const struct flash_spec flash_5709 = {
240 	.flags		= BNX2_NV_BUFFERED,
241 	.page_bits	= BCM5709_FLASH_PAGE_BITS,
242 	.page_size	= BCM5709_FLASH_PAGE_SIZE,
243 	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
244 	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
245 	.name		= "5709 Buffered flash (256kB)",
246 };
247 
248 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
249 
250 static void bnx2_init_napi(struct bnx2 *bp);
251 static void bnx2_del_napi(struct bnx2 *bp);
252 
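/* Worked example of the index arithmetic in bnx2_tx_avail() below
 * (illustrative only, assuming 16-bit producer/consumer indices):
 *
 *	tx_prod = 0x0003, tx_cons = 0xfffe
 *	diff    = 0x0003 - 0xfffe = 0xffff0005 (as u32) -> & 0xffff = 5
 *
 * A masked difference equal to BNX2_TX_DESC_CNT (256) is clamped to
 * BNX2_MAX_TX_DESC_CNT (255), since the 256 indices cover only 255
 * usable entries (the skipped index is presumably the chain
 * descriptor at the end of each descriptor page).
 */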
253 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
254 {
255 	u32 diff;
256 
257 	/* The ring uses 256 indices for 255 entries; one of them
258 	 * needs to be skipped.
259 	 */
260 	diff = READ_ONCE(txr->tx_prod) - READ_ONCE(txr->tx_cons);
261 	if (unlikely(diff >= BNX2_TX_DESC_CNT)) {
262 		diff &= 0xffff;
263 		if (diff == BNX2_TX_DESC_CNT)
264 			diff = BNX2_MAX_TX_DESC_CNT;
265 	}
266 	return bp->tx_ring_size - diff;
267 }
268 
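/* Indirect register access: the target offset is latched into the
 * PCICFG window address register and the data is then transferred
 * through the window data register; indirect_lock serializes the
 * two-step sequence against other users of the window.
 */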
269 static u32
270 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
271 {
272 	unsigned long flags;
273 	u32 val;
274 
275 	spin_lock_irqsave(&bp->indirect_lock, flags);
276 	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
277 	val = BNX2_RD(bp, BNX2_PCICFG_REG_WINDOW);
278 	spin_unlock_irqrestore(&bp->indirect_lock, flags);
279 	return val;
280 }
281 
282 static void
283 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
284 {
285 	unsigned long flags;
286 
287 	spin_lock_irqsave(&bp->indirect_lock, flags);
288 	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
289 	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
290 	spin_unlock_irqrestore(&bp->indirect_lock, flags);
291 }
292 
293 static void
294 bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
295 {
296 	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
297 }
298 
299 static u32
300 bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
301 {
302 	return bnx2_reg_rd_ind(bp, bp->shmem_base + offset);
303 }
304 
305 static void
306 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
307 {
308 	unsigned long flags;
309 
310 	offset += cid_addr;
311 	spin_lock_irqsave(&bp->indirect_lock, flags);
312 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
313 		int i;
314 
315 		BNX2_WR(bp, BNX2_CTX_CTX_DATA, val);
316 		BNX2_WR(bp, BNX2_CTX_CTX_CTRL,
317 			offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
318 		for (i = 0; i < 5; i++) {
319 			val = BNX2_RD(bp, BNX2_CTX_CTX_CTRL);
320 			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
321 				break;
322 			udelay(5);
323 		}
324 	} else {
325 		BNX2_WR(bp, BNX2_CTX_DATA_ADR, offset);
326 		BNX2_WR(bp, BNX2_CTX_DATA, val);
327 	}
328 	spin_unlock_irqrestore(&bp->indirect_lock, flags);
329 }
330 
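/* Hooks for the cnic offload driver (built when CONFIG_CNIC is
 * enabled): indirect register/context accessors, IRQ and status-block
 * wiring, and the register/unregister entry points used for iSCSI
 * offload.
 */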
331 #ifdef BCM_CNIC
332 static int
333 bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
334 {
335 	struct bnx2 *bp = netdev_priv(dev);
336 	struct drv_ctl_io *io = &info->data.io;
337 
338 	switch (info->cmd) {
339 	case DRV_CTL_IO_WR_CMD:
340 		bnx2_reg_wr_ind(bp, io->offset, io->data);
341 		break;
342 	case DRV_CTL_IO_RD_CMD:
343 		io->data = bnx2_reg_rd_ind(bp, io->offset);
344 		break;
345 	case DRV_CTL_CTX_WR_CMD:
346 		bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
347 		break;
348 	default:
349 		return -EINVAL;
350 	}
351 	return 0;
352 }
353 
354 static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
355 {
356 	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
357 	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
358 	int sb_id;
359 
360 	if (bp->flags & BNX2_FLAG_USING_MSIX) {
361 		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
362 		bnapi->cnic_present = 0;
363 		sb_id = bp->irq_nvecs;
364 		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
365 	} else {
366 		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
367 		bnapi->cnic_tag = bnapi->last_status_idx;
368 		bnapi->cnic_present = 1;
369 		sb_id = 0;
370 		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
371 	}
372 
373 	cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
374 	cp->irq_arr[0].status_blk = (void *)
375 		((unsigned long) bnapi->status_blk.msi +
376 		(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
377 	cp->irq_arr[0].status_blk_num = sb_id;
378 	cp->num_irq = 1;
379 }
380 
381 static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
382 			      void *data)
383 {
384 	struct bnx2 *bp = netdev_priv(dev);
385 	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
386 
387 	if (!ops)
388 		return -EINVAL;
389 
390 	if (cp->drv_state & CNIC_DRV_STATE_REGD)
391 		return -EBUSY;
392 
393 	if (!bnx2_reg_rd_ind(bp, BNX2_FW_MAX_ISCSI_CONN))
394 		return -ENODEV;
395 
396 	bp->cnic_data = data;
397 	rcu_assign_pointer(bp->cnic_ops, ops);
398 
399 	cp->num_irq = 0;
400 	cp->drv_state = CNIC_DRV_STATE_REGD;
401 
402 	bnx2_setup_cnic_irq_info(bp);
403 
404 	return 0;
405 }
406 
407 static int bnx2_unregister_cnic(struct net_device *dev)
408 {
409 	struct bnx2 *bp = netdev_priv(dev);
410 	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
411 	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
412 
413 	mutex_lock(&bp->cnic_lock);
414 	cp->drv_state = 0;
415 	bnapi->cnic_present = 0;
416 	RCU_INIT_POINTER(bp->cnic_ops, NULL);
417 	mutex_unlock(&bp->cnic_lock);
418 	synchronize_rcu();
419 	return 0;
420 }
421 
422 static struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
423 {
424 	struct bnx2 *bp = netdev_priv(dev);
425 	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
426 
427 	if (!cp->max_iscsi_conn)
428 		return NULL;
429 
430 	cp->drv_owner = THIS_MODULE;
431 	cp->chip_id = bp->chip_id;
432 	cp->pdev = bp->pdev;
433 	cp->io_base = bp->regview;
434 	cp->drv_ctl = bnx2_drv_ctl;
435 	cp->drv_register_cnic = bnx2_register_cnic;
436 	cp->drv_unregister_cnic = bnx2_unregister_cnic;
437 
438 	return cp;
439 }
440 
441 static void
442 bnx2_cnic_stop(struct bnx2 *bp)
443 {
444 	struct cnic_ops *c_ops;
445 	struct cnic_ctl_info info;
446 
447 	mutex_lock(&bp->cnic_lock);
448 	c_ops = rcu_dereference_protected(bp->cnic_ops,
449 					  lockdep_is_held(&bp->cnic_lock));
450 	if (c_ops) {
451 		info.cmd = CNIC_CTL_STOP_CMD;
452 		c_ops->cnic_ctl(bp->cnic_data, &info);
453 	}
454 	mutex_unlock(&bp->cnic_lock);
455 }
456 
457 static void
458 bnx2_cnic_start(struct bnx2 *bp)
459 {
460 	struct cnic_ops *c_ops;
461 	struct cnic_ctl_info info;
462 
463 	mutex_lock(&bp->cnic_lock);
464 	c_ops = rcu_dereference_protected(bp->cnic_ops,
465 					  lockdep_is_held(&bp->cnic_lock));
466 	if (c_ops) {
467 		if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
468 			struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
469 
470 			bnapi->cnic_tag = bnapi->last_status_idx;
471 		}
472 		info.cmd = CNIC_CTL_START_CMD;
473 		c_ops->cnic_ctl(bp->cnic_data, &info);
474 	}
475 	mutex_unlock(&bp->cnic_lock);
476 }
477 
478 #else
479 
480 static void
481 bnx2_cnic_stop(struct bnx2 *bp)
482 {
483 }
484 
485 static void
486 bnx2_cnic_start(struct bnx2 *bp)
487 {
488 }
489 
490 #endif
491 
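/* Clause 22 MDIO access through the EMAC, as used by the two helpers
 * below: the PHY address goes in bits 25:21 of BNX2_EMAC_MDIO_COMM,
 * the register number in bits 20:16, and read data comes back in the
 * low 16 bits.  START_BUSY kicks off the transaction and is polled
 * (up to 50 x 10 usec) until the EMAC clears it.  Hardware
 * auto-polling is parked around the access when
 * BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING is set, so the two masters do
 * not collide on the MDIO bus.
 */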
492 static int
493 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
494 {
495 	u32 val1;
496 	int i, ret;
497 
498 	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
499 		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
500 		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
501 
502 		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
503 		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
504 
505 		udelay(40);
506 	}
507 
508 	val1 = (bp->phy_addr << 21) | (reg << 16) |
509 		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
510 		BNX2_EMAC_MDIO_COMM_START_BUSY;
511 	BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
512 
513 	for (i = 0; i < 50; i++) {
514 		udelay(10);
515 
516 		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
517 		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
518 			udelay(5);
519 
520 			val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
521 			val1 &= BNX2_EMAC_MDIO_COMM_DATA;
522 
523 			break;
524 		}
525 	}
526 
527 	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
528 		*val = 0x0;
529 		ret = -EBUSY;
530 	}
531 	else {
532 		*val = val1;
533 		ret = 0;
534 	}
535 
536 	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
537 		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
538 		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
539 
540 		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
541 		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
542 
543 		udelay(40);
544 	}
545 
546 	return ret;
547 }
548 
549 static int
550 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
551 {
552 	u32 val1;
553 	int i, ret;
554 
555 	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
556 		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
557 		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
558 
559 		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
560 		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
561 
562 		udelay(40);
563 	}
564 
565 	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
566 		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
567 		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
568 	BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
569 
570 	for (i = 0; i < 50; i++) {
571 		udelay(10);
572 
573 		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
574 		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
575 			udelay(5);
576 			break;
577 		}
578 	}
579 
580 	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
581 		ret = -EBUSY;
582 	else
583 		ret = 0;
584 
585 	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
586 		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
587 		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
588 
589 		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
590 		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
591 
592 		udelay(40);
593 	}
594 
595 	return ret;
596 }
597 
598 static void
599 bnx2_disable_int(struct bnx2 *bp)
600 {
601 	int i;
602 	struct bnx2_napi *bnapi;
603 
604 	for (i = 0; i < bp->irq_nvecs; i++) {
605 		bnapi = &bp->bnx2_napi[i];
606 		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
607 		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
608 	}
609 	BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
610 }
611 
612 static void
613 bnx2_enable_int(struct bnx2 *bp)
614 {
615 	int i;
616 	struct bnx2_napi *bnapi;
617 
618 	for (i = 0; i < bp->irq_nvecs; i++) {
619 		bnapi = &bp->bnx2_napi[i];
620 
621 		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
622 			BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
623 			BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
624 			bnapi->last_status_idx);
625 
626 		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
627 			BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
628 			bnapi->last_status_idx);
629 	}
630 	BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
631 }
632 
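/* bp->intr_sem is a nesting count of outstanding "interrupts off"
 * requests: bnx2_disable_int_sync() increments it, and
 * bnx2_netif_start() only re-enables NAPI and interrupts once
 * atomic_dec_and_test() brings it back to zero, so stop/start pairs
 * nest safely.
 */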
633 static void
634 bnx2_disable_int_sync(struct bnx2 *bp)
635 {
636 	int i;
637 
638 	atomic_inc(&bp->intr_sem);
639 	if (!netif_running(bp->dev))
640 		return;
641 
642 	bnx2_disable_int(bp);
643 	for (i = 0; i < bp->irq_nvecs; i++)
644 		synchronize_irq(bp->irq_tbl[i].vector);
645 }
646 
647 static void
648 bnx2_napi_disable(struct bnx2 *bp)
649 {
650 	int i;
651 
652 	for (i = 0; i < bp->irq_nvecs; i++)
653 		napi_disable(&bp->bnx2_napi[i].napi);
654 }
655 
656 static void
657 bnx2_napi_enable(struct bnx2 *bp)
658 {
659 	int i;
660 
661 	for (i = 0; i < bp->irq_nvecs; i++)
662 		napi_enable(&bp->bnx2_napi[i].napi);
663 }
664 
665 static void
666 bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
667 {
668 	if (stop_cnic)
669 		bnx2_cnic_stop(bp);
670 	if (netif_running(bp->dev)) {
671 		bnx2_napi_disable(bp);
672 		netif_tx_disable(bp->dev);
673 	}
674 	bnx2_disable_int_sync(bp);
675 	netif_carrier_off(bp->dev);	/* prevent tx timeout */
676 }
677 
678 static void
679 bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
680 {
681 	if (atomic_dec_and_test(&bp->intr_sem)) {
682 		if (netif_running(bp->dev)) {
683 			netif_tx_wake_all_queues(bp->dev);
684 			spin_lock_bh(&bp->phy_lock);
685 			if (bp->link_up)
686 				netif_carrier_on(bp->dev);
687 			spin_unlock_bh(&bp->phy_lock);
688 			bnx2_napi_enable(bp);
689 			bnx2_enable_int(bp);
690 			if (start_cnic)
691 				bnx2_cnic_start(bp);
692 		}
693 	}
694 }
695 
696 static void
697 bnx2_free_tx_mem(struct bnx2 *bp)
698 {
699 	int i;
700 
701 	for (i = 0; i < bp->num_tx_rings; i++) {
702 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
703 		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
704 
705 		if (txr->tx_desc_ring) {
706 			dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
707 					  txr->tx_desc_ring,
708 					  txr->tx_desc_mapping);
709 			txr->tx_desc_ring = NULL;
710 		}
711 		kfree(txr->tx_buf_ring);
712 		txr->tx_buf_ring = NULL;
713 	}
714 }
715 
716 static void
717 bnx2_free_rx_mem(struct bnx2 *bp)
718 {
719 	int i;
720 
721 	for (i = 0; i < bp->num_rx_rings; i++) {
722 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
723 		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
724 		int j;
725 
726 		for (j = 0; j < bp->rx_max_ring; j++) {
727 			if (rxr->rx_desc_ring[j])
728 				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
729 						  rxr->rx_desc_ring[j],
730 						  rxr->rx_desc_mapping[j]);
731 			rxr->rx_desc_ring[j] = NULL;
732 		}
733 		vfree(rxr->rx_buf_ring);
734 		rxr->rx_buf_ring = NULL;
735 
736 		for (j = 0; j < bp->rx_max_pg_ring; j++) {
737 			if (rxr->rx_pg_desc_ring[j])
738 				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
739 						  rxr->rx_pg_desc_ring[j],
740 						  rxr->rx_pg_desc_mapping[j]);
741 			rxr->rx_pg_desc_ring[j] = NULL;
742 		}
743 		vfree(rxr->rx_pg_ring);
744 		rxr->rx_pg_ring = NULL;
745 	}
746 }
747 
748 static int
749 bnx2_alloc_tx_mem(struct bnx2 *bp)
750 {
751 	int i;
752 
753 	for (i = 0; i < bp->num_tx_rings; i++) {
754 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
755 		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
756 
757 		txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
758 		if (!txr->tx_buf_ring)
759 			return -ENOMEM;
760 
761 		txr->tx_desc_ring =
762 			dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
763 					   &txr->tx_desc_mapping, GFP_KERNEL);
764 		if (!txr->tx_desc_ring)
765 			return -ENOMEM;
766 	}
767 	return 0;
768 }
769 
770 static int
771 bnx2_alloc_rx_mem(struct bnx2 *bp)
772 {
773 	int i;
774 
775 	for (i = 0; i < bp->num_rx_rings; i++) {
776 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
777 		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
778 		int j;
779 
780 		rxr->rx_buf_ring =
781 			vzalloc(array_size(SW_RXBD_RING_SIZE, bp->rx_max_ring));
782 		if (!rxr->rx_buf_ring)
783 			return -ENOMEM;
784 
785 		for (j = 0; j < bp->rx_max_ring; j++) {
786 			rxr->rx_desc_ring[j] =
787 				dma_alloc_coherent(&bp->pdev->dev,
788 						   RXBD_RING_SIZE,
789 						   &rxr->rx_desc_mapping[j],
790 						   GFP_KERNEL);
791 			if (!rxr->rx_desc_ring[j])
792 				return -ENOMEM;
793 
794 		}
795 
796 		if (bp->rx_pg_ring_size) {
797 			rxr->rx_pg_ring =
798 				vzalloc(array_size(SW_RXPG_RING_SIZE,
799 						   bp->rx_max_pg_ring));
800 			if (!rxr->rx_pg_ring)
801 				return -ENOMEM;
802 
803 		}
804 
805 		for (j = 0; j < bp->rx_max_pg_ring; j++) {
806 			rxr->rx_pg_desc_ring[j] =
807 				dma_alloc_coherent(&bp->pdev->dev,
808 						   RXBD_RING_SIZE,
809 						   &rxr->rx_pg_desc_mapping[j],
810 						   GFP_KERNEL);
811 			if (!rxr->rx_pg_desc_ring[j])
812 				return -ENOMEM;
813 
814 		}
815 	}
816 	return 0;
817 }
818 
819 static void
820 bnx2_free_stats_blk(struct net_device *dev)
821 {
822 	struct bnx2 *bp = netdev_priv(dev);
823 
824 	if (bp->status_blk) {
825 		dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
826 				  bp->status_blk,
827 				  bp->status_blk_mapping);
828 		bp->status_blk = NULL;
829 		bp->stats_blk = NULL;
830 	}
831 }
832 
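/* Layout of the single coherent allocation made below (sketch):
 *
 *	+--------------------------------+ <- status_blk_mapping
 *	| status block(s)                |
 *	| (one per vector, each padded   |
 *	|  to BNX2_SBLK_MSIX_ALIGN_SIZE  |
 *	|  when MSI-X is available)      |
 *	+--------------------------------+ <- + status_blk_size
 *	| statistics block               |
 *	+--------------------------------+
 */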
833 static int
834 bnx2_alloc_stats_blk(struct net_device *dev)
835 {
836 	int status_blk_size;
837 	void *status_blk;
838 	struct bnx2 *bp = netdev_priv(dev);
839 
840 	/* Combine status and statistics blocks into one allocation. */
841 	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
842 	if (bp->flags & BNX2_FLAG_MSIX_CAP)
843 		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
844 						 BNX2_SBLK_MSIX_ALIGN_SIZE);
845 	bp->status_stats_size = status_blk_size +
846 				sizeof(struct statistics_block);
847 	status_blk = dma_zalloc_coherent(&bp->pdev->dev, bp->status_stats_size,
848 					 &bp->status_blk_mapping, GFP_KERNEL);
849 	if (!status_blk)
850 		return -ENOMEM;
851 
852 	bp->status_blk = status_blk;
853 	bp->stats_blk = status_blk + status_blk_size;
854 	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
855 
856 	return 0;
857 }
858 
859 static void
860 bnx2_free_mem(struct bnx2 *bp)
861 {
862 	int i;
863 	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
864 
865 	bnx2_free_tx_mem(bp);
866 	bnx2_free_rx_mem(bp);
867 
868 	for (i = 0; i < bp->ctx_pages; i++) {
869 		if (bp->ctx_blk[i]) {
870 			dma_free_coherent(&bp->pdev->dev, BNX2_PAGE_SIZE,
871 					  bp->ctx_blk[i],
872 					  bp->ctx_blk_mapping[i]);
873 			bp->ctx_blk[i] = NULL;
874 		}
875 	}
876 
877 	if (bnapi->status_blk.msi)
878 		bnapi->status_blk.msi = NULL;
879 }
880 
881 static int
882 bnx2_alloc_mem(struct bnx2 *bp)
883 {
884 	int i, err;
885 	struct bnx2_napi *bnapi;
886 
887 	bnapi = &bp->bnx2_napi[0];
888 	bnapi->status_blk.msi = bp->status_blk;
889 	bnapi->hw_tx_cons_ptr =
890 		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
891 	bnapi->hw_rx_cons_ptr =
892 		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
893 	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
894 		for (i = 1; i < bp->irq_nvecs; i++) {
895 			struct status_block_msix *sblk;
896 
897 			bnapi = &bp->bnx2_napi[i];
898 
899 			sblk = (bp->status_blk + BNX2_SBLK_MSIX_ALIGN_SIZE * i);
900 			bnapi->status_blk.msix = sblk;
901 			bnapi->hw_tx_cons_ptr =
902 				&sblk->status_tx_quick_consumer_index;
903 			bnapi->hw_rx_cons_ptr =
904 				&sblk->status_rx_quick_consumer_index;
905 			bnapi->int_num = i << 24;
906 		}
907 	}
908 
909 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
910 		bp->ctx_pages = 0x2000 / BNX2_PAGE_SIZE;
911 		if (bp->ctx_pages == 0)
912 			bp->ctx_pages = 1;
913 		for (i = 0; i < bp->ctx_pages; i++) {
914 			bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
915 						BNX2_PAGE_SIZE,
916 						&bp->ctx_blk_mapping[i],
917 						GFP_KERNEL);
918 			if (!bp->ctx_blk[i])
919 				goto alloc_mem_err;
920 		}
921 	}
922 
923 	err = bnx2_alloc_rx_mem(bp);
924 	if (err)
925 		goto alloc_mem_err;
926 
927 	err = bnx2_alloc_tx_mem(bp);
928 	if (err)
929 		goto alloc_mem_err;
930 
931 	return 0;
932 
933 alloc_mem_err:
934 	bnx2_free_mem(bp);
935 	return -ENOMEM;
936 }
937 
938 static void
939 bnx2_report_fw_link(struct bnx2 *bp)
940 {
941 	u32 fw_link_status = 0;
942 
943 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
944 		return;
945 
946 	if (bp->link_up) {
947 		u32 bmsr;
948 
949 		switch (bp->line_speed) {
950 		case SPEED_10:
951 			if (bp->duplex == DUPLEX_HALF)
952 				fw_link_status = BNX2_LINK_STATUS_10HALF;
953 			else
954 				fw_link_status = BNX2_LINK_STATUS_10FULL;
955 			break;
956 		case SPEED_100:
957 			if (bp->duplex == DUPLEX_HALF)
958 				fw_link_status = BNX2_LINK_STATUS_100HALF;
959 			else
960 				fw_link_status = BNX2_LINK_STATUS_100FULL;
961 			break;
962 		case SPEED_1000:
963 			if (bp->duplex == DUPLEX_HALF)
964 				fw_link_status = BNX2_LINK_STATUS_1000HALF;
965 			else
966 				fw_link_status = BNX2_LINK_STATUS_1000FULL;
967 			break;
968 		case SPEED_2500:
969 			if (bp->duplex == DUPLEX_HALF)
970 				fw_link_status = BNX2_LINK_STATUS_2500HALF;
971 			else
972 				fw_link_status = BNX2_LINK_STATUS_2500FULL;
973 			break;
974 		}
975 
976 		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
977 
978 		if (bp->autoneg) {
979 			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
980 
981 			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
982 			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
983 
984 			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
985 			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
986 				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
987 			else
988 				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
989 		}
990 	}
991 	else
992 		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
993 
994 	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
995 }
996 
997 static char *
998 bnx2_xceiver_str(struct bnx2 *bp)
999 {
1000 	return (bp->phy_port == PORT_FIBRE) ? "SerDes" :
1001 		((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
1002 		 "Copper");
1003 }
1004 
1005 static void
1006 bnx2_report_link(struct bnx2 *bp)
1007 {
1008 	if (bp->link_up) {
1009 		netif_carrier_on(bp->dev);
1010 		netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
1011 			    bnx2_xceiver_str(bp),
1012 			    bp->line_speed,
1013 			    bp->duplex == DUPLEX_FULL ? "full" : "half");
1014 
1015 		if (bp->flow_ctrl) {
1016 			if (bp->flow_ctrl & FLOW_CTRL_RX) {
1017 				pr_cont(", receive ");
1018 				if (bp->flow_ctrl & FLOW_CTRL_TX)
1019 					pr_cont("& transmit ");
1020 			}
1021 			else {
1022 				pr_cont(", transmit ");
1023 			}
1024 			pr_cont("flow control ON");
1025 		}
1026 		pr_cont("\n");
1027 	} else {
1028 		netif_carrier_off(bp->dev);
1029 		netdev_err(bp->dev, "NIC %s Link is Down\n",
1030 			   bnx2_xceiver_str(bp));
1031 	}
1032 
1033 	bnx2_report_fw_link(bp);
1034 }
1035 
1036 static void
1037 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
1038 {
1039 	u32 local_adv, remote_adv;
1040 
1041 	bp->flow_ctrl = 0;
1042 	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
1043 		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
1044 
1045 		if (bp->duplex == DUPLEX_FULL) {
1046 			bp->flow_ctrl = bp->req_flow_ctrl;
1047 		}
1048 		return;
1049 	}
1050 
1051 	if (bp->duplex != DUPLEX_FULL) {
1052 		return;
1053 	}
1054 
1055 	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1056 	    (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
1057 		u32 val;
1058 
1059 		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1060 		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
1061 			bp->flow_ctrl |= FLOW_CTRL_TX;
1062 		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
1063 			bp->flow_ctrl |= FLOW_CTRL_RX;
1064 		return;
1065 	}
1066 
1067 	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1068 	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1069 
1070 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1071 		u32 new_local_adv = 0;
1072 		u32 new_remote_adv = 0;
1073 
1074 		if (local_adv & ADVERTISE_1000XPAUSE)
1075 			new_local_adv |= ADVERTISE_PAUSE_CAP;
1076 		if (local_adv & ADVERTISE_1000XPSE_ASYM)
1077 			new_local_adv |= ADVERTISE_PAUSE_ASYM;
1078 		if (remote_adv & ADVERTISE_1000XPAUSE)
1079 			new_remote_adv |= ADVERTISE_PAUSE_CAP;
1080 		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
1081 			new_remote_adv |= ADVERTISE_PAUSE_ASYM;
1082 
1083 		local_adv = new_local_adv;
1084 		remote_adv = new_remote_adv;
1085 	}
1086 
1087 	/* See Table 28B-3 of 802.3ab-1999 spec. */
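	/* Pause resolution as implemented below (mirrors that table):
	 *
	 *	local PAUSE  local ASYM  remote PAUSE  remote ASYM  flow_ctrl
	 *	     1           1            1             -        TX | RX
	 *	     1           1            0             1        RX
	 *	     1           0            1             -        TX | RX
	 *	     0           1            1             1        TX
	 */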
1088 	if (local_adv & ADVERTISE_PAUSE_CAP) {
1089 		if (local_adv & ADVERTISE_PAUSE_ASYM) {
1090 			if (remote_adv & ADVERTISE_PAUSE_CAP) {
1091 				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
1092 			}
1093 			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
1094 				bp->flow_ctrl = FLOW_CTRL_RX;
1095 			}
1096 		}
1097 		else {
1098 			if (remote_adv & ADVERTISE_PAUSE_CAP) {
1099 				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
1100 			}
1101 		}
1102 	}
1103 	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1104 		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
1105 			(remote_adv & ADVERTISE_PAUSE_ASYM)) {
1106 
1107 			bp->flow_ctrl = FLOW_CTRL_TX;
1108 		}
1109 	}
1110 }
1111 
1112 static int
1113 bnx2_5709s_linkup(struct bnx2 *bp)
1114 {
1115 	u32 val, speed;
1116 
1117 	bp->link_up = 1;
1118 
1119 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
1120 	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
1121 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1122 
1123 	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
1124 		bp->line_speed = bp->req_line_speed;
1125 		bp->duplex = bp->req_duplex;
1126 		return 0;
1127 	}
1128 	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
1129 	switch (speed) {
1130 		case MII_BNX2_GP_TOP_AN_SPEED_10:
1131 			bp->line_speed = SPEED_10;
1132 			break;
1133 		case MII_BNX2_GP_TOP_AN_SPEED_100:
1134 			bp->line_speed = SPEED_100;
1135 			break;
1136 		case MII_BNX2_GP_TOP_AN_SPEED_1G:
1137 		case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
1138 			bp->line_speed = SPEED_1000;
1139 			break;
1140 		case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
1141 			bp->line_speed = SPEED_2500;
1142 			break;
1143 	}
1144 	if (val & MII_BNX2_GP_TOP_AN_FD)
1145 		bp->duplex = DUPLEX_FULL;
1146 	else
1147 		bp->duplex = DUPLEX_HALF;
1148 	return 0;
1149 }
1150 
1151 static int
1152 bnx2_5708s_linkup(struct bnx2 *bp)
1153 {
1154 	u32 val;
1155 
1156 	bp->link_up = 1;
1157 	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1158 	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
1159 		case BCM5708S_1000X_STAT1_SPEED_10:
1160 			bp->line_speed = SPEED_10;
1161 			break;
1162 		case BCM5708S_1000X_STAT1_SPEED_100:
1163 			bp->line_speed = SPEED_100;
1164 			break;
1165 		case BCM5708S_1000X_STAT1_SPEED_1G:
1166 			bp->line_speed = SPEED_1000;
1167 			break;
1168 		case BCM5708S_1000X_STAT1_SPEED_2G5:
1169 			bp->line_speed = SPEED_2500;
1170 			break;
1171 	}
1172 	if (val & BCM5708S_1000X_STAT1_FD)
1173 		bp->duplex = DUPLEX_FULL;
1174 	else
1175 		bp->duplex = DUPLEX_HALF;
1176 
1177 	return 0;
1178 }
1179 
1180 static int
1181 bnx2_5706s_linkup(struct bnx2 *bp)
1182 {
1183 	u32 bmcr, local_adv, remote_adv, common;
1184 
1185 	bp->link_up = 1;
1186 	bp->line_speed = SPEED_1000;
1187 
1188 	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1189 	if (bmcr & BMCR_FULLDPLX) {
1190 		bp->duplex = DUPLEX_FULL;
1191 	}
1192 	else {
1193 		bp->duplex = DUPLEX_HALF;
1194 	}
1195 
1196 	if (!(bmcr & BMCR_ANENABLE)) {
1197 		return 0;
1198 	}
1199 
1200 	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1201 	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1202 
1203 	common = local_adv & remote_adv;
1204 	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
1205 
1206 		if (common & ADVERTISE_1000XFULL) {
1207 			bp->duplex = DUPLEX_FULL;
1208 		}
1209 		else {
1210 			bp->duplex = DUPLEX_HALF;
1211 		}
1212 	}
1213 
1214 	return 0;
1215 }
1216 
1217 static int
1218 bnx2_copper_linkup(struct bnx2 *bp)
1219 {
1220 	u32 bmcr;
1221 
1222 	bp->phy_flags &= ~BNX2_PHY_FLAG_MDIX;
1223 
1224 	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1225 	if (bmcr & BMCR_ANENABLE) {
1226 		u32 local_adv, remote_adv, common;
1227 
1228 		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
1229 		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
1230 
1231 		common = local_adv & (remote_adv >> 2);
1232 		if (common & ADVERTISE_1000FULL) {
1233 			bp->line_speed = SPEED_1000;
1234 			bp->duplex = DUPLEX_FULL;
1235 		}
1236 		else if (common & ADVERTISE_1000HALF) {
1237 			bp->line_speed = SPEED_1000;
1238 			bp->duplex = DUPLEX_HALF;
1239 		}
1240 		else {
1241 			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1242 			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1243 
1244 			common = local_adv & remote_adv;
1245 			if (common & ADVERTISE_100FULL) {
1246 				bp->line_speed = SPEED_100;
1247 				bp->duplex = DUPLEX_FULL;
1248 			}
1249 			else if (common & ADVERTISE_100HALF) {
1250 				bp->line_speed = SPEED_100;
1251 				bp->duplex = DUPLEX_HALF;
1252 			}
1253 			else if (common & ADVERTISE_10FULL) {
1254 				bp->line_speed = SPEED_10;
1255 				bp->duplex = DUPLEX_FULL;
1256 			}
1257 			else if (common & ADVERTISE_10HALF) {
1258 				bp->line_speed = SPEED_10;
1259 				bp->duplex = DUPLEX_HALF;
1260 			}
1261 			else {
1262 				bp->line_speed = 0;
1263 				bp->link_up = 0;
1264 			}
1265 		}
1266 	}
1267 	else {
1268 		if (bmcr & BMCR_SPEED100) {
1269 			bp->line_speed = SPEED_100;
1270 		}
1271 		else {
1272 			bp->line_speed = SPEED_10;
1273 		}
1274 		if (bmcr & BMCR_FULLDPLX) {
1275 			bp->duplex = DUPLEX_FULL;
1276 		}
1277 		else {
1278 			bp->duplex = DUPLEX_HALF;
1279 		}
1280 	}
1281 
1282 	if (bp->link_up) {
1283 		u32 ext_status;
1284 
1285 		bnx2_read_phy(bp, MII_BNX2_EXT_STATUS, &ext_status);
1286 		if (ext_status & EXT_STATUS_MDIX)
1287 			bp->phy_flags |= BNX2_PHY_FLAG_MDIX;
1288 	}
1289 
1290 	return 0;
1291 }
1292 
1293 static void
1294 bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
1295 {
1296 	u32 val, rx_cid_addr = GET_CID_ADDR(cid);
1297 
1298 	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
1299 	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
1300 	val |= 0x02 << 8;
1301 
1302 	if (bp->flow_ctrl & FLOW_CTRL_TX)
1303 		val |= BNX2_L2CTX_FLOW_CTRL_ENABLE;
1304 
1305 	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
1306 }
1307 
1308 static void
1309 bnx2_init_all_rx_contexts(struct bnx2 *bp)
1310 {
1311 	int i;
1312 	u32 cid;
1313 
1314 	for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1315 		if (i == 1)
1316 			cid = RX_RSS_CID;
1317 		bnx2_init_rx_context(bp, cid);
1318 	}
1319 }
1320 
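/* Program the EMAC to match the resolved link: the port mode (MII,
 * GMII or 2.5G) and duplex are derived from line_speed/duplex, and the
 * RX/TX PAUSE enables follow the flow_ctrl bits computed by
 * bnx2_resolve_flow_ctrl().
 */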
1321 static void
1322 bnx2_set_mac_link(struct bnx2 *bp)
1323 {
1324 	u32 val;
1325 
1326 	BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
1327 	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
1328 		(bp->duplex == DUPLEX_HALF)) {
1329 		BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
1330 	}
1331 
1332 	/* Configure the EMAC mode register. */
1333 	val = BNX2_RD(bp, BNX2_EMAC_MODE);
1334 
1335 	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1336 		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1337 		BNX2_EMAC_MODE_25G_MODE);
1338 
1339 	if (bp->link_up) {
1340 		switch (bp->line_speed) {
1341 			case SPEED_10:
1342 				if (BNX2_CHIP(bp) != BNX2_CHIP_5706) {
1343 					val |= BNX2_EMAC_MODE_PORT_MII_10M;
1344 					break;
1345 				}
1346 				/* fall through */
1347 			case SPEED_100:
1348 				val |= BNX2_EMAC_MODE_PORT_MII;
1349 				break;
1350 			case SPEED_2500:
1351 				val |= BNX2_EMAC_MODE_25G_MODE;
1352 				/* fall through */
1353 			case SPEED_1000:
1354 				val |= BNX2_EMAC_MODE_PORT_GMII;
1355 				break;
1356 		}
1357 	}
1358 	else {
1359 		val |= BNX2_EMAC_MODE_PORT_GMII;
1360 	}
1361 
1362 	/* Set the MAC to operate in the appropriate duplex mode. */
1363 	if (bp->duplex == DUPLEX_HALF)
1364 		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
1365 	BNX2_WR(bp, BNX2_EMAC_MODE, val);
1366 
1367 	/* Enable/disable rx PAUSE. */
1368 	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
1369 
1370 	if (bp->flow_ctrl & FLOW_CTRL_RX)
1371 		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
1372 	BNX2_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
1373 
1374 	/* Enable/disable tx PAUSE. */
1375 	val = BNX2_RD(bp, BNX2_EMAC_TX_MODE);
1376 	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
1377 
1378 	if (bp->flow_ctrl & FLOW_CTRL_TX)
1379 		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
1380 	BNX2_WR(bp, BNX2_EMAC_TX_MODE, val);
1381 
1382 	/* Acknowledge the interrupt. */
1383 	BNX2_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
1384 
1385 	bnx2_init_all_rx_contexts(bp);
1386 }
1387 
1388 static void
1389 bnx2_enable_bmsr1(struct bnx2 *bp)
1390 {
1391 	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1392 	    (BNX2_CHIP(bp) == BNX2_CHIP_5709))
1393 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1394 			       MII_BNX2_BLK_ADDR_GP_STATUS);
1395 }
1396 
1397 static void
1398 bnx2_disable_bmsr1(struct bnx2 *bp)
1399 {
1400 	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1401 	    (BNX2_CHIP(bp) == BNX2_CHIP_5709))
1402 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1403 			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1404 }
1405 
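/* Set the 2.5G advertisement bit (BCM5708S_UP1_2G5) in the Over-1G
 * register on 2.5G-capable PHYs.  Returns 1 if the bit was already
 * set and 0 if it had to be turned on; the forced-speed path in
 * bnx2_setup_serdes_phy() uses the 0 return as a cue to force the
 * link down so the new advertisement takes effect.
 */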
1406 static int
1407 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1408 {
1409 	u32 up1;
1410 	int ret = 1;
1411 
1412 	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1413 		return 0;
1414 
1415 	if (bp->autoneg & AUTONEG_SPEED)
1416 		bp->advertising |= ADVERTISED_2500baseX_Full;
1417 
1418 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1419 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1420 
1421 	bnx2_read_phy(bp, bp->mii_up1, &up1);
1422 	if (!(up1 & BCM5708S_UP1_2G5)) {
1423 		up1 |= BCM5708S_UP1_2G5;
1424 		bnx2_write_phy(bp, bp->mii_up1, up1);
1425 		ret = 0;
1426 	}
1427 
1428 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1429 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1430 			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1431 
1432 	return ret;
1433 }
1434 
1435 static int
1436 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1437 {
1438 	u32 up1;
1439 	int ret = 0;
1440 
1441 	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1442 		return 0;
1443 
1444 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1445 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1446 
1447 	bnx2_read_phy(bp, bp->mii_up1, &up1);
1448 	if (up1 & BCM5708S_UP1_2G5) {
1449 		up1 &= ~BCM5708S_UP1_2G5;
1450 		bnx2_write_phy(bp, bp->mii_up1, up1);
1451 		ret = 1;
1452 	}
1453 
1454 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1455 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1456 			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1457 
1458 	return ret;
1459 }
1460 
1461 static void
1462 bnx2_enable_forced_2g5(struct bnx2 *bp)
1463 {
1464 	u32 uninitialized_var(bmcr);
1465 	int err;
1466 
1467 	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1468 		return;
1469 
1470 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
1471 		u32 val;
1472 
1473 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1474 			       MII_BNX2_BLK_ADDR_SERDES_DIG);
1475 		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
1476 			val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1477 			val |= MII_BNX2_SD_MISC1_FORCE |
1478 				MII_BNX2_SD_MISC1_FORCE_2_5G;
1479 			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1480 		}
1481 
1482 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1483 			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1484 		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1485 
1486 	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
1487 		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1488 		if (!err)
1489 			bmcr |= BCM5708S_BMCR_FORCE_2500;
1490 	} else {
1491 		return;
1492 	}
1493 
1494 	if (err)
1495 		return;
1496 
1497 	if (bp->autoneg & AUTONEG_SPEED) {
1498 		bmcr &= ~BMCR_ANENABLE;
1499 		if (bp->req_duplex == DUPLEX_FULL)
1500 			bmcr |= BMCR_FULLDPLX;
1501 	}
1502 	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1503 }
1504 
1505 static void
1506 bnx2_disable_forced_2g5(struct bnx2 *bp)
1507 {
1508 	u32 uninitialized_var(bmcr);
1509 	int err;
1510 
1511 	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1512 		return;
1513 
1514 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
1515 		u32 val;
1516 
1517 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1518 			       MII_BNX2_BLK_ADDR_SERDES_DIG);
1519 		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
1520 			val &= ~MII_BNX2_SD_MISC1_FORCE;
1521 			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1522 		}
1523 
1524 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1525 			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1526 		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1527 
1528 	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
1529 		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1530 		if (!err)
1531 			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1532 	} else {
1533 		return;
1534 	}
1535 
1536 	if (err)
1537 		return;
1538 
1539 	if (bp->autoneg & AUTONEG_SPEED)
1540 		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1541 	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1542 }
1543 
1544 static void
1545 bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1546 {
1547 	u32 val;
1548 
1549 	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1550 	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1551 	if (start)
1552 		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1553 	else
1554 		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1555 }
1556 
1557 static int
1558 bnx2_set_link(struct bnx2 *bp)
1559 {
1560 	u32 bmsr;
1561 	u8 link_up;
1562 
1563 	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
1564 		bp->link_up = 1;
1565 		return 0;
1566 	}
1567 
1568 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1569 		return 0;
1570 
1571 	link_up = bp->link_up;
1572 
1573 	bnx2_enable_bmsr1(bp);
1574 	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1575 	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1576 	bnx2_disable_bmsr1(bp);
1577 
1578 	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1579 	    (BNX2_CHIP(bp) == BNX2_CHIP_5706)) {
1580 		u32 val, an_dbg;
1581 
1582 		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
1583 			bnx2_5706s_force_link_dn(bp, 0);
1584 			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
1585 		}
1586 		val = BNX2_RD(bp, BNX2_EMAC_STATUS);
1587 
1588 		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
1589 		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
1590 		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
1591 
1592 		if ((val & BNX2_EMAC_STATUS_LINK) &&
1593 		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
1594 			bmsr |= BMSR_LSTATUS;
1595 		else
1596 			bmsr &= ~BMSR_LSTATUS;
1597 	}
1598 
1599 	if (bmsr & BMSR_LSTATUS) {
1600 		bp->link_up = 1;
1601 
1602 		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1603 			if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
1604 				bnx2_5706s_linkup(bp);
1605 			else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
1606 				bnx2_5708s_linkup(bp);
1607 			else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1608 				bnx2_5709s_linkup(bp);
1609 		}
1610 		else {
1611 			bnx2_copper_linkup(bp);
1612 		}
1613 		bnx2_resolve_flow_ctrl(bp);
1614 	}
1615 	else {
1616 		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1617 		    (bp->autoneg & AUTONEG_SPEED))
1618 			bnx2_disable_forced_2g5(bp);
1619 
1620 		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
1621 			u32 bmcr;
1622 
1623 			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1624 			bmcr |= BMCR_ANENABLE;
1625 			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1626 
1627 			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
1628 		}
1629 		bp->link_up = 0;
1630 	}
1631 
1632 	if (bp->link_up != link_up) {
1633 		bnx2_report_link(bp);
1634 	}
1635 
1636 	bnx2_set_mac_link(bp);
1637 
1638 	return 0;
1639 }
1640 
1641 static int
1642 bnx2_reset_phy(struct bnx2 *bp)
1643 {
1644 	int i;
1645 	u32 reg;
1646 
1647 	bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1648 
1649 #define PHY_RESET_MAX_WAIT 100
1650 	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1651 		udelay(10);
1652 
1653 		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1654 		if (!(reg & BMCR_RESET)) {
1655 			udelay(20);
1656 			break;
1657 		}
1658 	}
1659 	if (i == PHY_RESET_MAX_WAIT) {
1660 		return -EBUSY;
1661 	}
1662 	return 0;
1663 }
1664 
1665 static u32
1666 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1667 {
1668 	u32 adv = 0;
1669 
1670 	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1671 		(FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1672 
1673 		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1674 			adv = ADVERTISE_1000XPAUSE;
1675 		}
1676 		else {
1677 			adv = ADVERTISE_PAUSE_CAP;
1678 		}
1679 	}
1680 	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1681 		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1682 			adv = ADVERTISE_1000XPSE_ASYM;
1683 		}
1684 		else {
1685 			adv = ADVERTISE_PAUSE_ASYM;
1686 		}
1687 	}
1688 	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1689 		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1690 			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1691 		}
1692 		else {
1693 			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1694 		}
1695 	}
1696 	return adv;
1697 }
1698 
1699 static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1700 
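/* Remote-PHY link setup: rather than programming the PHY directly,
 * encode the requested speed/duplex/pause settings as a
 * BNX2_NETLINK_SET_LINK_* bitmask, post it in the BNX2_DRV_MB_ARG0
 * shared-memory word, and issue the CMD_SET_LINK command to the
 * bootcode via bnx2_fw_sync().
 */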
1701 static int
1702 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1703 __releases(&bp->phy_lock)
1704 __acquires(&bp->phy_lock)
1705 {
1706 	u32 speed_arg = 0, pause_adv;
1707 
1708 	pause_adv = bnx2_phy_get_pause_adv(bp);
1709 
1710 	if (bp->autoneg & AUTONEG_SPEED) {
1711 		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1712 		if (bp->advertising & ADVERTISED_10baseT_Half)
1713 			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1714 		if (bp->advertising & ADVERTISED_10baseT_Full)
1715 			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1716 		if (bp->advertising & ADVERTISED_100baseT_Half)
1717 			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1718 		if (bp->advertising & ADVERTISED_100baseT_Full)
1719 			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1720 		if (bp->advertising & ADVERTISED_1000baseT_Full)
1721 			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1722 		if (bp->advertising & ADVERTISED_2500baseX_Full)
1723 			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1724 	} else {
1725 		if (bp->req_line_speed == SPEED_2500)
1726 			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1727 		else if (bp->req_line_speed == SPEED_1000)
1728 			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1729 		else if (bp->req_line_speed == SPEED_100) {
1730 			if (bp->req_duplex == DUPLEX_FULL)
1731 				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1732 			else
1733 				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1734 		} else if (bp->req_line_speed == SPEED_10) {
1735 			if (bp->req_duplex == DUPLEX_FULL)
1736 				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1737 			else
1738 				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1739 		}
1740 	}
1741 
1742 	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1743 		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1744 	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
1745 		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1746 
1747 	if (port == PORT_TP)
1748 		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1749 			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1750 
1751 	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);
1752 
1753 	spin_unlock_bh(&bp->phy_lock);
1754 	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
1755 	spin_lock_bh(&bp->phy_lock);
1756 
1757 	return 0;
1758 }
1759 
1760 static int
1761 bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
1762 __releases(&bp->phy_lock)
1763 __acquires(&bp->phy_lock)
1764 {
1765 	u32 adv, bmcr;
1766 	u32 new_adv = 0;
1767 
1768 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1769 		return bnx2_setup_remote_phy(bp, port);
1770 
1771 	if (!(bp->autoneg & AUTONEG_SPEED)) {
1772 		u32 new_bmcr;
1773 		int force_link_down = 0;
1774 
1775 		if (bp->req_line_speed == SPEED_2500) {
1776 			if (!bnx2_test_and_enable_2g5(bp))
1777 				force_link_down = 1;
1778 		} else if (bp->req_line_speed == SPEED_1000) {
1779 			if (bnx2_test_and_disable_2g5(bp))
1780 				force_link_down = 1;
1781 		}
1782 		bnx2_read_phy(bp, bp->mii_adv, &adv);
1783 		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1784 
1785 		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1786 		new_bmcr = bmcr & ~BMCR_ANENABLE;
1787 		new_bmcr |= BMCR_SPEED1000;
1788 
1789 		if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
1790 			if (bp->req_line_speed == SPEED_2500)
1791 				bnx2_enable_forced_2g5(bp);
1792 			else if (bp->req_line_speed == SPEED_1000) {
1793 				bnx2_disable_forced_2g5(bp);
1794 				new_bmcr &= ~0x2000;
1795 			}
1796 
1797 		} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
1798 			if (bp->req_line_speed == SPEED_2500)
1799 				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1800 			else
1801 				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
1802 		}
1803 
1804 		if (bp->req_duplex == DUPLEX_FULL) {
1805 			adv |= ADVERTISE_1000XFULL;
1806 			new_bmcr |= BMCR_FULLDPLX;
1807 		}
1808 		else {
1809 			adv |= ADVERTISE_1000XHALF;
1810 			new_bmcr &= ~BMCR_FULLDPLX;
1811 		}
1812 		if ((new_bmcr != bmcr) || (force_link_down)) {
1813 			/* Force a link down visible on the other side */
1814 			if (bp->link_up) {
1815 				bnx2_write_phy(bp, bp->mii_adv, adv &
1816 					       ~(ADVERTISE_1000XFULL |
1817 						 ADVERTISE_1000XHALF));
1818 				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
1819 					BMCR_ANRESTART | BMCR_ANENABLE);
1820 
1821 				bp->link_up = 0;
1822 				netif_carrier_off(bp->dev);
1823 				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1824 				bnx2_report_link(bp);
1825 			}
1826 			bnx2_write_phy(bp, bp->mii_adv, adv);
1827 			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1828 		} else {
1829 			bnx2_resolve_flow_ctrl(bp);
1830 			bnx2_set_mac_link(bp);
1831 		}
1832 		return 0;
1833 	}
1834 
1835 	bnx2_test_and_enable_2g5(bp);
1836 
1837 	if (bp->advertising & ADVERTISED_1000baseT_Full)
1838 		new_adv |= ADVERTISE_1000XFULL;
1839 
1840 	new_adv |= bnx2_phy_get_pause_adv(bp);
1841 
1842 	bnx2_read_phy(bp, bp->mii_adv, &adv);
1843 	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1844 
1845 	bp->serdes_an_pending = 0;
1846 	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1847 		/* Force a link down visible on the other side */
1848 		if (bp->link_up) {
1849 			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1850 			spin_unlock_bh(&bp->phy_lock);
1851 			msleep(20);
1852 			spin_lock_bh(&bp->phy_lock);
1853 		}
1854 
1855 		bnx2_write_phy(bp, bp->mii_adv, new_adv);
1856 		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
1857 			BMCR_ANENABLE);
1858 		/* Speed up link-up time when the link partner
1859 		 * does not autonegotiate, which is very common
1860 		 * in blade servers. Some blade servers use
1861 		 * IPMI for keyboard input and it's important
1862 		 * to minimize link disruptions. Autonegotiation
1863 		 * involves exchanging base pages plus 3 next pages
1864 		 * and normally completes in about 120 msec.
1865 		 */
1866 		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
1867 		bp->serdes_an_pending = 1;
1868 		mod_timer(&bp->timer, jiffies + bp->current_interval);
1869 	} else {
1870 		bnx2_resolve_flow_ctrl(bp);
1871 		bnx2_set_mac_link(bp);
1872 	}
1873 
1874 	return 0;
1875 }
1876 
1877 #define ETHTOOL_ALL_FIBRE_SPEED						\
1878 	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
1879 		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1880 		(ADVERTISED_1000baseT_Full)
1881 
1882 #define ETHTOOL_ALL_COPPER_SPEED					\
1883 	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
1884 	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
1885 	ADVERTISED_1000baseT_Full)
1886 
1887 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1888 	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1889 
1890 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1891 
1892 static void
1893 bnx2_set_default_remote_link(struct bnx2 *bp)
1894 {
1895 	u32 link;
1896 
1897 	if (bp->phy_port == PORT_TP)
1898 		link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1899 	else
1900 		link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1901 
1902 	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1903 		bp->req_line_speed = 0;
1904 		bp->autoneg |= AUTONEG_SPEED;
1905 		bp->advertising = ADVERTISED_Autoneg;
1906 		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1907 			bp->advertising |= ADVERTISED_10baseT_Half;
1908 		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1909 			bp->advertising |= ADVERTISED_10baseT_Full;
1910 		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1911 			bp->advertising |= ADVERTISED_100baseT_Half;
1912 		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1913 			bp->advertising |= ADVERTISED_100baseT_Full;
1914 		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1915 			bp->advertising |= ADVERTISED_1000baseT_Full;
1916 		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1917 			bp->advertising |= ADVERTISED_2500baseX_Full;
1918 	} else {
1919 		bp->autoneg = 0;
1920 		bp->advertising = 0;
1921 		bp->req_duplex = DUPLEX_FULL;
1922 		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1923 			bp->req_line_speed = SPEED_10;
1924 			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1925 				bp->req_duplex = DUPLEX_HALF;
1926 		}
1927 		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1928 			bp->req_line_speed = SPEED_100;
1929 			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1930 				bp->req_duplex = DUPLEX_HALF;
1931 		}
1932 		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1933 			bp->req_line_speed = SPEED_1000;
1934 		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1935 			bp->req_line_speed = SPEED_2500;
1936 	}
1937 }
1938 
1939 static void
1940 bnx2_set_default_link(struct bnx2 *bp)
1941 {
1942 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1943 		bnx2_set_default_remote_link(bp);
1944 		return;
1945 	}
1946 
1947 	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1948 	bp->req_line_speed = 0;
1949 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1950 		u32 reg;
1951 
1952 		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1953 
1954 		reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1955 		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1956 		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1957 			bp->autoneg = 0;
1958 			bp->req_line_speed = bp->line_speed = SPEED_1000;
1959 			bp->req_duplex = DUPLEX_FULL;
1960 		}
1961 	} else
1962 		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1963 }
1964 
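/* Bump the driver pulse sequence number and write it to the DRV_PULSE
 * shared-memory mailbox so the bootcode can tell that the driver is
 * still alive.
 */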
1965 static void
1966 bnx2_send_heart_beat(struct bnx2 *bp)
1967 {
1968 	u32 msg;
1969 	u32 addr;
1970 
1971 	spin_lock(&bp->indirect_lock);
1972 	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1973 	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1974 	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1975 	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1976 	spin_unlock(&bp->indirect_lock);
1977 }
1978 
1979 static void
1980 bnx2_remote_phy_event(struct bnx2 *bp)
1981 {
1982 	u32 msg;
1983 	u8 link_up = bp->link_up;
1984 	u8 old_port;
1985 
1986 	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
1987 
1988 	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
1989 		bnx2_send_heart_beat(bp);
1990 
1991 	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
1992 
1993 	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
1994 		bp->link_up = 0;
1995 	else {
1996 		u32 speed;
1997 
1998 		bp->link_up = 1;
1999 		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
2000 		bp->duplex = DUPLEX_FULL;
2001 		switch (speed) {
2002 			case BNX2_LINK_STATUS_10HALF:
2003 				bp->duplex = DUPLEX_HALF;
2004 				/* fall through */
2005 			case BNX2_LINK_STATUS_10FULL:
2006 				bp->line_speed = SPEED_10;
2007 				break;
2008 			case BNX2_LINK_STATUS_100HALF:
2009 				bp->duplex = DUPLEX_HALF;
2010 				/* fall through */
2011 			case BNX2_LINK_STATUS_100BASE_T4:
2012 			case BNX2_LINK_STATUS_100FULL:
2013 				bp->line_speed = SPEED_100;
2014 				break;
2015 			case BNX2_LINK_STATUS_1000HALF:
2016 				bp->duplex = DUPLEX_HALF;
2017 				/* fall through */
2018 			case BNX2_LINK_STATUS_1000FULL:
2019 				bp->line_speed = SPEED_1000;
2020 				break;
2021 			case BNX2_LINK_STATUS_2500HALF:
2022 				bp->duplex = DUPLEX_HALF;
2023 				/* fall through */
2024 			case BNX2_LINK_STATUS_2500FULL:
2025 				bp->line_speed = SPEED_2500;
2026 				break;
2027 			default:
2028 				bp->line_speed = 0;
2029 				break;
2030 		}
2031 
2032 		bp->flow_ctrl = 0;
2033 		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
2034 		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
2035 			if (bp->duplex == DUPLEX_FULL)
2036 				bp->flow_ctrl = bp->req_flow_ctrl;
2037 		} else {
2038 			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
2039 				bp->flow_ctrl |= FLOW_CTRL_TX;
2040 			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
2041 				bp->flow_ctrl |= FLOW_CTRL_RX;
2042 		}
2043 
2044 		old_port = bp->phy_port;
2045 		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
2046 			bp->phy_port = PORT_FIBRE;
2047 		else
2048 			bp->phy_port = PORT_TP;
2049 
2050 		if (old_port != bp->phy_port)
2051 			bnx2_set_default_link(bp);
2052 
2053 	}
2054 	if (bp->link_up != link_up)
2055 		bnx2_report_link(bp);
2056 
2057 	bnx2_set_mac_link(bp);
2058 }
2059 
2060 static int
2061 bnx2_set_remote_link(struct bnx2 *bp)
2062 {
2063 	u32 evt_code;
2064 
2065 	evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2066 	switch (evt_code) {
2067 		case BNX2_FW_EVT_CODE_LINK_EVENT:
2068 			bnx2_remote_phy_event(bp);
2069 			break;
2070 		case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2071 		default:
2072 			bnx2_send_heart_beat(bp);
2073 			break;
2074 	}
2075 	return 0;
2076 }
2077 
2078 static int
2079 bnx2_setup_copper_phy(struct bnx2 *bp)
2080 __releases(&bp->phy_lock)
2081 __acquires(&bp->phy_lock)
2082 {
2083 	u32 bmcr, adv_reg, new_adv = 0;
2084 	u32 new_bmcr;
2085 
2086 	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
2087 
2088 	bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
2089 	adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
2090 		    ADVERTISE_PAUSE_ASYM);
2091 
2092 	new_adv = ADVERTISE_CSMA | ethtool_adv_to_mii_adv_t(bp->advertising);
2093 
2094 	if (bp->autoneg & AUTONEG_SPEED) {
2095 		u32 adv1000_reg;
2096 		u32 new_adv1000 = 0;
2097 
2098 		new_adv |= bnx2_phy_get_pause_adv(bp);
2099 
2100 		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
2101 		adv1000_reg &= PHY_ALL_1000_SPEED;
2102 
2103 		new_adv1000 |= ethtool_adv_to_mii_ctrl1000_t(bp->advertising);
2104 		if ((adv1000_reg != new_adv1000) ||
2105 			(adv_reg != new_adv) ||
2106 			((bmcr & BMCR_ANENABLE) == 0)) {
2107 
2108 			bnx2_write_phy(bp, bp->mii_adv, new_adv);
2109 			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000);
2110 			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
2111 				BMCR_ANENABLE);
2112 		}
2113 		else if (bp->link_up) {
2114 			/* Flow ctrl may have changed from auto to forced */
2115 			/* or vice-versa. */
2116 
2117 			bnx2_resolve_flow_ctrl(bp);
2118 			bnx2_set_mac_link(bp);
2119 		}
2120 		return 0;
2121 	}
2122 
2123 	/* advertise nothing when forcing speed */
2124 	if (adv_reg != new_adv)
2125 		bnx2_write_phy(bp, bp->mii_adv, new_adv);
2126 
2127 	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100)
		new_bmcr |= BMCR_SPEED100;
	if (bp->req_duplex == DUPLEX_FULL)
		new_bmcr |= BMCR_FULLDPLX;
2134 	if (new_bmcr != bmcr) {
2135 		u32 bmsr;
2136 
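		/* BMSR link status is latched-low; read it twice so
		 * that the second read returns the current state.
		 */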
2137 		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2138 		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2139 
2140 		if (bmsr & BMSR_LSTATUS) {
2141 			/* Force link down */
2142 			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
2143 			spin_unlock_bh(&bp->phy_lock);
2144 			msleep(50);
2145 			spin_lock_bh(&bp->phy_lock);
2146 
2147 			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2148 			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2149 		}
2150 
2151 		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
2152 
		/* Normally, the new speed is set up after the link has
		 * gone down and come back up.  In some cases the link
		 * will not go down, so we need to set up the new speed
		 * here.
		 */
2157 		if (bmsr & BMSR_LSTATUS) {
2158 			bp->line_speed = bp->req_line_speed;
2159 			bp->duplex = bp->req_duplex;
2160 			bnx2_resolve_flow_ctrl(bp);
2161 			bnx2_set_mac_link(bp);
2162 		}
2163 	} else {
2164 		bnx2_resolve_flow_ctrl(bp);
2165 		bnx2_set_mac_link(bp);
2166 	}
2167 	return 0;
2168 }
2169 
2170 static int
2171 bnx2_setup_phy(struct bnx2 *bp, u8 port)
2172 __releases(&bp->phy_lock)
2173 __acquires(&bp->phy_lock)
2174 {
2175 	if (bp->loopback == MAC_LOOPBACK)
2176 		return 0;
2177 
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES)
		return bnx2_setup_serdes_phy(bp, port);
	else
		return bnx2_setup_copper_phy(bp);
2184 }
2185 
2186 static int
2187 bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
2188 {
2189 	u32 val;
2190 
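	/* On the 5709 SerDes the standard IEEE registers are mapped at
	 * an offset of 0x10 within the current register block.
	 */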
2191 	bp->mii_bmcr = MII_BMCR + 0x10;
2192 	bp->mii_bmsr = MII_BMSR + 0x10;
2193 	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
2194 	bp->mii_adv = MII_ADVERTISE + 0x10;
2195 	bp->mii_lpa = MII_LPA + 0x10;
2196 	bp->mii_up1 = MII_BNX2_OVER1G_UP1;
2197 
2198 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
2199 	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
2200 
2201 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2202 	if (reset_phy)
2203 		bnx2_reset_phy(bp);
2204 
2205 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
2206 
2207 	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
2208 	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
2209 	val |= MII_BNX2_SD_1000XCTL1_FIBER;
2210 	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
2211 
2212 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
2213 	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
2214 	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
2215 		val |= BCM5708S_UP1_2G5;
2216 	else
2217 		val &= ~BCM5708S_UP1_2G5;
2218 	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
2219 
2220 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
2221 	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
2222 	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
2223 	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
2224 
2225 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
2226 
2227 	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
2228 	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
2229 	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
2230 
2231 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2232 
2233 	return 0;
2234 }
2235 
2236 static int
2237 bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
2238 {
2239 	u32 val;
2240 
2241 	if (reset_phy)
2242 		bnx2_reset_phy(bp);
2243 
2244 	bp->mii_up1 = BCM5708S_UP1;
2245 
2246 	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
2247 	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
2248 	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2249 
2250 	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
2251 	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
2252 	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
2253 
2254 	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
2255 	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
2256 	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
2257 
2258 	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
2259 		bnx2_read_phy(bp, BCM5708S_UP1, &val);
2260 		val |= BCM5708S_UP1_2G5;
2261 		bnx2_write_phy(bp, BCM5708S_UP1, val);
2262 	}
2263 
2264 	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
2265 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
2266 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1)) {
2267 		/* increase tx signal amplitude */
2268 		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2269 			       BCM5708S_BLK_ADDR_TX_MISC);
2270 		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
2271 		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
2272 		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
2273 		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2274 	}
2275 
2276 	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
2277 	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
2278 
2279 	if (val) {
2280 		u32 is_backplane;
2281 
2282 		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
2283 		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
2284 			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2285 				       BCM5708S_BLK_ADDR_TX_MISC);
2286 			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
2287 			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2288 				       BCM5708S_BLK_ADDR_DIG);
2289 		}
2290 	}
2291 	return 0;
2292 }
2293 
2294 static int
2295 bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
2296 {
2297 	if (reset_phy)
2298 		bnx2_reset_phy(bp);
2299 
2300 	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
2301 
2302 	if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
2303 		BNX2_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
2304 
2305 	if (bp->dev->mtu > ETH_DATA_LEN) {
2306 		u32 val;
2307 
2308 		/* Set extended packet length bit */
2309 		bnx2_write_phy(bp, 0x18, 0x7);
2310 		bnx2_read_phy(bp, 0x18, &val);
2311 		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
2312 
2313 		bnx2_write_phy(bp, 0x1c, 0x6c00);
2314 		bnx2_read_phy(bp, 0x1c, &val);
2315 		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
2316 	}
2317 	else {
2318 		u32 val;
2319 
2320 		bnx2_write_phy(bp, 0x18, 0x7);
2321 		bnx2_read_phy(bp, 0x18, &val);
2322 		bnx2_write_phy(bp, 0x18, val & ~0x4007);
2323 
2324 		bnx2_write_phy(bp, 0x1c, 0x6c00);
2325 		bnx2_read_phy(bp, 0x1c, &val);
2326 		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
2327 	}
2328 
2329 	return 0;
2330 }
2331 
2332 static int
2333 bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
2334 {
2335 	u32 val;
2336 
2337 	if (reset_phy)
2338 		bnx2_reset_phy(bp);
2339 
2340 	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
2341 		bnx2_write_phy(bp, 0x18, 0x0c00);
2342 		bnx2_write_phy(bp, 0x17, 0x000a);
2343 		bnx2_write_phy(bp, 0x15, 0x310b);
2344 		bnx2_write_phy(bp, 0x17, 0x201f);
2345 		bnx2_write_phy(bp, 0x15, 0x9506);
2346 		bnx2_write_phy(bp, 0x17, 0x401f);
2347 		bnx2_write_phy(bp, 0x15, 0x14e2);
2348 		bnx2_write_phy(bp, 0x18, 0x0400);
2349 	}
2350 
2351 	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
2352 		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
2353 			       MII_BNX2_DSP_EXPAND_REG | 0x8);
2354 		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
2355 		val &= ~(1 << 8);
2356 		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
2357 	}
2358 
2359 	if (bp->dev->mtu > ETH_DATA_LEN) {
2360 		/* Set extended packet length bit */
2361 		bnx2_write_phy(bp, 0x18, 0x7);
2362 		bnx2_read_phy(bp, 0x18, &val);
2363 		bnx2_write_phy(bp, 0x18, val | 0x4000);
2364 
2365 		bnx2_read_phy(bp, 0x10, &val);
2366 		bnx2_write_phy(bp, 0x10, val | 0x1);
2367 	}
2368 	else {
2369 		bnx2_write_phy(bp, 0x18, 0x7);
2370 		bnx2_read_phy(bp, 0x18, &val);
2371 		bnx2_write_phy(bp, 0x18, val & ~0x4007);
2372 
2373 		bnx2_read_phy(bp, 0x10, &val);
2374 		bnx2_write_phy(bp, 0x10, val & ~0x1);
2375 	}
2376 
2377 	/* ethernet@wirespeed */
2378 	bnx2_write_phy(bp, MII_BNX2_AUX_CTL, AUX_CTL_MISC_CTL);
2379 	bnx2_read_phy(bp, MII_BNX2_AUX_CTL, &val);
2380 	val |=  AUX_CTL_MISC_CTL_WR | AUX_CTL_MISC_CTL_WIRESPEED;
2381 
2382 	/* auto-mdix */
2383 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
2384 		val |=  AUX_CTL_MISC_CTL_AUTOMDIX;
2385 
2386 	bnx2_write_phy(bp, MII_BNX2_AUX_CTL, val);
2387 	return 0;
2388 }
2389 
2390 
2391 static int
2392 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2393 __releases(&bp->phy_lock)
2394 __acquires(&bp->phy_lock)
2395 {
2396 	u32 val;
2397 	int rc = 0;
2398 
2399 	bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2400 	bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2401 
2402 	bp->mii_bmcr = MII_BMCR;
2403 	bp->mii_bmsr = MII_BMSR;
2404 	bp->mii_bmsr1 = MII_BMSR;
2405 	bp->mii_adv = MII_ADVERTISE;
2406 	bp->mii_lpa = MII_LPA;
2407 
2408 	BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2409 
2410 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2411 		goto setup_phy;
2412 
2413 	bnx2_read_phy(bp, MII_PHYSID1, &val);
2414 	bp->phy_id = val << 16;
2415 	bnx2_read_phy(bp, MII_PHYSID2, &val);
2416 	bp->phy_id |= val & 0xffff;
2417 
2418 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2419 		if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
2420 			rc = bnx2_init_5706s_phy(bp, reset_phy);
2421 		else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
2422 			rc = bnx2_init_5708s_phy(bp, reset_phy);
2423 		else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
2424 			rc = bnx2_init_5709s_phy(bp, reset_phy);
2425 	}
2426 	else {
2427 		rc = bnx2_init_copper_phy(bp, reset_phy);
2428 	}
2429 
2430 setup_phy:
2431 	if (!rc)
2432 		rc = bnx2_setup_phy(bp, bp->phy_port);
2433 
2434 	return rc;
2435 }
2436 
2437 static int
2438 bnx2_set_mac_loopback(struct bnx2 *bp)
2439 {
2440 	u32 mac_mode;
2441 
2442 	mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
2443 	mac_mode &= ~BNX2_EMAC_MODE_PORT;
2444 	mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2445 	BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2446 	bp->link_up = 1;
2447 	return 0;
2448 }
2449 
2450 static int bnx2_test_link(struct bnx2 *);
2451 
2452 static int
2453 bnx2_set_phy_loopback(struct bnx2 *bp)
2454 {
2455 	u32 mac_mode;
2456 	int rc, i;
2457 
2458 	spin_lock_bh(&bp->phy_lock);
2459 	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2460 			    BMCR_SPEED1000);
2461 	spin_unlock_bh(&bp->phy_lock);
2462 	if (rc)
2463 		return rc;
2464 
2465 	for (i = 0; i < 10; i++) {
2466 		if (bnx2_test_link(bp) == 0)
2467 			break;
2468 		msleep(100);
2469 	}
2470 
2471 	mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
2472 	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2473 		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2474 		      BNX2_EMAC_MODE_25G_MODE);
2475 
2476 	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2477 	BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2478 	bp->link_up = 1;
2479 	return 0;
2480 }
2481 
2482 static void
2483 bnx2_dump_mcp_state(struct bnx2 *bp)
2484 {
2485 	struct net_device *dev = bp->dev;
2486 	u32 mcp_p0, mcp_p1;
2487 
2488 	netdev_err(dev, "<--- start MCP states dump --->\n");
2489 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
2490 		mcp_p0 = BNX2_MCP_STATE_P0;
2491 		mcp_p1 = BNX2_MCP_STATE_P1;
2492 	} else {
2493 		mcp_p0 = BNX2_MCP_STATE_P0_5708;
2494 		mcp_p1 = BNX2_MCP_STATE_P1_5708;
2495 	}
2496 	netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
2497 		   bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
2498 	netdev_err(dev, "DEBUG: MCP mode[%08x] state[%08x] evt_mask[%08x]\n",
2499 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_MODE),
2500 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_STATE),
2501 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_EVENT_MASK));
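	/* The program counter is read twice; if the two values differ,
	 * the MCP CPU is still executing instructions.
	 */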
2502 	netdev_err(dev, "DEBUG: pc[%08x] pc[%08x] instr[%08x]\n",
2503 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
2504 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
2505 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_INSTRUCTION));
2506 	netdev_err(dev, "DEBUG: shmem states:\n");
2507 	netdev_err(dev, "DEBUG: drv_mb[%08x] fw_mb[%08x] link_status[%08x]",
2508 		   bnx2_shmem_rd(bp, BNX2_DRV_MB),
2509 		   bnx2_shmem_rd(bp, BNX2_FW_MB),
2510 		   bnx2_shmem_rd(bp, BNX2_LINK_STATUS));
2511 	pr_cont(" drv_pulse_mb[%08x]\n", bnx2_shmem_rd(bp, BNX2_DRV_PULSE_MB));
2512 	netdev_err(dev, "DEBUG: dev_info_signature[%08x] reset_type[%08x]",
2513 		   bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE),
2514 		   bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE));
2515 	pr_cont(" condition[%08x]\n",
2516 		bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION));
2517 	DP_SHMEM_LINE(bp, BNX2_BC_RESET_TYPE);
2518 	DP_SHMEM_LINE(bp, 0x3cc);
2519 	DP_SHMEM_LINE(bp, 0x3dc);
2520 	DP_SHMEM_LINE(bp, 0x3ec);
2521 	netdev_err(dev, "DEBUG: 0x3fc[%08x]\n", bnx2_shmem_rd(bp, 0x3fc));
2522 	netdev_err(dev, "<--- end MCP states dump --->\n");
2523 }
2524 
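/* Post a command to the bootcode through the DRV_MB shared-memory
 * mailbox and, if requested, poll FW_MB until the firmware echoes the
 * sequence number back as an acknowledgement.
 */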
2525 static int
2526 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
2527 {
2528 	int i;
2529 	u32 val;
2530 
2531 	bp->fw_wr_seq++;
2532 	msg_data |= bp->fw_wr_seq;
2533 	bp->fw_last_msg = msg_data;
2534 
2535 	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2536 
2537 	if (!ack)
2538 		return 0;
2539 
2540 	/* wait for an acknowledgement. */
2541 	for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
2542 		msleep(10);
2543 
2544 		val = bnx2_shmem_rd(bp, BNX2_FW_MB);
2545 
2546 		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2547 			break;
2548 	}
2549 	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2550 		return 0;
2551 
2552 	/* If we timed out, inform the firmware that this is the case. */
2553 	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2554 		msg_data &= ~BNX2_DRV_MSG_CODE;
2555 		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2556 
2557 		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2558 		if (!silent) {
2559 			pr_err("fw sync timeout, reset code = %x\n", msg_data);
2560 			bnx2_dump_mcp_state(bp);
2561 		}
2562 
2563 		return -EBUSY;
2564 	}
2565 
2566 	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2567 		return -EIO;
2568 
2569 	return 0;
2570 }
2571 
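/* The 5709 keeps its context memory in host pages.  Start the context
 * engine's memory init, then program one host page-table entry per
 * allocated context block, polling for each write to complete.
 */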
2572 static int
2573 bnx2_init_5709_context(struct bnx2 *bp)
2574 {
2575 	int i, ret = 0;
2576 	u32 val;
2577 
2578 	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2579 	val |= (BNX2_PAGE_BITS - 8) << 16;
2580 	BNX2_WR(bp, BNX2_CTX_COMMAND, val);
2581 	for (i = 0; i < 10; i++) {
2582 		val = BNX2_RD(bp, BNX2_CTX_COMMAND);
2583 		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2584 			break;
2585 		udelay(2);
2586 	}
2587 	if (val & BNX2_CTX_COMMAND_MEM_INIT)
2588 		return -EBUSY;
2589 
2590 	for (i = 0; i < bp->ctx_pages; i++) {
2591 		int j;
2592 
2593 		if (bp->ctx_blk[i])
2594 			memset(bp->ctx_blk[i], 0, BNX2_PAGE_SIZE);
2595 		else
2596 			return -ENOMEM;
2597 
2598 		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2599 			(bp->ctx_blk_mapping[i] & 0xffffffff) |
2600 			BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2601 		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2602 			(u64) bp->ctx_blk_mapping[i] >> 32);
2603 		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2604 			BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		for (j = 0; j < 10; j++) {
2607 			val = BNX2_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2608 			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2609 				break;
2610 			udelay(5);
2611 		}
2612 		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2613 			ret = -EBUSY;
2614 			break;
2615 		}
2616 	}
2617 	return ret;
2618 }
2619 
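/* On the 5706/5708 the context memory is on-chip.  Walk all 96
 * contexts, map each virtual CID to its physical CID, and zero the
 * context through the paged window.
 */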
2620 static void
2621 bnx2_init_context(struct bnx2 *bp)
2622 {
2623 	u32 vcid;
2624 
2625 	vcid = 96;
2626 	while (vcid) {
2627 		u32 vcid_addr, pcid_addr, offset;
2628 		int i;
2629 
2630 		vcid--;
2631 
2632 		if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
2633 			u32 new_vcid;
2634 
2635 			vcid_addr = GET_PCID_ADDR(vcid);
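			/* 5706 A0 remaps part of the VCID space to
			 * different physical CIDs; compute the
			 * remapped address.
			 */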
			if (vcid & 0x8)
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			else
				new_vcid = vcid;
2642 			pcid_addr = GET_PCID_ADDR(new_vcid);
2643 		}
2644 		else {
2645 	    		vcid_addr = GET_CID_ADDR(vcid);
2646 			pcid_addr = vcid_addr;
2647 		}
2648 
2649 		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2650 			vcid_addr += (i << PHY_CTX_SHIFT);
2651 			pcid_addr += (i << PHY_CTX_SHIFT);
2652 
2653 			BNX2_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2654 			BNX2_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2655 
2656 			/* Zero out the context. */
2657 			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2658 				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
2659 		}
2660 	}
2661 }
2662 
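/* Work around bad RX buffer memory: drain the chip's free mbuf pool,
 * remember the good buffers, and free only those back so that buffers
 * in bad memory blocks stay out of circulation.
 */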
2663 static int
2664 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2665 {
2666 	u16 *good_mbuf;
2667 	u32 good_mbuf_cnt;
2668 	u32 val;
2669 
2670 	good_mbuf = kmalloc_array(512, sizeof(u16), GFP_KERNEL);
2671 	if (!good_mbuf)
2672 		return -ENOMEM;
2673 
2674 	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2675 		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2676 
2677 	good_mbuf_cnt = 0;
2678 
2679 	/* Allocate a bunch of mbufs and save the good ones in an array. */
2680 	val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2681 	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2682 		bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2683 				BNX2_RBUF_COMMAND_ALLOC_REQ);
2684 
2685 		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2686 
2687 		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2688 
2689 		/* The addresses with Bit 9 set are bad memory blocks. */
2690 		if (!(val & (1 << 9))) {
2691 			good_mbuf[good_mbuf_cnt] = (u16) val;
2692 			good_mbuf_cnt++;
2693 		}
2694 
2695 		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2696 	}
2697 
	/* Free the good ones back to the mbuf pool, thus discarding
	 * all the bad ones.
	 */
2700 	while (good_mbuf_cnt) {
2701 		good_mbuf_cnt--;
2702 
2703 		val = good_mbuf[good_mbuf_cnt];
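		/* Encode the buffer number into the format expected by
		 * the RBUF free command.
		 */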
2704 		val = (val << 9) | val | 1;
2705 
2706 		bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2707 	}
2708 	kfree(good_mbuf);
2709 	return 0;
2710 }
2711 
2712 static void
2713 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2714 {
2715 	u32 val;
2716 
2717 	val = (mac_addr[0] << 8) | mac_addr[1];
2718 
2719 	BNX2_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2720 
2721 	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2722 		(mac_addr[4] << 8) | mac_addr[5];
2723 
2724 	BNX2_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2725 }
2726 
2727 static inline int
2728 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2729 {
2730 	dma_addr_t mapping;
2731 	struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2732 	struct bnx2_rx_bd *rxbd =
2733 		&rxr->rx_pg_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
2734 	struct page *page = alloc_page(gfp);
2735 
2736 	if (!page)
2737 		return -ENOMEM;
2738 	mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
2739 			       PCI_DMA_FROMDEVICE);
2740 	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2741 		__free_page(page);
2742 		return -EIO;
2743 	}
2744 
2745 	rx_pg->page = page;
2746 	dma_unmap_addr_set(rx_pg, mapping, mapping);
2747 	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2748 	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2749 	return 0;
2750 }
2751 
2752 static void
2753 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2754 {
2755 	struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2756 	struct page *page = rx_pg->page;
2757 
2758 	if (!page)
2759 		return;
2760 
2761 	dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
2762 		       PAGE_SIZE, PCI_DMA_FROMDEVICE);
2763 
2764 	__free_page(page);
2765 	rx_pg->page = NULL;
2766 }
2767 
2768 static inline int
2769 bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2770 {
2771 	u8 *data;
2772 	struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2773 	dma_addr_t mapping;
2774 	struct bnx2_rx_bd *rxbd =
2775 		&rxr->rx_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
2776 
2777 	data = kmalloc(bp->rx_buf_size, gfp);
2778 	if (!data)
2779 		return -ENOMEM;
2780 
2781 	mapping = dma_map_single(&bp->pdev->dev,
2782 				 get_l2_fhdr(data),
2783 				 bp->rx_buf_use_size,
2784 				 PCI_DMA_FROMDEVICE);
2785 	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2786 		kfree(data);
2787 		return -EIO;
2788 	}
2789 
2790 	rx_buf->data = data;
2791 	dma_unmap_addr_set(rx_buf, mapping, mapping);
2792 
2793 	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2794 	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2795 
2796 	rxr->rx_prod_bseq += bp->rx_buf_use_size;
2797 
2798 	return 0;
2799 }
2800 
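/* An attention event is pending when an attention bit and its ack bit
 * disagree.  Writing the set/clear command registers toggles the ack
 * bit to match, acknowledging the event.
 */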
2801 static int
2802 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2803 {
2804 	struct status_block *sblk = bnapi->status_blk.msi;
2805 	u32 new_link_state, old_link_state;
2806 	int is_set = 1;
2807 
2808 	new_link_state = sblk->status_attn_bits & event;
2809 	old_link_state = sblk->status_attn_bits_ack & event;
2810 	if (new_link_state != old_link_state) {
2811 		if (new_link_state)
2812 			BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2813 		else
2814 			BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2815 	} else
2816 		is_set = 0;
2817 
2818 	return is_set;
2819 }
2820 
2821 static void
2822 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2823 {
2824 	spin_lock(&bp->phy_lock);
2825 
2826 	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2827 		bnx2_set_link(bp);
2828 	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2829 		bnx2_set_remote_link(bp);
2830 
	spin_unlock(&bp->phy_lock);
}
2834 
2835 static inline u16
2836 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2837 {
2838 	u16 cons;
2839 
2840 	cons = READ_ONCE(*bnapi->hw_tx_cons_ptr);
2841 
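	/* The last BD of each ring page is a pointer to the next page
	 * rather than a real descriptor, so step past it.
	 */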
2842 	if (unlikely((cons & BNX2_MAX_TX_DESC_CNT) == BNX2_MAX_TX_DESC_CNT))
2843 		cons++;
2844 	return cons;
2845 }
2846 
2847 static int
2848 bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2849 {
2850 	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
2851 	u16 hw_cons, sw_cons, sw_ring_cons;
2852 	int tx_pkt = 0, index;
2853 	unsigned int tx_bytes = 0;
2854 	struct netdev_queue *txq;
2855 
2856 	index = (bnapi - bp->bnx2_napi);
2857 	txq = netdev_get_tx_queue(bp->dev, index);
2858 
2859 	hw_cons = bnx2_get_hw_tx_cons(bnapi);
2860 	sw_cons = txr->tx_cons;
2861 
2862 	while (sw_cons != hw_cons) {
2863 		struct bnx2_sw_tx_bd *tx_buf;
2864 		struct sk_buff *skb;
2865 		int i, last;
2866 
2867 		sw_ring_cons = BNX2_TX_RING_IDX(sw_cons);
2868 
2869 		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
2870 		skb = tx_buf->skb;
2871 
		/* prefetch skb_end_pointer() to speed up skb_shinfo(skb) */
2873 		prefetch(&skb->end);
2874 
2875 		/* partial BD completions possible with TSO packets */
2876 		if (tx_buf->is_gso) {
2877 			u16 last_idx, last_ring_idx;
2878 
2879 			last_idx = sw_cons + tx_buf->nr_frags + 1;
2880 			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
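			/* Account for the next-page pointer BD when the
			 * packet's BDs wrap past the end of a ring page;
			 * the signed 16-bit compare below then works
			 * across counter wrap.
			 */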
			if (unlikely(last_ring_idx >= BNX2_MAX_TX_DESC_CNT))
				last_idx++;
			if ((s16) (last_idx - hw_cons) > 0)
				break;
2887 		}
2888 
2889 		dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
2890 			skb_headlen(skb), PCI_DMA_TODEVICE);
2891 
2892 		tx_buf->skb = NULL;
2893 		last = tx_buf->nr_frags;
2894 
2895 		for (i = 0; i < last; i++) {
2896 			struct bnx2_sw_tx_bd *tx_buf;
2897 
2898 			sw_cons = BNX2_NEXT_TX_BD(sw_cons);
2899 
2900 			tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(sw_cons)];
2901 			dma_unmap_page(&bp->pdev->dev,
2902 				dma_unmap_addr(tx_buf, mapping),
2903 				skb_frag_size(&skb_shinfo(skb)->frags[i]),
2904 				PCI_DMA_TODEVICE);
2905 		}
2906 
2907 		sw_cons = BNX2_NEXT_TX_BD(sw_cons);
2908 
2909 		tx_bytes += skb->len;
2910 		dev_kfree_skb_any(skb);
2911 		tx_pkt++;
2912 		if (tx_pkt == budget)
2913 			break;
2914 
2915 		if (hw_cons == sw_cons)
2916 			hw_cons = bnx2_get_hw_tx_cons(bnapi);
2917 	}
2918 
2919 	netdev_tx_completed_queue(txq, tx_pkt, tx_bytes);
2920 	txr->hw_tx_cons = hw_cons;
2921 	txr->tx_cons = sw_cons;
2922 
2923 	/* Need to make the tx_cons update visible to bnx2_start_xmit()
2924 	 * before checking for netif_tx_queue_stopped().  Without the
2925 	 * memory barrier, there is a small possibility that bnx2_start_xmit()
2926 	 * will miss it and cause the queue to be stopped forever.
2927 	 */
2928 	smp_mb();
2929 
2930 	if (unlikely(netif_tx_queue_stopped(txq)) &&
2931 		     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
2932 		__netif_tx_lock(txq, smp_processor_id());
2933 		if ((netif_tx_queue_stopped(txq)) &&
2934 		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
2935 			netif_tx_wake_queue(txq);
2936 		__netif_tx_unlock(txq);
2937 	}
2938 
2939 	return tx_pkt;
2940 }
2941 
2942 static void
2943 bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2944 			struct sk_buff *skb, int count)
2945 {
2946 	struct bnx2_sw_pg *cons_rx_pg, *prod_rx_pg;
2947 	struct bnx2_rx_bd *cons_bd, *prod_bd;
2948 	int i;
2949 	u16 hw_prod, prod;
2950 	u16 cons = rxr->rx_pg_cons;
2951 
2952 	cons_rx_pg = &rxr->rx_pg_ring[cons];
2953 
2954 	/* The caller was unable to allocate a new page to replace the
2955 	 * last one in the frags array, so we need to recycle that page
2956 	 * and then free the skb.
2957 	 */
2958 	if (skb) {
2959 		struct page *page;
2960 		struct skb_shared_info *shinfo;
2961 
2962 		shinfo = skb_shinfo(skb);
2963 		shinfo->nr_frags--;
2964 		page = skb_frag_page(&shinfo->frags[shinfo->nr_frags]);
2965 		__skb_frag_set_page(&shinfo->frags[shinfo->nr_frags], NULL);
2966 
2967 		cons_rx_pg->page = page;
2968 		dev_kfree_skb(skb);
2969 	}
2970 
2971 	hw_prod = rxr->rx_pg_prod;
2972 
2973 	for (i = 0; i < count; i++) {
2974 		prod = BNX2_RX_PG_RING_IDX(hw_prod);
2975 
2976 		prod_rx_pg = &rxr->rx_pg_ring[prod];
2977 		cons_rx_pg = &rxr->rx_pg_ring[cons];
2978 		cons_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(cons)]
2979 						[BNX2_RX_IDX(cons)];
2980 		prod_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(prod)]
2981 						[BNX2_RX_IDX(prod)];
2982 
2983 		if (prod != cons) {
2984 			prod_rx_pg->page = cons_rx_pg->page;
2985 			cons_rx_pg->page = NULL;
2986 			dma_unmap_addr_set(prod_rx_pg, mapping,
2987 				dma_unmap_addr(cons_rx_pg, mapping));
2988 
2989 			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2990 			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2991 
2992 		}
2993 		cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(cons));
2994 		hw_prod = BNX2_NEXT_RX_BD(hw_prod);
2995 	}
2996 	rxr->rx_pg_prod = hw_prod;
2997 	rxr->rx_pg_cons = cons;
2998 }
2999 
3000 static inline void
3001 bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
3002 		   u8 *data, u16 cons, u16 prod)
3003 {
3004 	struct bnx2_sw_bd *cons_rx_buf, *prod_rx_buf;
3005 	struct bnx2_rx_bd *cons_bd, *prod_bd;
3006 
3007 	cons_rx_buf = &rxr->rx_buf_ring[cons];
3008 	prod_rx_buf = &rxr->rx_buf_ring[prod];
3009 
3010 	dma_sync_single_for_device(&bp->pdev->dev,
3011 		dma_unmap_addr(cons_rx_buf, mapping),
3012 		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
3013 
3014 	rxr->rx_prod_bseq += bp->rx_buf_use_size;
3015 
3016 	prod_rx_buf->data = data;
3017 
3018 	if (cons == prod)
3019 		return;
3020 
3021 	dma_unmap_addr_set(prod_rx_buf, mapping,
3022 			dma_unmap_addr(cons_rx_buf, mapping));
3023 
3024 	cons_bd = &rxr->rx_desc_ring[BNX2_RX_RING(cons)][BNX2_RX_IDX(cons)];
3025 	prod_bd = &rxr->rx_desc_ring[BNX2_RX_RING(prod)][BNX2_RX_IDX(prod)];
3026 	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
3027 	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
3028 }
3029 
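/* Turn a completed RX buffer into an skb.  The (possibly partial)
 * header stays in the kmalloc'ed data buffer; for split or jumbo
 * packets the remaining payload is attached from the page ring as skb
 * fragments.
 */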
3030 static struct sk_buff *
3031 bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u8 *data,
3032 	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
3033 	    u32 ring_idx)
3034 {
3035 	int err;
3036 	u16 prod = ring_idx & 0xffff;
3037 	struct sk_buff *skb;
3038 
3039 	err = bnx2_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
3040 	if (unlikely(err)) {
3041 		bnx2_reuse_rx_data(bp, rxr, data, (u16) (ring_idx >> 16), prod);
3042 error:
3043 		if (hdr_len) {
3044 			unsigned int raw_len = len + 4;
3045 			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
3046 
3047 			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3048 		}
3049 		return NULL;
3050 	}
3051 
3052 	dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
3053 			 PCI_DMA_FROMDEVICE);
3054 	skb = build_skb(data, 0);
3055 	if (!skb) {
3056 		kfree(data);
3057 		goto error;
3058 	}
3059 	skb_reserve(skb, ((u8 *)get_l2_fhdr(data) - data) + BNX2_RX_OFFSET);
3060 	if (hdr_len == 0) {
3061 		skb_put(skb, len);
3062 		return skb;
3063 	} else {
3064 		unsigned int i, frag_len, frag_size, pages;
3065 		struct bnx2_sw_pg *rx_pg;
3066 		u16 pg_cons = rxr->rx_pg_cons;
3067 		u16 pg_prod = rxr->rx_pg_prod;
3068 
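		/* len no longer includes the 4-byte CRC; add it back so
		 * the page accounting matches what the hardware placed,
		 * then trim it from the last fragment below.
		 */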
3069 		frag_size = len + 4 - hdr_len;
3070 		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
3071 		skb_put(skb, hdr_len);
3072 
3073 		for (i = 0; i < pages; i++) {
3074 			dma_addr_t mapping_old;
3075 
3076 			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
3077 			if (unlikely(frag_len <= 4)) {
3078 				unsigned int tail = 4 - frag_len;
3079 
3080 				rxr->rx_pg_cons = pg_cons;
3081 				rxr->rx_pg_prod = pg_prod;
3082 				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
3083 							pages - i);
3084 				skb->len -= tail;
3085 				if (i == 0) {
3086 					skb->tail -= tail;
3087 				} else {
3088 					skb_frag_t *frag =
3089 						&skb_shinfo(skb)->frags[i - 1];
3090 					skb_frag_size_sub(frag, tail);
3091 					skb->data_len -= tail;
3092 				}
3093 				return skb;
3094 			}
3095 			rx_pg = &rxr->rx_pg_ring[pg_cons];
3096 
3097 			/* Don't unmap yet.  If we're unable to allocate a new
3098 			 * page, we need to recycle the page and the DMA addr.
3099 			 */
3100 			mapping_old = dma_unmap_addr(rx_pg, mapping);
3101 			if (i == pages - 1)
3102 				frag_len -= 4;
3103 
3104 			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
3105 			rx_pg->page = NULL;
3106 
3107 			err = bnx2_alloc_rx_page(bp, rxr,
3108 						 BNX2_RX_PG_RING_IDX(pg_prod),
3109 						 GFP_ATOMIC);
3110 			if (unlikely(err)) {
3111 				rxr->rx_pg_cons = pg_cons;
3112 				rxr->rx_pg_prod = pg_prod;
3113 				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
3114 							pages - i);
3115 				return NULL;
3116 			}
3117 
3118 			dma_unmap_page(&bp->pdev->dev, mapping_old,
3119 				       PAGE_SIZE, PCI_DMA_FROMDEVICE);
3120 
3121 			frag_size -= frag_len;
3122 			skb->data_len += frag_len;
3123 			skb->truesize += PAGE_SIZE;
3124 			skb->len += frag_len;
3125 
3126 			pg_prod = BNX2_NEXT_RX_BD(pg_prod);
3127 			pg_cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(pg_cons));
3128 		}
3129 		rxr->rx_pg_prod = pg_prod;
3130 		rxr->rx_pg_cons = pg_cons;
3131 	}
3132 	return skb;
3133 }
3134 
3135 static inline u16
3136 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
3137 {
3138 	u16 cons;
3139 
3140 	cons = READ_ONCE(*bnapi->hw_rx_cons_ptr);
3141 
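	/* Skip the next-page pointer BD at the end of each ring page. */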
3142 	if (unlikely((cons & BNX2_MAX_RX_DESC_CNT) == BNX2_MAX_RX_DESC_CNT))
3143 		cons++;
3144 	return cons;
3145 }
3146 
3147 static int
3148 bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3149 {
3150 	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3151 	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
3152 	struct l2_fhdr *rx_hdr;
3153 	int rx_pkt = 0, pg_ring_used = 0;
3154 
3155 	if (budget <= 0)
3156 		return rx_pkt;
3157 
3158 	hw_cons = bnx2_get_hw_rx_cons(bnapi);
3159 	sw_cons = rxr->rx_cons;
3160 	sw_prod = rxr->rx_prod;
3161 
3162 	/* Memory barrier necessary as speculative reads of the rx
3163 	 * buffer can be ahead of the index in the status block
3164 	 */
3165 	rmb();
3166 	while (sw_cons != hw_cons) {
3167 		unsigned int len, hdr_len;
3168 		u32 status;
3169 		struct bnx2_sw_bd *rx_buf, *next_rx_buf;
3170 		struct sk_buff *skb;
3171 		dma_addr_t dma_addr;
3172 		u8 *data;
3173 		u16 next_ring_idx;
3174 
3175 		sw_ring_cons = BNX2_RX_RING_IDX(sw_cons);
3176 		sw_ring_prod = BNX2_RX_RING_IDX(sw_prod);
3177 
3178 		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
3179 		data = rx_buf->data;
3180 		rx_buf->data = NULL;
3181 
3182 		rx_hdr = get_l2_fhdr(data);
3183 		prefetch(rx_hdr);
3184 
3185 		dma_addr = dma_unmap_addr(rx_buf, mapping);
3186 
3187 		dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
3188 			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
3189 			PCI_DMA_FROMDEVICE);
3190 
3191 		next_ring_idx = BNX2_RX_RING_IDX(BNX2_NEXT_RX_BD(sw_cons));
3192 		next_rx_buf = &rxr->rx_buf_ring[next_ring_idx];
3193 		prefetch(get_l2_fhdr(next_rx_buf->data));
3194 
3195 		len = rx_hdr->l2_fhdr_pkt_len;
3196 		status = rx_hdr->l2_fhdr_status;
3197 
3198 		hdr_len = 0;
3199 		if (status & L2_FHDR_STATUS_SPLIT) {
3200 			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
3201 			pg_ring_used = 1;
3202 		} else if (len > bp->rx_jumbo_thresh) {
3203 			hdr_len = bp->rx_jumbo_thresh;
3204 			pg_ring_used = 1;
3205 		}
3206 
3207 		if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
3208 				       L2_FHDR_ERRORS_PHY_DECODE |
3209 				       L2_FHDR_ERRORS_ALIGNMENT |
3210 				       L2_FHDR_ERRORS_TOO_SHORT |
3211 				       L2_FHDR_ERRORS_GIANT_FRAME))) {
3212 
3213 			bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
3214 					  sw_ring_prod);
3215 			if (pg_ring_used) {
3216 				int pages;
3217 
3218 				pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;
3219 
3220 				bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3221 			}
3222 			goto next_rx;
3223 		}
3224 
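		/* Strip the 4-byte CRC appended by the hardware. */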
3225 		len -= 4;
3226 
3227 		if (len <= bp->rx_copy_thresh) {
3228 			skb = netdev_alloc_skb(bp->dev, len + 6);
3229 			if (!skb) {
3230 				bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
3231 						  sw_ring_prod);
3232 				goto next_rx;
3233 			}
3234 
3235 			/* aligned copy */
3236 			memcpy(skb->data,
3237 			       (u8 *)rx_hdr + BNX2_RX_OFFSET - 6,
3238 			       len + 6);
3239 			skb_reserve(skb, 6);
3240 			skb_put(skb, len);
3241 
3242 			bnx2_reuse_rx_data(bp, rxr, data,
3243 				sw_ring_cons, sw_ring_prod);
3244 
3245 		} else {
3246 			skb = bnx2_rx_skb(bp, rxr, data, len, hdr_len, dma_addr,
3247 					  (sw_ring_cons << 16) | sw_ring_prod);
3248 			if (!skb)
3249 				goto next_rx;
3250 		}
3251 		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
3252 		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
3253 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rx_hdr->l2_fhdr_vlan_tag);
3254 
3255 		skb->protocol = eth_type_trans(skb, bp->dev);
3256 
3257 		if (len > (bp->dev->mtu + ETH_HLEN) &&
		    skb->protocol != htons(ETH_P_8021Q) &&
3259 		    skb->protocol != htons(ETH_P_8021AD)) {
3260 
3261 			dev_kfree_skb(skb);
3262 			goto next_rx;
3263 
3264 		}
3265 
3266 		skb_checksum_none_assert(skb);
3267 		if ((bp->dev->features & NETIF_F_RXCSUM) &&
3268 			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
3269 			L2_FHDR_STATUS_UDP_DATAGRAM))) {
3270 
3271 			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
3272 					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
3273 				skb->ip_summed = CHECKSUM_UNNECESSARY;
3274 		}
3275 		if ((bp->dev->features & NETIF_F_RXHASH) &&
3276 		    ((status & L2_FHDR_STATUS_USE_RXHASH) ==
3277 		     L2_FHDR_STATUS_USE_RXHASH))
3278 			skb_set_hash(skb, rx_hdr->l2_fhdr_hash,
3279 				     PKT_HASH_TYPE_L3);
3280 
3281 		skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
3282 		napi_gro_receive(&bnapi->napi, skb);
3283 		rx_pkt++;
3284 
3285 next_rx:
3286 		sw_cons = BNX2_NEXT_RX_BD(sw_cons);
3287 		sw_prod = BNX2_NEXT_RX_BD(sw_prod);
3288 
3289 		if (rx_pkt == budget)
3290 			break;
3291 
3292 		/* Refresh hw_cons to see if there is new work */
3293 		if (sw_cons == hw_cons) {
3294 			hw_cons = bnx2_get_hw_rx_cons(bnapi);
3295 			rmb();
3296 		}
3297 	}
3298 	rxr->rx_cons = sw_cons;
3299 	rxr->rx_prod = sw_prod;
3300 
3301 	if (pg_ring_used)
3302 		BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
3303 
3304 	BNX2_WR16(bp, rxr->rx_bidx_addr, sw_prod);
3305 
3306 	BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
3307 
3308 	mmiowb();
3309 
	return rx_pkt;
}
3313 
3314 /* MSI ISR - The only difference between this and the INTx ISR
3315  * is that the MSI interrupt is always serviced.
3316  */
3317 static irqreturn_t
3318 bnx2_msi(int irq, void *dev_instance)
3319 {
3320 	struct bnx2_napi *bnapi = dev_instance;
3321 	struct bnx2 *bp = bnapi->bp;
3322 
3323 	prefetch(bnapi->status_blk.msi);
3324 	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3325 		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3326 		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3327 
3328 	/* Return here if interrupt is disabled. */
3329 	if (unlikely(atomic_read(&bp->intr_sem) != 0))
3330 		return IRQ_HANDLED;
3331 
3332 	napi_schedule(&bnapi->napi);
3333 
3334 	return IRQ_HANDLED;
3335 }
3336 
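/* One-shot MSI ISR - the hardware automatically disables further
 * interrupts until they are re-enabled at the end of NAPI polling, so
 * no ack or mask write is needed here.
 */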
3337 static irqreturn_t
3338 bnx2_msi_1shot(int irq, void *dev_instance)
3339 {
3340 	struct bnx2_napi *bnapi = dev_instance;
3341 	struct bnx2 *bp = bnapi->bp;
3342 
3343 	prefetch(bnapi->status_blk.msi);
3344 
3345 	/* Return here if interrupt is disabled. */
3346 	if (unlikely(atomic_read(&bp->intr_sem) != 0))
3347 		return IRQ_HANDLED;
3348 
3349 	napi_schedule(&bnapi->napi);
3350 
3351 	return IRQ_HANDLED;
3352 }
3353 
3354 static irqreturn_t
3355 bnx2_interrupt(int irq, void *dev_instance)
3356 {
3357 	struct bnx2_napi *bnapi = dev_instance;
3358 	struct bnx2 *bp = bnapi->bp;
3359 	struct status_block *sblk = bnapi->status_blk.msi;
3360 
3361 	/* When using INTx, it is possible for the interrupt to arrive
3362 	 * at the CPU before the status block posted prior to the
3363 	 * interrupt. Reading a register will flush the status block.
3364 	 * When using MSI, the MSI message will always complete after
3365 	 * the status block write.
3366 	 */
3367 	if ((sblk->status_idx == bnapi->last_status_idx) &&
3368 	    (BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS) &
3369 	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
3370 		return IRQ_NONE;
3371 
3372 	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3373 		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3374 		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3375 
3376 	/* Read back to deassert IRQ immediately to avoid too many
3377 	 * spurious interrupts.
3378 	 */
3379 	BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
3380 
3381 	/* Return here if interrupt is shared and is disabled. */
3382 	if (unlikely(atomic_read(&bp->intr_sem) != 0))
3383 		return IRQ_HANDLED;
3384 
3385 	if (napi_schedule_prep(&bnapi->napi)) {
3386 		bnapi->last_status_idx = sblk->status_idx;
3387 		__napi_schedule(&bnapi->napi);
3388 	}
3389 
3390 	return IRQ_HANDLED;
3391 }
3392 
3393 static inline int
3394 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3395 {
3396 	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3397 	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3398 
3399 	if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3400 	    (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3401 		return 1;
3402 	return 0;
3403 }
3404 
3405 #define STATUS_ATTN_EVENTS	(STATUS_ATTN_BITS_LINK_STATE | \
3406 				 STATUS_ATTN_BITS_TIMER_ABORT)
3407 
3408 static inline int
3409 bnx2_has_work(struct bnx2_napi *bnapi)
3410 {
3411 	struct status_block *sblk = bnapi->status_blk.msi;
3412 
3413 	if (bnx2_has_fast_work(bnapi))
3414 		return 1;
3415 
3416 #ifdef BCM_CNIC
3417 	if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3418 		return 1;
3419 #endif
3420 
3421 	if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3422 	    (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3423 		return 1;
3424 
3425 	return 0;
3426 }
3427 
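/* Work around occasionally lost MSIs: if work is pending but the
 * status index has not moved since the last idle check, toggle the
 * MSI enable bit and invoke the ISR by hand.
 */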
3428 static void
3429 bnx2_chk_missed_msi(struct bnx2 *bp)
3430 {
3431 	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
3432 	u32 msi_ctrl;
3433 
3434 	if (bnx2_has_work(bnapi)) {
3435 		msi_ctrl = BNX2_RD(bp, BNX2_PCICFG_MSI_CONTROL);
3436 		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
3437 			return;
3438 
3439 		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
3440 			BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
3441 				~BNX2_PCICFG_MSI_CONTROL_ENABLE);
3442 			BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
3443 			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
3444 		}
3445 	}
3446 
3447 	bp->idle_chk_status_idx = bnapi->last_status_idx;
3448 }
3449 
3450 #ifdef BCM_CNIC
3451 static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
3452 {
3453 	struct cnic_ops *c_ops;
3454 
3455 	if (!bnapi->cnic_present)
3456 		return;
3457 
3458 	rcu_read_lock();
3459 	c_ops = rcu_dereference(bp->cnic_ops);
3460 	if (c_ops)
3461 		bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
3462 						      bnapi->status_blk.msi);
3463 	rcu_read_unlock();
3464 }
3465 #endif
3466 
3467 static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
3468 {
3469 	struct status_block *sblk = bnapi->status_blk.msi;
3470 	u32 status_attn_bits = sblk->status_attn_bits;
3471 	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
3472 
3473 	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
3474 	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
3475 
3476 		bnx2_phy_int(bp, bnapi);
3477 
3478 		/* This is needed to take care of transient status
3479 		 * during link changes.
3480 		 */
3481 		BNX2_WR(bp, BNX2_HC_COMMAND,
3482 			bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3483 		BNX2_RD(bp, BNX2_HC_COMMAND);
3484 	}
3485 }
3486 
3487 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3488 			  int work_done, int budget)
3489 {
3490 	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3491 	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3492 
3493 	if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3494 		bnx2_tx_int(bp, bnapi, 0);
3495 
3496 	if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3497 		work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3498 
3499 	return work_done;
3500 }
3501 
3502 static int bnx2_poll_msix(struct napi_struct *napi, int budget)
3503 {
3504 	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3505 	struct bnx2 *bp = bnapi->bp;
3506 	int work_done = 0;
3507 	struct status_block_msix *sblk = bnapi->status_blk.msix;
3508 
3509 	while (1) {
3510 		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3511 		if (unlikely(work_done >= budget))
3512 			break;
3513 
3514 		bnapi->last_status_idx = sblk->status_idx;
3515 		/* status idx must be read before checking for more work. */
3516 		rmb();
3517 		if (likely(!bnx2_has_fast_work(bnapi))) {
3518 
3519 			napi_complete_done(napi, work_done);
3520 			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
3521 				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3522 				bnapi->last_status_idx);
3523 			break;
3524 		}
3525 	}
3526 	return work_done;
3527 }
3528 
3529 static int bnx2_poll(struct napi_struct *napi, int budget)
3530 {
3531 	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3532 	struct bnx2 *bp = bnapi->bp;
3533 	int work_done = 0;
3534 	struct status_block *sblk = bnapi->status_blk.msi;
3535 
3536 	while (1) {
3537 		bnx2_poll_link(bp, bnapi);
3538 
3539 		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3540 
3541 #ifdef BCM_CNIC
3542 		bnx2_poll_cnic(bp, bnapi);
3543 #endif
3544 
3545 		/* bnapi->last_status_idx is used below to tell the hw how
3546 		 * much work has been processed, so we must read it before
3547 		 * checking for more work.
3548 		 */
3549 		bnapi->last_status_idx = sblk->status_idx;
3550 
3551 		if (unlikely(work_done >= budget))
3552 			break;
3553 
3554 		rmb();
3555 		if (likely(!bnx2_has_work(bnapi))) {
3556 			napi_complete_done(napi, work_done);
3557 			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
3558 				BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3559 					BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3560 					bnapi->last_status_idx);
3561 				break;
3562 			}
3563 			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3564 				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3565 				BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
3566 				bnapi->last_status_idx);
3567 
3568 			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3569 				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3570 				bnapi->last_status_idx);
3571 			break;
3572 		}
3573 	}
3574 
3575 	return work_done;
3576 }
3577 
3578 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
3579  * from set_multicast.
3580  */
3581 static void
3582 bnx2_set_rx_mode(struct net_device *dev)
3583 {
3584 	struct bnx2 *bp = netdev_priv(dev);
3585 	u32 rx_mode, sort_mode;
3586 	struct netdev_hw_addr *ha;
3587 	int i;
3588 
3589 	if (!netif_running(dev))
3590 		return;
3591 
3592 	spin_lock_bh(&bp->phy_lock);
3593 
3594 	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3595 				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3596 	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
3597 	if (!(dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
3598 	     (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
3599 		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3600 	if (dev->flags & IFF_PROMISC) {
3601 		/* Promiscuous mode. */
3602 		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3603 		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3604 			     BNX2_RPM_SORT_USER0_PROM_VLAN;
3605 	}
3606 	else if (dev->flags & IFF_ALLMULTI) {
3607 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3608 			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3609 				0xffffffff);
3610         	}
3611 		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3612 	}
3613 	else {
3614 		/* Accept one or more multicast(s). */
3615 		u32 mc_filter[NUM_MC_HASH_REGISTERS];
3616 		u32 regidx;
3617 		u32 bit;
3618 		u32 crc;
3619 
3620 		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3621 
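		/* Hash each address with CRC32; the low 8 bits of the
		 * CRC select one of the 256 filter bits (8 registers of
		 * 32 bits each).
		 */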
3622 		netdev_for_each_mc_addr(ha, dev) {
3623 			crc = ether_crc_le(ETH_ALEN, ha->addr);
3624 			bit = crc & 0xff;
3625 			regidx = (bit & 0xe0) >> 5;
3626 			bit &= 0x1f;
3627 			mc_filter[regidx] |= (1 << bit);
3628 		}
3629 
3630 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3631 			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3632 				mc_filter[i]);
3633 		}
3634 
3635 		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3636 	}
3637 
3638 	if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
3639 		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3640 		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3641 			     BNX2_RPM_SORT_USER0_PROM_VLAN;
3642 	} else if (!(dev->flags & IFF_PROMISC)) {
		/* Add all entries to the match filter list */
3644 		i = 0;
3645 		netdev_for_each_uc_addr(ha, dev) {
3646 			bnx2_set_mac_addr(bp, ha->addr,
3647 					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
3648 			sort_mode |= (1 <<
3649 				      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
3650 			i++;
3651 		}
3652 
3653 	}
3654 
3655 	if (rx_mode != bp->rx_mode) {
3656 		bp->rx_mode = rx_mode;
3657 		BNX2_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3658 	}
3659 
3660 	BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3661 	BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3662 	BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3663 
3664 	spin_unlock_bh(&bp->phy_lock);
3665 }
3666 
3667 static int
3668 check_fw_section(const struct firmware *fw,
3669 		 const struct bnx2_fw_file_section *section,
3670 		 u32 alignment, bool non_empty)
3671 {
3672 	u32 offset = be32_to_cpu(section->offset);
3673 	u32 len = be32_to_cpu(section->len);
3674 
3675 	if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3676 		return -EINVAL;
3677 	if ((non_empty && len == 0) || len > fw->size - offset ||
3678 	    len & (alignment - 1))
3679 		return -EINVAL;
3680 	return 0;
3681 }
3682 
3683 static int
3684 check_mips_fw_entry(const struct firmware *fw,
3685 		    const struct bnx2_mips_fw_file_entry *entry)
3686 {
3687 	if (check_fw_section(fw, &entry->text, 4, true) ||
3688 	    check_fw_section(fw, &entry->data, 4, false) ||
3689 	    check_fw_section(fw, &entry->rodata, 4, false))
3690 		return -EINVAL;
3691 	return 0;
3692 }
3693 
3694 static void bnx2_release_firmware(struct bnx2 *bp)
3695 {
3696 	if (bp->rv2p_firmware) {
3697 		release_firmware(bp->mips_firmware);
3698 		release_firmware(bp->rv2p_firmware);
3699 		bp->rv2p_firmware = NULL;
3700 	}
3701 }
3702 
3703 static int bnx2_request_uncached_firmware(struct bnx2 *bp)
3704 {
3705 	const char *mips_fw_file, *rv2p_fw_file;
3706 	const struct bnx2_mips_fw_file *mips_fw;
3707 	const struct bnx2_rv2p_fw_file *rv2p_fw;
3708 	int rc;
3709 
3710 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
3711 		mips_fw_file = FW_MIPS_FILE_09;
3712 		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A0) ||
3713 		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A1))
3714 			rv2p_fw_file = FW_RV2P_FILE_09_Ax;
3715 		else
3716 			rv2p_fw_file = FW_RV2P_FILE_09;
3717 	} else {
3718 		mips_fw_file = FW_MIPS_FILE_06;
3719 		rv2p_fw_file = FW_RV2P_FILE_06;
3720 	}
3721 
3722 	rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3723 	if (rc) {
3724 		pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
3725 		goto out;
3726 	}
3727 
3728 	rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3729 	if (rc) {
3730 		pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
3731 		goto err_release_mips_firmware;
3732 	}
3733 	mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3734 	rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3735 	if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3736 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3737 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3738 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3739 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3740 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3741 		pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
3742 		rc = -EINVAL;
3743 		goto err_release_firmware;
3744 	}
3745 	if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3746 	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3747 	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3748 		pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
3749 		rc = -EINVAL;
3750 		goto err_release_firmware;
3751 	}
3752 out:
3753 	return rc;
3754 
3755 err_release_firmware:
3756 	release_firmware(bp->rv2p_firmware);
3757 	bp->rv2p_firmware = NULL;
3758 err_release_mips_firmware:
3759 	release_firmware(bp->mips_firmware);
3760 	goto out;
3761 }
3762 
3763 static int bnx2_request_firmware(struct bnx2 *bp)
3764 {
3765 	return bp->rv2p_firmware ? 0 : bnx2_request_uncached_firmware(bp);
3766 }
3767 
3768 static u32
3769 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3770 {
3771 	switch (idx) {
3772 	case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3773 		rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3774 		rv2p_code |= RV2P_BD_PAGE_SIZE;
3775 		break;
3776 	}
3777 	return rv2p_code;
3778 }
3779 
3780 static int
3781 load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
3782 	     const struct bnx2_rv2p_fw_file_entry *fw_entry)
3783 {
3784 	u32 rv2p_code_len, file_offset;
3785 	__be32 *rv2p_code;
3786 	int i;
3787 	u32 val, cmd, addr;
3788 
3789 	rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
3790 	file_offset = be32_to_cpu(fw_entry->rv2p.offset);
3791 
3792 	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3793 
3794 	if (rv2p_proc == RV2P_PROC1) {
3795 		cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3796 		addr = BNX2_RV2P_PROC1_ADDR_CMD;
3797 	} else {
3798 		cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3799 		addr = BNX2_RV2P_PROC2_ADDR_CMD;
3800 	}
3801 
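	/* Each RV2P instruction is 64 bits: write the high and low
	 * words, then commit them to the next instruction address.
	 */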
3802 	for (i = 0; i < rv2p_code_len; i += 8) {
3803 		BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
3804 		rv2p_code++;
3805 		BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
3806 		rv2p_code++;
3807 
3808 		val = (i / 8) | cmd;
3809 		BNX2_WR(bp, addr, val);
3810 	}
3811 
3812 	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3813 	for (i = 0; i < 8; i++) {
3814 		u32 loc, code;
3815 
3816 		loc = be32_to_cpu(fw_entry->fixup[i]);
3817 		if (loc && ((loc * 4) < rv2p_code_len)) {
3818 			code = be32_to_cpu(*(rv2p_code + loc - 1));
3819 			BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
3820 			code = be32_to_cpu(*(rv2p_code + loc));
3821 			code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
3822 			BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, code);
3823 
3824 			val = (loc / 2) | cmd;
3825 			BNX2_WR(bp, addr, val);
3826 		}
3827 	}
3828 
	/* Reset the processor; un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1)
		BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	else
		BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3836 
3837 	return 0;
3838 }
3839 
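/* Load firmware into one of the on-chip MIPS CPUs: halt the CPU, copy
 * the text, data, and read-only sections into its scratchpad memory,
 * set the program counter to the entry point, and restart the CPU.
 */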
3840 static int
3841 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3842 	    const struct bnx2_mips_fw_file_entry *fw_entry)
3843 {
3844 	u32 addr, len, file_offset;
3845 	__be32 *data;
3846 	u32 offset;
3847 	u32 val;
3848 
3849 	/* Halt the CPU. */
3850 	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3851 	val |= cpu_reg->mode_value_halt;
3852 	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3853 	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3854 
3855 	/* Load the Text area. */
3856 	addr = be32_to_cpu(fw_entry->text.addr);
3857 	len = be32_to_cpu(fw_entry->text.len);
3858 	file_offset = be32_to_cpu(fw_entry->text.offset);
3859 	data = (__be32 *)(bp->mips_firmware->data + file_offset);
3860 
3861 	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3862 	if (len) {
3863 		int j;
3864 
3865 		for (j = 0; j < (len / 4); j++, offset += 4)
3866 			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3867 	}
3868 
3869 	/* Load the Data area. */
3870 	addr = be32_to_cpu(fw_entry->data.addr);
3871 	len = be32_to_cpu(fw_entry->data.len);
3872 	file_offset = be32_to_cpu(fw_entry->data.offset);
3873 	data = (__be32 *)(bp->mips_firmware->data + file_offset);
3874 
3875 	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3876 	if (len) {
3877 		int j;
3878 
3879 		for (j = 0; j < (len / 4); j++, offset += 4)
3880 			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3881 	}
3882 
3883 	/* Load the Read-Only area. */
3884 	addr = be32_to_cpu(fw_entry->rodata.addr);
3885 	len = be32_to_cpu(fw_entry->rodata.len);
3886 	file_offset = be32_to_cpu(fw_entry->rodata.offset);
3887 	data = (__be32 *)(bp->mips_firmware->data + file_offset);
3888 
3889 	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3890 	if (len) {
3891 		int j;
3892 
3893 		for (j = 0; j < (len / 4); j++, offset += 4)
3894 			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3895 	}
3896 
3897 	/* Clear the pre-fetch instruction. */
3898 	bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3899 
3900 	val = be32_to_cpu(fw_entry->start_addr);
3901 	bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3902 
3903 	/* Start the CPU. */
3904 	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3905 	val &= ~cpu_reg->mode_value_halt;
3906 	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3907 	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3908 
3909 	return 0;
3910 }
3911 
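/* Load firmware into all on-chip processors: both RV2P engines and the
 * RX, TX, TX patch-up, completion, and command MIPS CPUs.
 */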
3912 static int
3913 bnx2_init_cpus(struct bnx2 *bp)
3914 {
3915 	const struct bnx2_mips_fw_file *mips_fw =
3916 		(const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3917 	const struct bnx2_rv2p_fw_file *rv2p_fw =
3918 		(const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3919 	int rc;
3920 
3921 	/* Initialize the RV2P processor. */
3922 	load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3923 	load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3924 
3925 	/* Initialize the RX Processor. */
3926 	rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3927 	if (rc)
3928 		goto init_cpu_err;
3929 
3930 	/* Initialize the TX Processor. */
3931 	rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3932 	if (rc)
3933 		goto init_cpu_err;
3934 
3935 	/* Initialize the TX Patch-up Processor. */
3936 	rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3937 	if (rc)
3938 		goto init_cpu_err;
3939 
3940 	/* Initialize the Completion Processor. */
3941 	rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3942 	if (rc)
3943 		goto init_cpu_err;
3944 
3945 	/* Initialize the Command Processor. */
3946 	rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3947 
3948 init_cpu_err:
3949 	return rc;
3950 }
3951 
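/* Prepare the MAC for Wake-on-LAN and tell the bootcode which suspend
 * mode (WOL or no-WOL) is being entered.
 */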
3952 static void
3953 bnx2_setup_wol(struct bnx2 *bp)
3954 {
3955 	int i;
3956 	u32 val, wol_msg;
3957 
3958 	if (bp->wol) {
3959 		u32 advertising;
3960 		u8 autoneg;
3961 
3962 		autoneg = bp->autoneg;
3963 		advertising = bp->advertising;
3964 
3965 		if (bp->phy_port == PORT_TP) {
3966 			bp->autoneg = AUTONEG_SPEED;
3967 			bp->advertising = ADVERTISED_10baseT_Half |
3968 				ADVERTISED_10baseT_Full |
3969 				ADVERTISED_100baseT_Half |
3970 				ADVERTISED_100baseT_Full |
3971 				ADVERTISED_Autoneg;
3972 		}
3973 
3974 		spin_lock_bh(&bp->phy_lock);
3975 		bnx2_setup_phy(bp, bp->phy_port);
3976 		spin_unlock_bh(&bp->phy_lock);
3977 
3978 		bp->autoneg = autoneg;
3979 		bp->advertising = advertising;
3980 
3981 		bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
3982 
3983 		val = BNX2_RD(bp, BNX2_EMAC_MODE);
3984 
3985 		/* Enable port mode. */
3986 		val &= ~BNX2_EMAC_MODE_PORT;
3987 		val |= BNX2_EMAC_MODE_MPKT_RCVD |
3988 		       BNX2_EMAC_MODE_ACPI_RCVD |
3989 		       BNX2_EMAC_MODE_MPKT;
3990 		if (bp->phy_port == PORT_TP) {
3991 			val |= BNX2_EMAC_MODE_PORT_MII;
3992 		} else {
3993 			val |= BNX2_EMAC_MODE_PORT_GMII;
3994 			if (bp->line_speed == SPEED_2500)
3995 				val |= BNX2_EMAC_MODE_25G_MODE;
3996 		}
3997 
3998 		BNX2_WR(bp, BNX2_EMAC_MODE, val);
3999 
4000 		/* receive all multicast */
4001 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
4002 			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
4003 				0xffffffff);
4004 		}
4005 		BNX2_WR(bp, BNX2_EMAC_RX_MODE, BNX2_EMAC_RX_MODE_SORT_MODE);
4006 
4007 		val = 1 | BNX2_RPM_SORT_USER0_BC_EN | BNX2_RPM_SORT_USER0_MC_EN;
4008 		BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
4009 		BNX2_WR(bp, BNX2_RPM_SORT_USER0, val);
4010 		BNX2_WR(bp, BNX2_RPM_SORT_USER0, val | BNX2_RPM_SORT_USER0_ENA);
4011 
4012 		/* Need to enable EMAC and RPM for WOL. */
4013 		BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4014 			BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
4015 			BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
4016 			BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
4017 
4018 		val = BNX2_RD(bp, BNX2_RPM_CONFIG);
4019 		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
4020 		BNX2_WR(bp, BNX2_RPM_CONFIG, val);
4021 
4022 		wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	} else {
		wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	}
4026 
4027 	if (!(bp->flags & BNX2_FLAG_NO_WOL)) {
4028 		u32 val;
4029 
4030 		wol_msg |= BNX2_DRV_MSG_DATA_WAIT3;
4031 		if (bp->fw_last_msg || BNX2_CHIP(bp) != BNX2_CHIP_5709) {
4032 			bnx2_fw_sync(bp, wol_msg, 1, 0);
4033 			return;
4034 		}
4035 		/* Tell firmware not to power down the PHY yet, otherwise
4036 		 * the chip will take a long time to respond to MMIO reads.
4037 		 */
4038 		val = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
4039 		bnx2_shmem_wr(bp, BNX2_PORT_FEATURE,
4040 			      val | BNX2_PORT_FEATURE_ASF_ENABLED);
4041 		bnx2_fw_sync(bp, wol_msg, 1, 0);
4042 		bnx2_shmem_wr(bp, BNX2_PORT_FEATURE, val);
4043 	}
4044 
4045 }
4046 
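/* Move the device between PCI power states.  Entering D3hot programs
 * the Wake-on-LAN configuration first; returning to D0 clears the
 * magic packet mode again.
 */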
4047 static int
4048 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
4049 {
4050 	switch (state) {
4051 	case PCI_D0: {
4052 		u32 val;
4053 
4054 		pci_enable_wake(bp->pdev, PCI_D0, false);
4055 		pci_set_power_state(bp->pdev, PCI_D0);
4056 
4057 		val = BNX2_RD(bp, BNX2_EMAC_MODE);
4058 		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
4059 		val &= ~BNX2_EMAC_MODE_MPKT;
4060 		BNX2_WR(bp, BNX2_EMAC_MODE, val);
4061 
4062 		val = BNX2_RD(bp, BNX2_RPM_CONFIG);
4063 		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
4064 		BNX2_WR(bp, BNX2_RPM_CONFIG, val);
4065 		break;
4066 	}
4067 	case PCI_D3hot: {
4068 		bnx2_setup_wol(bp);
4069 		pci_wake_from_d3(bp->pdev, bp->wol);
		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)) {
			if (bp->wol)
				pci_set_power_state(bp->pdev, PCI_D3hot);
			break;
		}
4078 		if (!bp->fw_last_msg && BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4079 			u32 val;
4080 
4081 			/* Tell firmware not to power down the PHY yet,
4082 			 * otherwise the other port may not respond to
4083 			 * MMIO reads.
4084 			 */
4085 			val = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
4086 			val &= ~BNX2_CONDITION_PM_STATE_MASK;
4087 			val |= BNX2_CONDITION_PM_STATE_UNPREP;
4088 			bnx2_shmem_wr(bp, BNX2_BC_STATE_CONDITION, val);
4089 		}
4090 		pci_set_power_state(bp->pdev, PCI_D3hot);
4091 
4092 		/* No more memory access after this point until
4093 		 * device is brought back to D0.
4094 		 */
4095 		break;
4096 	}
4097 	default:
4098 		return -EINVAL;
4099 	}
4100 	return 0;
4101 }
4102 
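/* Acquire the NVRAM software arbitration lock, polling until the
 * hardware grants the request or the timeout expires.
 */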
4103 static int
4104 bnx2_acquire_nvram_lock(struct bnx2 *bp)
4105 {
4106 	u32 val;
4107 	int j;
4108 
4109 	/* Request access to the flash interface. */
4110 	BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4111 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4112 		val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4113 		if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4114 			break;
4115 
4116 		udelay(5);
4117 	}
4118 
4119 	if (j >= NVRAM_TIMEOUT_COUNT)
4120 		return -EBUSY;
4121 
4122 	return 0;
4123 }
4124 
4125 static int
4126 bnx2_release_nvram_lock(struct bnx2 *bp)
4127 {
4128 	int j;
4129 	u32 val;
4130 
4131 	/* Relinquish nvram interface. */
4132 	BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4133 
4134 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4135 		val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4136 		if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4137 			break;
4138 
4139 		udelay(5);
4140 	}
4141 
4142 	if (j >= NVRAM_TIMEOUT_COUNT)
4143 		return -EBUSY;
4144 
4145 	return 0;
4146 }
4147 
4148 
4149 static int
4150 bnx2_enable_nvram_write(struct bnx2 *bp)
4151 {
4152 	u32 val;
4153 
4154 	val = BNX2_RD(bp, BNX2_MISC_CFG);
4155 	BNX2_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4156 
4157 	if (bp->flash_info->flags & BNX2_NV_WREN) {
4158 		int j;
4159 
4160 		BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4161 		BNX2_WR(bp, BNX2_NVM_COMMAND,
4162 			BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4163 
4164 		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4165 			udelay(5);
4166 
4167 			val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4168 			if (val & BNX2_NVM_COMMAND_DONE)
4169 				break;
4170 		}
4171 
4172 		if (j >= NVRAM_TIMEOUT_COUNT)
4173 			return -EBUSY;
4174 	}
4175 	return 0;
4176 }
4177 
4178 static void
4179 bnx2_disable_nvram_write(struct bnx2 *bp)
4180 {
4181 	u32 val;
4182 
4183 	val = BNX2_RD(bp, BNX2_MISC_CFG);
4184 	BNX2_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4185 }
4186 
4187 
4188 static void
4189 bnx2_enable_nvram_access(struct bnx2 *bp)
4190 {
4191 	u32 val;
4192 
4193 	val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4194 	/* Enable both bits, even on read. */
4195 	BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4196 		val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4197 }
4198 
4199 static void
4200 bnx2_disable_nvram_access(struct bnx2 *bp)
4201 {
4202 	u32 val;
4203 
4204 	val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4205 	/* Disable both bits, even after read. */
4206 	BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4207 		val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4208 			BNX2_NVM_ACCESS_ENABLE_WR_EN));
4209 }
4210 
4211 static int
4212 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
4213 {
4214 	u32 cmd;
4215 	int j;
4216 
4217 	if (bp->flash_info->flags & BNX2_NV_BUFFERED)
4218 		/* Buffered flash, no erase needed */
4219 		return 0;
4220 
4221 	/* Build an erase command */
4222 	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
4223 	      BNX2_NVM_COMMAND_DOIT;
4224 
4225 	/* Need to clear DONE bit separately. */
4226 	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4227 
	/* Address of the NVRAM page to erase. */
4229 	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4230 
4231 	/* Issue an erase command. */
4232 	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4233 
4234 	/* Wait for completion. */
4235 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4236 		u32 val;
4237 
4238 		udelay(5);
4239 
4240 		val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4241 		if (val & BNX2_NVM_COMMAND_DONE)
4242 			break;
4243 	}
4244 
4245 	if (j >= NVRAM_TIMEOUT_COUNT)
4246 		return -EBUSY;
4247 
4248 	return 0;
4249 }
4250 
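/* Read one 32-bit word of NVRAM at offset into ret_val.  cmd_flags
 * marks the first/last access of a larger sequence.
 */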
4251 static int
4252 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
4253 {
4254 	u32 cmd;
4255 	int j;
4256 
4257 	/* Build the command word. */
4258 	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
4259 
	/* Translate the linear offset into the page-addressed form used by
	 * buffered flash; not needed for the 5709. */
4261 	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4262 		offset = ((offset / bp->flash_info->page_size) <<
4263 			   bp->flash_info->page_bits) +
4264 			  (offset % bp->flash_info->page_size);
4265 	}
4266 
4267 	/* Need to clear DONE bit separately. */
4268 	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4269 
4270 	/* Address of the NVRAM to read from. */
4271 	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4272 
4273 	/* Issue a read command. */
4274 	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4275 
4276 	/* Wait for completion. */
4277 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4278 		u32 val;
4279 
4280 		udelay(5);
4281 
4282 		val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4283 		if (val & BNX2_NVM_COMMAND_DONE) {
4284 			__be32 v = cpu_to_be32(BNX2_RD(bp, BNX2_NVM_READ));
4285 			memcpy(ret_val, &v, 4);
4286 			break;
4287 		}
4288 	}
4289 	if (j >= NVRAM_TIMEOUT_COUNT)
4290 		return -EBUSY;
4291 
4292 	return 0;
4293 }
4294 
4295 
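/* Write one 32-bit word from val to NVRAM at offset, using the same
 * page address translation and FIRST/LAST flags as the read path.
 */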
4296 static int
4297 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4298 {
4299 	u32 cmd;
4300 	__be32 val32;
4301 	int j;
4302 
4303 	/* Build the command word. */
4304 	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4305 
	/* Translate the linear offset into the page-addressed form used by
	 * buffered flash; not needed for the 5709. */
4307 	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4308 		offset = ((offset / bp->flash_info->page_size) <<
4309 			  bp->flash_info->page_bits) +
4310 			 (offset % bp->flash_info->page_size);
4311 	}
4312 
4313 	/* Need to clear DONE bit separately. */
4314 	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4315 
4316 	memcpy(&val32, val, 4);
4317 
4318 	/* Write the data. */
4319 	BNX2_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4320 
4321 	/* Address of the NVRAM to write to. */
4322 	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4323 
4324 	/* Issue the write command. */
4325 	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4326 
4327 	/* Wait for completion. */
4328 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4329 		udelay(5);
4330 
4331 		if (BNX2_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4332 			break;
4333 	}
4334 	if (j >= NVRAM_TIMEOUT_COUNT)
4335 		return -EBUSY;
4336 
4337 	return 0;
4338 }
4339 
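/* Identify the attached flash/EEPROM part.  The 5709 has a fixed flash
 * type; older chips are matched against flash_table by their strapping,
 * reconfiguring the flash interface if it has not been set up yet.
 */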
4340 static int
4341 bnx2_init_nvram(struct bnx2 *bp)
4342 {
4343 	u32 val;
4344 	int j, entry_count, rc = 0;
4345 	const struct flash_spec *flash;
4346 
4347 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4348 		bp->flash_info = &flash_5709;
4349 		goto get_flash_size;
4350 	}
4351 
4352 	/* Determine the selected interface. */
4353 	val = BNX2_RD(bp, BNX2_NVM_CFG1);
4354 
4355 	entry_count = ARRAY_SIZE(flash_table);
4356 
4357 	if (val & 0x40000000) {
4358 
4359 		/* Flash interface has been reconfigured */
4360 		for (j = 0, flash = &flash_table[0]; j < entry_count;
4361 		     j++, flash++) {
4362 			if ((val & FLASH_BACKUP_STRAP_MASK) ==
4363 			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
4364 				bp->flash_info = flash;
4365 				break;
4366 			}
4367 		}
4368 	}
4369 	else {
4370 		u32 mask;
		/* Not yet reconfigured */
4372 
4373 		if (val & (1 << 23))
4374 			mask = FLASH_BACKUP_STRAP_MASK;
4375 		else
4376 			mask = FLASH_STRAP_MASK;
4377 
4378 		for (j = 0, flash = &flash_table[0]; j < entry_count;
4379 			j++, flash++) {
4380 
4381 			if ((val & mask) == (flash->strapping & mask)) {
4382 				bp->flash_info = flash;
4383 
4384 				/* Request access to the flash interface. */
4385 				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4386 					return rc;
4387 
4388 				/* Enable access to flash interface */
4389 				bnx2_enable_nvram_access(bp);
4390 
4391 				/* Reconfigure the flash interface */
4392 				BNX2_WR(bp, BNX2_NVM_CFG1, flash->config1);
4393 				BNX2_WR(bp, BNX2_NVM_CFG2, flash->config2);
4394 				BNX2_WR(bp, BNX2_NVM_CFG3, flash->config3);
4395 				BNX2_WR(bp, BNX2_NVM_WRITE1, flash->write1);
4396 
4397 				/* Disable access to flash interface */
4398 				bnx2_disable_nvram_access(bp);
4399 				bnx2_release_nvram_lock(bp);
4400 
4401 				break;
4402 			}
4403 		}
4404 	} /* if (val & 0x40000000) */
4405 
4406 	if (j == entry_count) {
4407 		bp->flash_info = NULL;
4408 		pr_alert("Unknown flash/EEPROM type\n");
4409 		return -ENODEV;
4410 	}
4411 
4412 get_flash_size:
4413 	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
4414 	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
4415 	if (val)
4416 		bp->flash_size = val;
4417 	else
4418 		bp->flash_size = bp->flash_info->total_size;
4419 
4420 	return rc;
4421 }
4422 
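/* Read an arbitrary byte range from NVRAM, using dword accesses and
 * small bounce buffers to handle unaligned start and end offsets.
 */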
4423 static int
4424 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4425 		int buf_size)
4426 {
4427 	int rc = 0;
4428 	u32 cmd_flags, offset32, len32, extra;
4429 
4430 	if (buf_size == 0)
4431 		return 0;
4432 
4433 	/* Request access to the flash interface. */
4434 	if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4435 		return rc;
4436 
4437 	/* Enable access to flash interface */
4438 	bnx2_enable_nvram_access(bp);
4439 
4440 	len32 = buf_size;
4441 	offset32 = offset;
4442 	extra = 0;
4443 
4444 	cmd_flags = 0;
4445 
4446 	if (offset32 & 3) {
4447 		u8 buf[4];
4448 		u32 pre_len;
4449 
4450 		offset32 &= ~3;
4451 		pre_len = 4 - (offset & 3);
4452 
4453 		if (pre_len >= len32) {
4454 			pre_len = len32;
4455 			cmd_flags = BNX2_NVM_COMMAND_FIRST |
4456 				    BNX2_NVM_COMMAND_LAST;
4457 		}
4458 		else {
4459 			cmd_flags = BNX2_NVM_COMMAND_FIRST;
4460 		}
4461 
4462 		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4463 
4464 		if (rc)
4465 			return rc;
4466 
4467 		memcpy(ret_buf, buf + (offset & 3), pre_len);
4468 
4469 		offset32 += 4;
4470 		ret_buf += pre_len;
4471 		len32 -= pre_len;
4472 	}
4473 	if (len32 & 3) {
4474 		extra = 4 - (len32 & 3);
4475 		len32 = (len32 + 4) & ~3;
4476 	}
4477 
4478 	if (len32 == 4) {
4479 		u8 buf[4];
4480 
4481 		if (cmd_flags)
4482 			cmd_flags = BNX2_NVM_COMMAND_LAST;
4483 		else
4484 			cmd_flags = BNX2_NVM_COMMAND_FIRST |
4485 				    BNX2_NVM_COMMAND_LAST;
4486 
4487 		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4488 
4489 		memcpy(ret_buf, buf, 4 - extra);
4490 	}
4491 	else if (len32 > 0) {
4492 		u8 buf[4];
4493 
4494 		/* Read the first word. */
4495 		if (cmd_flags)
4496 			cmd_flags = 0;
4497 		else
4498 			cmd_flags = BNX2_NVM_COMMAND_FIRST;
4499 
4500 		rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4501 
4502 		/* Advance to the next dword. */
4503 		offset32 += 4;
4504 		ret_buf += 4;
4505 		len32 -= 4;
4506 
4507 		while (len32 > 4 && rc == 0) {
4508 			rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4509 
4510 			/* Advance to the next dword. */
4511 			offset32 += 4;
4512 			ret_buf += 4;
4513 			len32 -= 4;
4514 		}
4515 
4516 		if (rc)
4517 			return rc;
4518 
4519 		cmd_flags = BNX2_NVM_COMMAND_LAST;
4520 		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4521 
4522 		memcpy(ret_buf, buf, 4 - extra);
4523 	}
4524 
4525 	/* Disable access to flash interface */
4526 	bnx2_disable_nvram_access(bp);
4527 
4528 	bnx2_release_nvram_lock(bp);
4529 
4530 	return rc;
4531 }
4532 
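/* Write an arbitrary byte range to NVRAM.  Unaligned edges are merged
 * with the existing contents, and on non-buffered flash each affected
 * page is read, erased, and rewritten in full.
 */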
4533 static int
4534 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4535 		int buf_size)
4536 {
4537 	u32 written, offset32, len32;
4538 	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4539 	int rc = 0;
4540 	int align_start, align_end;
4541 
4542 	buf = data_buf;
4543 	offset32 = offset;
4544 	len32 = buf_size;
4545 	align_start = align_end = 0;
4546 
4547 	if ((align_start = (offset32 & 3))) {
4548 		offset32 &= ~3;
4549 		len32 += align_start;
4550 		if (len32 < 4)
4551 			len32 = 4;
4552 		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4553 			return rc;
4554 	}
4555 
4556 	if (len32 & 3) {
4557 		align_end = 4 - (len32 & 3);
4558 		len32 += align_end;
4559 		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4560 			return rc;
4561 	}
4562 
4563 	if (align_start || align_end) {
4564 		align_buf = kmalloc(len32, GFP_KERNEL);
4565 		if (!align_buf)
4566 			return -ENOMEM;
4567 		if (align_start) {
4568 			memcpy(align_buf, start, 4);
4569 		}
4570 		if (align_end) {
4571 			memcpy(align_buf + len32 - 4, end, 4);
4572 		}
4573 		memcpy(align_buf + align_start, data_buf, buf_size);
4574 		buf = align_buf;
4575 	}
4576 
4577 	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4578 		flash_buffer = kmalloc(264, GFP_KERNEL);
4579 		if (!flash_buffer) {
4580 			rc = -ENOMEM;
4581 			goto nvram_write_end;
4582 		}
4583 	}
4584 
4585 	written = 0;
4586 	while ((written < len32) && (rc == 0)) {
4587 		u32 page_start, page_end, data_start, data_end;
4588 		u32 addr, cmd_flags;
4589 		int i;
4590 
		/* Find the page_start addr */
4592 		page_start = offset32 + written;
4593 		page_start -= (page_start % bp->flash_info->page_size);
4594 		/* Find the page_end addr */
4595 		page_end = page_start + bp->flash_info->page_size;
4596 		/* Find the data_start addr */
4597 		data_start = (written == 0) ? offset32 : page_start;
4598 		/* Find the data_end addr */
4599 		data_end = (page_end > offset32 + len32) ?
4600 			(offset32 + len32) : page_end;
4601 
4602 		/* Request access to the flash interface. */
4603 		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4604 			goto nvram_write_end;
4605 
4606 		/* Enable access to flash interface */
4607 		bnx2_enable_nvram_access(bp);
4608 
4609 		cmd_flags = BNX2_NVM_COMMAND_FIRST;
4610 		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4611 			int j;
4612 
			/* Read the whole page into the buffer
			 * (non-buffered flash only) */
4615 			for (j = 0; j < bp->flash_info->page_size; j += 4) {
4616 				if (j == (bp->flash_info->page_size - 4)) {
4617 					cmd_flags |= BNX2_NVM_COMMAND_LAST;
4618 				}
4619 				rc = bnx2_nvram_read_dword(bp,
4620 					page_start + j,
4621 					&flash_buffer[j],
4622 					cmd_flags);
4623 
4624 				if (rc)
4625 					goto nvram_write_end;
4626 
4627 				cmd_flags = 0;
4628 			}
4629 		}
4630 
4631 		/* Enable writes to flash interface (unlock write-protect) */
4632 		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4633 			goto nvram_write_end;
4634 
4635 		/* Loop to write back the buffer data from page_start to
4636 		 * data_start */
4637 		i = 0;
4638 		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4639 			/* Erase the page */
4640 			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4641 				goto nvram_write_end;
4642 
4643 			/* Re-enable the write again for the actual write */
4644 			bnx2_enable_nvram_write(bp);
4645 
4646 			for (addr = page_start; addr < data_start;
4647 				addr += 4, i += 4) {
4648 
4649 				rc = bnx2_nvram_write_dword(bp, addr,
4650 					&flash_buffer[i], cmd_flags);
4651 
4652 				if (rc != 0)
4653 					goto nvram_write_end;
4654 
4655 				cmd_flags = 0;
4656 			}
4657 		}
4658 
4659 		/* Loop to write the new data from data_start to data_end */
4660 		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4661 			if ((addr == page_end - 4) ||
4662 				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4663 				 (addr == data_end - 4))) {
4664 
4665 				cmd_flags |= BNX2_NVM_COMMAND_LAST;
4666 			}
4667 			rc = bnx2_nvram_write_dword(bp, addr, buf,
4668 				cmd_flags);
4669 
4670 			if (rc != 0)
4671 				goto nvram_write_end;
4672 
4673 			cmd_flags = 0;
4674 			buf += 4;
4675 		}
4676 
4677 		/* Loop to write back the buffer data from data_end
4678 		 * to page_end */
4679 		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4680 			for (addr = data_end; addr < page_end;
4681 				addr += 4, i += 4) {
4682 
				if (addr == page_end - 4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
4686 				rc = bnx2_nvram_write_dword(bp, addr,
4687 					&flash_buffer[i], cmd_flags);
4688 
4689 				if (rc != 0)
4690 					goto nvram_write_end;
4691 
4692 				cmd_flags = 0;
4693 			}
4694 		}
4695 
4696 		/* Disable writes to flash interface (lock write-protect) */
4697 		bnx2_disable_nvram_write(bp);
4698 
4699 		/* Disable access to flash interface */
4700 		bnx2_disable_nvram_access(bp);
4701 		bnx2_release_nvram_lock(bp);
4702 
4703 		/* Increment written */
4704 		written += data_end - data_start;
4705 	}
4706 
4707 nvram_write_end:
4708 	kfree(flash_buffer);
4709 	kfree(align_buf);
4710 	return rc;
4711 }
4712 
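/* Query the bootcode for optional capabilities (keeping VLAN tags and
 * remote PHY support) and acknowledge the ones the driver will use.
 */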
4713 static void
4714 bnx2_init_fw_cap(struct bnx2 *bp)
4715 {
4716 	u32 val, sig = 0;
4717 
4718 	bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4719 	bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4720 
4721 	if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4722 		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4723 
4724 	val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4725 	if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4726 		return;
4727 
4728 	if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4729 		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4730 		sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4731 	}
4732 
4733 	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4734 	    (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4735 		u32 link;
4736 
4737 		bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4738 
4739 		link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4740 		if (link & BNX2_LINK_STATUS_SERDES_LINK)
4741 			bp->phy_port = PORT_FIBRE;
4742 		else
4743 			bp->phy_port = PORT_TP;
4744 
4745 		sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4746 		       BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4747 	}
4748 
4749 	if (netif_running(bp->dev) && sig)
4750 		bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4751 }
4752 
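/* Map the MSI-X vector table and PBA through separate GRC windows. */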
4753 static void
4754 bnx2_setup_msix_tbl(struct bnx2 *bp)
4755 {
4756 	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
4757 
4758 	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
4759 	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
4760 }
4761 
4762 static void
4763 bnx2_wait_dma_complete(struct bnx2 *bp)
4764 {
4765 	u32 val;
4766 	int i;
4767 
4768 	/*
4769 	 * Wait for the current PCI transaction to complete before
4770 	 * issuing a reset.
4771 	 */
4772 	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
4773 	    (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
4774 		BNX2_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4775 			BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4776 			BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4777 			BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4778 			BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4779 		val = BNX2_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4780 		udelay(5);
4781 	} else {  /* 5709 */
4782 		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4783 		val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4784 		BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4785 		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4786 
4787 		for (i = 0; i < 100; i++) {
4788 			msleep(1);
4789 			val = BNX2_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
4790 			if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
4791 				break;
4792 		}
4793 	}
4794 
4795 	return;
4796 }
4797 
4798 
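/* Soft-reset the chip: quiesce DMA, handshake with the firmware, issue
 * the core reset, and wait for the firmware to finish reinitializing.
 */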
4799 static int
4800 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4801 {
4802 	u32 val;
4803 	int i, rc = 0;
4804 	u8 old_port;
4805 
4806 	/* Wait for the current PCI transaction to complete before
4807 	 * issuing a reset. */
4808 	bnx2_wait_dma_complete(bp);
4809 
4810 	/* Wait for the firmware to tell us it is ok to issue a reset. */
4811 	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
4812 
4813 	/* Deposit a driver reset signature so the firmware knows that
4814 	 * this is a soft reset. */
4815 	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
4816 		      BNX2_DRV_RESET_SIGNATURE_MAGIC);
4817 
	/* Do a dummy read to force the chip to complete all current
	 * transactions before we issue a reset. */
4820 	val = BNX2_RD(bp, BNX2_MISC_ID);
4821 
4822 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4823 		BNX2_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4824 		BNX2_RD(bp, BNX2_MISC_COMMAND);
4825 		udelay(5);
4826 
4827 		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4828 		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4829 
4830 		BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4831 
4832 	} else {
4833 		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4834 		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4835 		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4836 
4837 		/* Chip reset. */
4838 		BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4839 
4840 		/* Reading back any register after chip reset will hang the
4841 		 * bus on 5706 A0 and A1.  The msleep below provides plenty
4842 		 * of margin for write posting.
4843 		 */
4844 		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
4845 		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1))
4846 			msleep(20);
4847 
		/* Reset takes approximately 30 usec */
4849 		for (i = 0; i < 10; i++) {
4850 			val = BNX2_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4851 			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4852 				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4853 				break;
4854 			udelay(10);
4855 		}
4856 
4857 		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4858 			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4859 			pr_err("Chip reset did not complete\n");
4860 			return -EBUSY;
4861 		}
4862 	}
4863 
4864 	/* Make sure byte swapping is properly configured. */
4865 	val = BNX2_RD(bp, BNX2_PCI_SWAP_DIAG0);
4866 	if (val != 0x01020304) {
4867 		pr_err("Chip not in correct endian mode\n");
4868 		return -ENODEV;
4869 	}
4870 
4871 	/* Wait for the firmware to finish its initialization. */
4872 	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
4873 	if (rc)
4874 		return rc;
4875 
4876 	spin_lock_bh(&bp->phy_lock);
4877 	old_port = bp->phy_port;
4878 	bnx2_init_fw_cap(bp);
4879 	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
4880 	    old_port != bp->phy_port)
4881 		bnx2_set_default_remote_link(bp);
4882 	spin_unlock_bh(&bp->phy_lock);
4883 
4884 	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
		/* Adjust the voltage regulator to two steps lower.  The default
		 * of this register is 0x0000000e. */
4887 		BNX2_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4888 
4889 		/* Remove bad rbuf memory from the free pool. */
4890 		rc = bnx2_alloc_bad_rbuf(bp);
4891 	}
4892 
4893 	if (bp->flags & BNX2_FLAG_USING_MSIX) {
4894 		bnx2_setup_msix_tbl(bp);
		/* Prevent MSIX table reads and writes from timing out */
4896 		BNX2_WR(bp, BNX2_MISC_ECO_HW_CTL,
4897 			BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
4898 	}
4899 
4900 	return rc;
4901 }
4902 
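/* Bring the chip up after a reset: program DMA and context memory,
 * load the internal CPU firmware, set the MTU, and configure host
 * coalescing for every status block.
 */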
4903 static int
4904 bnx2_init_chip(struct bnx2 *bp)
4905 {
4906 	u32 val, mtu;
4907 	int rc, i;
4908 
4909 	/* Make sure the interrupt is not active. */
4910 	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4911 
4912 	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4913 	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4914 #ifdef __BIG_ENDIAN
4915 	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4916 #endif
4917 	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4918 	      DMA_READ_CHANS << 12 |
4919 	      DMA_WRITE_CHANS << 16;
4920 
4921 	val |= (0x2 << 20) | (1 << 11);
4922 
4923 	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4924 		val |= (1 << 23);
4925 
4926 	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) &&
4927 	    (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0) &&
4928 	    !(bp->flags & BNX2_FLAG_PCIX))
4929 		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4930 
4931 	BNX2_WR(bp, BNX2_DMA_CONFIG, val);
4932 
4933 	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
4934 		val = BNX2_RD(bp, BNX2_TDMA_CONFIG);
4935 		val |= BNX2_TDMA_CONFIG_ONE_DMA;
4936 		BNX2_WR(bp, BNX2_TDMA_CONFIG, val);
4937 	}
4938 
4939 	if (bp->flags & BNX2_FLAG_PCIX) {
4940 		u16 val16;
4941 
4942 		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4943 				     &val16);
4944 		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4945 				      val16 & ~PCI_X_CMD_ERO);
4946 	}
4947 
4948 	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4949 		BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4950 		BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4951 		BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4952 
4953 	/* Initialize context mapping and zero out the quick contexts.  The
4954 	 * context block must have already been enabled. */
4955 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4956 		rc = bnx2_init_5709_context(bp);
4957 		if (rc)
4958 			return rc;
	} else {
		bnx2_init_context(bp);
	}
4961 
4962 	if ((rc = bnx2_init_cpus(bp)) != 0)
4963 		return rc;
4964 
4965 	bnx2_init_nvram(bp);
4966 
4967 	bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4968 
4969 	val = BNX2_RD(bp, BNX2_MQ_CONFIG);
4970 	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4971 	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4972 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4973 		val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
4974 		if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
4975 			val |= BNX2_MQ_CONFIG_HALT_DIS;
4976 	}
4977 
4978 	BNX2_WR(bp, BNX2_MQ_CONFIG, val);
4979 
4980 	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4981 	BNX2_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4982 	BNX2_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4983 
4984 	val = (BNX2_PAGE_BITS - 8) << 24;
4985 	BNX2_WR(bp, BNX2_RV2P_CONFIG, val);
4986 
4987 	/* Configure page size. */
4988 	val = BNX2_RD(bp, BNX2_TBDR_CONFIG);
4989 	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4990 	val |= (BNX2_PAGE_BITS - 8) << 24 | 0x40;
4991 	BNX2_WR(bp, BNX2_TBDR_CONFIG, val);
4992 
4993 	val = bp->mac_addr[0] +
4994 	      (bp->mac_addr[1] << 8) +
4995 	      (bp->mac_addr[2] << 16) +
4996 	      bp->mac_addr[3] +
4997 	      (bp->mac_addr[4] << 8) +
4998 	      (bp->mac_addr[5] << 16);
4999 	BNX2_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
5000 
5001 	/* Program the MTU.  Also include 4 bytes for CRC32. */
5002 	mtu = bp->dev->mtu;
5003 	val = mtu + ETH_HLEN + ETH_FCS_LEN;
5004 	if (val > (MAX_ETHERNET_PACKET_SIZE + ETH_HLEN + 4))
5005 		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
5006 	BNX2_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
5007 
5008 	if (mtu < ETH_DATA_LEN)
5009 		mtu = ETH_DATA_LEN;
5010 
5011 	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
5012 	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
5013 	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
5014 
5015 	memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
5016 	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
5017 		bp->bnx2_napi[i].last_status_idx = 0;
5018 
5019 	bp->idle_chk_status_idx = 0xffff;
5020 
5021 	/* Set up how to generate a link change interrupt. */
5022 	BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
5023 
5024 	BNX2_WR(bp, BNX2_HC_STATUS_ADDR_L,
5025 		(u64) bp->status_blk_mapping & 0xffffffff);
5026 	BNX2_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
5027 
5028 	BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
5029 		(u64) bp->stats_blk_mapping & 0xffffffff);
5030 	BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
5031 		(u64) bp->stats_blk_mapping >> 32);
5032 
5033 	BNX2_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
5034 		(bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
5035 
5036 	BNX2_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
5037 		(bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
5038 
5039 	BNX2_WR(bp, BNX2_HC_COMP_PROD_TRIP,
5040 		(bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
5041 
5042 	BNX2_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
5043 
5044 	BNX2_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
5045 
5046 	BNX2_WR(bp, BNX2_HC_COM_TICKS,
5047 		(bp->com_ticks_int << 16) | bp->com_ticks);
5048 
5049 	BNX2_WR(bp, BNX2_HC_CMD_TICKS,
5050 		(bp->cmd_ticks_int << 16) | bp->cmd_ticks);
5051 
5052 	if (bp->flags & BNX2_FLAG_BROKEN_STATS)
5053 		BNX2_WR(bp, BNX2_HC_STATS_TICKS, 0);
5054 	else
5055 		BNX2_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
5056 	BNX2_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */
5057 
	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) {
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	} else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}
5064 
5065 	if (bp->flags & BNX2_FLAG_USING_MSIX) {
5066 		BNX2_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
5067 			BNX2_HC_MSIX_BIT_VECTOR_VAL);
5068 
5069 		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
5070 	}
5071 
5072 	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
5073 		val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;
5074 
5075 	BNX2_WR(bp, BNX2_HC_CONFIG, val);
5076 
5077 	if (bp->rx_ticks < 25)
5078 		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1);
5079 	else
5080 		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 0);
5081 
5082 	for (i = 1; i < bp->irq_nvecs; i++) {
5083 		u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
5084 			   BNX2_HC_SB_CONFIG_1;
5085 
5086 		BNX2_WR(bp, base,
5087 			BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
5088 			BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
5089 			BNX2_HC_SB_CONFIG_1_ONE_SHOT);
5090 
5091 		BNX2_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
5092 			(bp->tx_quick_cons_trip_int << 16) |
5093 			 bp->tx_quick_cons_trip);
5094 
5095 		BNX2_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
5096 			(bp->tx_ticks_int << 16) | bp->tx_ticks);
5097 
5098 		BNX2_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
5099 			(bp->rx_quick_cons_trip_int << 16) |
5100 			bp->rx_quick_cons_trip);
5101 
5102 		BNX2_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
5103 			(bp->rx_ticks_int << 16) | bp->rx_ticks);
5104 	}
5105 
5106 	/* Clear internal stats counters. */
5107 	BNX2_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
5108 
5109 	BNX2_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
5110 
5111 	/* Initialize the receive filter. */
5112 	bnx2_set_rx_mode(bp->dev);
5113 
5114 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5115 		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
5116 		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
5117 		BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
5118 	}
5119 	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
5120 			  1, 0);
5121 
5122 	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
5123 	BNX2_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
5124 
5125 	udelay(20);
5126 
5127 	bp->hc_cmd = BNX2_RD(bp, BNX2_HC_COMMAND);
5128 
5129 	return rc;
5130 }
5131 
5132 static void
5133 bnx2_clear_ring_states(struct bnx2 *bp)
5134 {
5135 	struct bnx2_napi *bnapi;
5136 	struct bnx2_tx_ring_info *txr;
5137 	struct bnx2_rx_ring_info *rxr;
5138 	int i;
5139 
5140 	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5141 		bnapi = &bp->bnx2_napi[i];
5142 		txr = &bnapi->tx_ring;
5143 		rxr = &bnapi->rx_ring;
5144 
5145 		txr->tx_cons = 0;
5146 		txr->hw_tx_cons = 0;
5147 		rxr->rx_prod_bseq = 0;
5148 		rxr->rx_prod = 0;
5149 		rxr->rx_cons = 0;
5150 		rxr->rx_pg_prod = 0;
5151 		rxr->rx_pg_cons = 0;
5152 	}
5153 }
5154 
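/* Program a TX ring's chip context: the ring type and the host address
 * of its buffer descriptor chain.  The 5709 uses different context
 * offsets than the older chips.
 */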
5155 static void
5156 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5157 {
5158 	u32 val, offset0, offset1, offset2, offset3;
5159 	u32 cid_addr = GET_CID_ADDR(cid);
5160 
5161 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5162 		offset0 = BNX2_L2CTX_TYPE_XI;
5163 		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5164 		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5165 		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5166 	} else {
5167 		offset0 = BNX2_L2CTX_TYPE;
5168 		offset1 = BNX2_L2CTX_CMD_TYPE;
5169 		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5170 		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5171 	}
5172 	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
5173 	bnx2_ctx_wr(bp, cid_addr, offset0, val);
5174 
5175 	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5176 	bnx2_ctx_wr(bp, cid_addr, offset1, val);
5177 
5178 	val = (u64) txr->tx_desc_mapping >> 32;
5179 	bnx2_ctx_wr(bp, cid_addr, offset2, val);
5180 
5181 	val = (u64) txr->tx_desc_mapping & 0xffffffff;
5182 	bnx2_ctx_wr(bp, cid_addr, offset3, val);
5183 }
5184 
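/* Set up one TX ring: link the last descriptor back to the start of
 * the chain and program the ring's context in the chip.
 */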
5185 static void
5186 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5187 {
5188 	struct bnx2_tx_bd *txbd;
5189 	u32 cid = TX_CID;
5190 	struct bnx2_napi *bnapi;
5191 	struct bnx2_tx_ring_info *txr;
5192 
5193 	bnapi = &bp->bnx2_napi[ring_num];
5194 	txr = &bnapi->tx_ring;
5195 
5196 	if (ring_num == 0)
5197 		cid = TX_CID;
5198 	else
5199 		cid = TX_TSS_CID + ring_num - 1;
5200 
5201 	bp->tx_wake_thresh = bp->tx_ring_size / 2;
5202 
5203 	txbd = &txr->tx_desc_ring[BNX2_MAX_TX_DESC_CNT];
5204 
5205 	txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5206 	txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5207 
5208 	txr->tx_prod = 0;
5209 	txr->tx_prod_bseq = 0;
5210 
5211 	txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5212 	txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5213 
5214 	bnx2_init_tx_context(bp, cid, txr);
5215 }
5216 
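/* Initialize a chain of RX descriptor pages, pointing each page's last
 * descriptor at the next page (and the final page back at the first).
 */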
5217 static void
5218 bnx2_init_rxbd_rings(struct bnx2_rx_bd *rx_ring[], dma_addr_t dma[],
5219 		     u32 buf_size, int num_rings)
5220 {
5221 	int i;
5222 	struct bnx2_rx_bd *rxbd;
5223 
5224 	for (i = 0; i < num_rings; i++) {
5225 		int j;
5226 
5227 		rxbd = &rx_ring[i][0];
5228 		for (j = 0; j < BNX2_MAX_RX_DESC_CNT; j++, rxbd++) {
5229 			rxbd->rx_bd_len = buf_size;
5230 			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5231 		}
5232 		if (i == (num_rings - 1))
5233 			j = 0;
5234 		else
5235 			j = i + 1;
5236 		rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5237 		rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5238 	}
5239 }
5240 
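/* Set up one RX ring: program its chip context, attach the optional
 * page ring used for jumbo frames, and pre-fill both rings with
 * receive buffers.
 */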
5241 static void
5242 bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
5243 {
5244 	int i;
5245 	u16 prod, ring_prod;
5246 	u32 cid, rx_cid_addr, val;
5247 	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
5248 	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5249 
5250 	if (ring_num == 0)
5251 		cid = RX_CID;
5252 	else
5253 		cid = RX_RSS_CID + ring_num - 1;
5254 
5255 	rx_cid_addr = GET_CID_ADDR(cid);
5256 
5257 	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
5258 			     bp->rx_buf_use_size, bp->rx_max_ring);
5259 
5260 	bnx2_init_rx_context(bp, cid);
5261 
5262 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5263 		val = BNX2_RD(bp, BNX2_MQ_MAP_L2_5);
5264 		BNX2_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
5265 	}
5266 
5267 	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
5268 	if (bp->rx_pg_ring_size) {
5269 		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
5270 				     rxr->rx_pg_desc_mapping,
5271 				     PAGE_SIZE, bp->rx_max_pg_ring);
5272 		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
5273 		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
5274 		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
5275 		       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);
5276 
5277 		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
5278 		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
5279 
5280 		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
5281 		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
5282 
5283 		if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5284 			BNX2_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
5285 	}
5286 
5287 	val = (u64) rxr->rx_desc_mapping[0] >> 32;
5288 	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
5289 
5290 	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
5291 	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
5292 
5293 	ring_prod = prod = rxr->rx_pg_prod;
5294 	for (i = 0; i < bp->rx_pg_ring_size; i++) {
5295 		if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
5296 			netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
5297 				    ring_num, i, bp->rx_pg_ring_size);
5298 			break;
5299 		}
5300 		prod = BNX2_NEXT_RX_BD(prod);
5301 		ring_prod = BNX2_RX_PG_RING_IDX(prod);
5302 	}
5303 	rxr->rx_pg_prod = prod;
5304 
5305 	ring_prod = prod = rxr->rx_prod;
5306 	for (i = 0; i < bp->rx_ring_size; i++) {
5307 		if (bnx2_alloc_rx_data(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
5308 			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
5309 				    ring_num, i, bp->rx_ring_size);
5310 			break;
5311 		}
5312 		prod = BNX2_NEXT_RX_BD(prod);
5313 		ring_prod = BNX2_RX_RING_IDX(prod);
5314 	}
5315 	rxr->rx_prod = prod;
5316 
5317 	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
5318 	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
5319 	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;
5320 
5321 	BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
5322 	BNX2_WR16(bp, rxr->rx_bidx_addr, prod);
5323 
5324 	BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
5325 }
5326 
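/* Initialize every TX and RX ring.  With more than one RX ring, the
 * RSS indirection table is also programmed, eight entries per 32-bit
 * write.
 */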
5327 static void
5328 bnx2_init_all_rings(struct bnx2 *bp)
5329 {
5330 	int i;
5331 	u32 val;
5332 
5333 	bnx2_clear_ring_states(bp);
5334 
5335 	BNX2_WR(bp, BNX2_TSCH_TSS_CFG, 0);
5336 	for (i = 0; i < bp->num_tx_rings; i++)
5337 		bnx2_init_tx_ring(bp, i);
5338 
5339 	if (bp->num_tx_rings > 1)
5340 		BNX2_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
5341 			(TX_TSS_CID << 7));
5342 
5343 	BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
5344 	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);
5345 
5346 	for (i = 0; i < bp->num_rx_rings; i++)
5347 		bnx2_init_rx_ring(bp, i);
5348 
5349 	if (bp->num_rx_rings > 1) {
5350 		u32 tbl_32 = 0;
5351 
5352 		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
5353 			int shift = (i % 8) << 2;
5354 
5355 			tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift;
5356 			if ((i % 8) == 7) {
5357 				BNX2_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32);
5358 				BNX2_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) |
5359 					BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
5360 					BNX2_RLUP_RSS_COMMAND_WRITE |
5361 					BNX2_RLUP_RSS_COMMAND_HASH_MASK);
5362 				tbl_32 = 0;
5363 			}
5364 		}
5365 
5366 		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
5367 		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;
5368 
5369 		BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
5370 
5371 	}
5372 }
5373 
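/* Return the number of descriptor pages needed for ring_size entries,
 * rounded up to a power of two and capped at max_size.
 */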
5374 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5375 {
5376 	u32 max, num_rings = 1;
5377 
5378 	while (ring_size > BNX2_MAX_RX_DESC_CNT) {
5379 		ring_size -= BNX2_MAX_RX_DESC_CNT;
5380 		num_rings++;
5381 	}
5382 	/* round to next power of 2 */
5383 	max = max_size;
5384 	while ((max & num_rings) == 0)
5385 		max >>= 1;
5386 
5387 	if (num_rings != max)
5388 		max <<= 1;
5389 
5390 	return max;
5391 }
5392 
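/* Compute the RX buffer and ring geometry for the current MTU.  When a
 * full frame plus overhead no longer fits in one page, the jumbo page
 * ring is enabled and large frames are split across pages.
 */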
5393 static void
5394 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
5395 {
5396 	u32 rx_size, rx_space, jumbo_size;
5397 
5398 	/* 8 for CRC and VLAN */
5399 	rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
5400 
5401 	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
5402 		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5403 
5404 	bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
5405 	bp->rx_pg_ring_size = 0;
5406 	bp->rx_max_pg_ring = 0;
5407 	bp->rx_max_pg_ring_idx = 0;
5408 	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
5409 		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
5410 
5411 		jumbo_size = size * pages;
5412 		if (jumbo_size > BNX2_MAX_TOTAL_RX_PG_DESC_CNT)
5413 			jumbo_size = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;
5414 
5415 		bp->rx_pg_ring_size = jumbo_size;
5416 		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
5417 							BNX2_MAX_RX_PG_RINGS);
5418 		bp->rx_max_pg_ring_idx =
5419 			(bp->rx_max_pg_ring * BNX2_RX_DESC_CNT) - 1;
5420 		rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
5421 		bp->rx_copy_thresh = 0;
5422 	}
5423 
5424 	bp->rx_buf_use_size = rx_size;
	/* hw alignment + build_skb() overhead */
5426 	bp->rx_buf_size = SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
5427 		NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5428 	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
5429 	bp->rx_ring_size = size;
5430 	bp->rx_max_ring = bnx2_find_max_ring(size, BNX2_MAX_RX_RINGS);
5431 	bp->rx_max_ring_idx = (bp->rx_max_ring * BNX2_RX_DESC_CNT) - 1;
5432 }
5433 
5434 static void
5435 bnx2_free_tx_skbs(struct bnx2 *bp)
5436 {
5437 	int i;
5438 
5439 	for (i = 0; i < bp->num_tx_rings; i++) {
5440 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5441 		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5442 		int j;
5443 
5444 		if (!txr->tx_buf_ring)
5445 			continue;
5446 
5447 		for (j = 0; j < BNX2_TX_DESC_CNT; ) {
5448 			struct bnx2_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
5449 			struct sk_buff *skb = tx_buf->skb;
5450 			int k, last;
5451 
5452 			if (!skb) {
5453 				j = BNX2_NEXT_TX_BD(j);
5454 				continue;
5455 			}
5456 
5457 			dma_unmap_single(&bp->pdev->dev,
5458 					 dma_unmap_addr(tx_buf, mapping),
5459 					 skb_headlen(skb),
5460 					 PCI_DMA_TODEVICE);
5461 
5462 			tx_buf->skb = NULL;
5463 
5464 			last = tx_buf->nr_frags;
5465 			j = BNX2_NEXT_TX_BD(j);
5466 			for (k = 0; k < last; k++, j = BNX2_NEXT_TX_BD(j)) {
5467 				tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(j)];
5468 				dma_unmap_page(&bp->pdev->dev,
5469 					dma_unmap_addr(tx_buf, mapping),
5470 					skb_frag_size(&skb_shinfo(skb)->frags[k]),
5471 					PCI_DMA_TODEVICE);
5472 			}
5473 			dev_kfree_skb(skb);
5474 		}
5475 		netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
5476 	}
5477 }
5478 
5479 static void
5480 bnx2_free_rx_skbs(struct bnx2 *bp)
5481 {
5482 	int i;
5483 
5484 	for (i = 0; i < bp->num_rx_rings; i++) {
5485 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5486 		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5487 		int j;
5488 
5489 		if (!rxr->rx_buf_ring)
5490 			return;
5491 
5492 		for (j = 0; j < bp->rx_max_ring_idx; j++) {
5493 			struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5494 			u8 *data = rx_buf->data;
5495 
5496 			if (!data)
5497 				continue;
5498 
5499 			dma_unmap_single(&bp->pdev->dev,
5500 					 dma_unmap_addr(rx_buf, mapping),
5501 					 bp->rx_buf_use_size,
5502 					 PCI_DMA_FROMDEVICE);
5503 
5504 			rx_buf->data = NULL;
5505 
5506 			kfree(data);
5507 		}
5508 		for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5509 			bnx2_free_rx_page(bp, rxr, j);
5510 	}
5511 }
5512 
5513 static void
5514 bnx2_free_skbs(struct bnx2 *bp)
5515 {
5516 	bnx2_free_tx_skbs(bp);
5517 	bnx2_free_rx_skbs(bp);
5518 }
5519 
5520 static int
5521 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5522 {
5523 	int rc;
5524 
5525 	rc = bnx2_reset_chip(bp, reset_code);
5526 	bnx2_free_skbs(bp);
5527 	if (rc)
5528 		return rc;
5529 
5530 	if ((rc = bnx2_init_chip(bp)) != 0)
5531 		return rc;
5532 
5533 	bnx2_init_all_rings(bp);
5534 	return 0;
5535 }
5536 
5537 static int
5538 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5539 {
5540 	int rc;
5541 
5542 	if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5543 		return rc;
5544 
5545 	spin_lock_bh(&bp->phy_lock);
5546 	bnx2_init_phy(bp, reset_phy);
5547 	bnx2_set_link(bp);
5548 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5549 		bnx2_remote_phy_event(bp);
5550 	spin_unlock_bh(&bp->phy_lock);
5551 	return 0;
5552 }
5553 
5554 static int
5555 bnx2_shutdown_chip(struct bnx2 *bp)
5556 {
5557 	u32 reset_code;
5558 
5559 	if (bp->flags & BNX2_FLAG_NO_WOL)
5560 		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5561 	else if (bp->wol)
5562 		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5563 	else
5564 		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5565 
5566 	return bnx2_reset_chip(bp, reset_code);
5567 }
5568 
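/* Ethtool register self-test: for each entry in reg_tbl, check that
 * the read/write bits toggle and the read-only bits hold their value,
 * restoring the original contents afterwards.
 */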
5569 static int
5570 bnx2_test_registers(struct bnx2 *bp)
5571 {
5572 	int ret;
5573 	int i, is_5709;
5574 	static const struct {
5575 		u16   offset;
5576 		u16   flags;
5577 #define BNX2_FL_NOT_5709	1
5578 		u32   rw_mask;
5579 		u32   ro_mask;
5580 	} reg_tbl[] = {
5581 		{ 0x006c, 0, 0x00000000, 0x0000003f },
5582 		{ 0x0090, 0, 0xffffffff, 0x00000000 },
5583 		{ 0x0094, 0, 0x00000000, 0x00000000 },
5584 
5585 		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
5586 		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5587 		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5588 		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
5589 		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
5590 		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5591 		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
5592 		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5593 		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5594 
5595 		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5596 		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5597 		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5598 		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5599 		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5600 		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5601 
5602 		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5603 		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
5604 		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },
5605 
5606 		{ 0x1000, 0, 0x00000000, 0x00000001 },
5607 		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },
5608 
5609 		{ 0x1408, 0, 0x01c00800, 0x00000000 },
5610 		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
5611 		{ 0x14a8, 0, 0x00000000, 0x000001ff },
5612 		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
5613 		{ 0x14b0, 0, 0x00000002, 0x00000001 },
5614 		{ 0x14b8, 0, 0x00000000, 0x00000000 },
5615 		{ 0x14c0, 0, 0x00000000, 0x00000009 },
5616 		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
5617 		{ 0x14cc, 0, 0x00000000, 0x00000001 },
5618 		{ 0x14d0, 0, 0xffffffff, 0x00000000 },
5619 
5620 		{ 0x1800, 0, 0x00000000, 0x00000001 },
5621 		{ 0x1804, 0, 0x00000000, 0x00000003 },
5622 
5623 		{ 0x2800, 0, 0x00000000, 0x00000001 },
5624 		{ 0x2804, 0, 0x00000000, 0x00003f01 },
5625 		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
5626 		{ 0x2810, 0, 0xffff0000, 0x00000000 },
5627 		{ 0x2814, 0, 0xffff0000, 0x00000000 },
5628 		{ 0x2818, 0, 0xffff0000, 0x00000000 },
5629 		{ 0x281c, 0, 0xffff0000, 0x00000000 },
5630 		{ 0x2834, 0, 0xffffffff, 0x00000000 },
5631 		{ 0x2840, 0, 0x00000000, 0xffffffff },
5632 		{ 0x2844, 0, 0x00000000, 0xffffffff },
5633 		{ 0x2848, 0, 0xffffffff, 0x00000000 },
5634 		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },
5635 
5636 		{ 0x2c00, 0, 0x00000000, 0x00000011 },
5637 		{ 0x2c04, 0, 0x00000000, 0x00030007 },
5638 
5639 		{ 0x3c00, 0, 0x00000000, 0x00000001 },
5640 		{ 0x3c04, 0, 0x00000000, 0x00070000 },
5641 		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
5642 		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
5643 		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
5644 		{ 0x3c14, 0, 0x00000000, 0xffffffff },
5645 		{ 0x3c18, 0, 0x00000000, 0xffffffff },
5646 		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
5647 		{ 0x3c20, 0, 0xffffff00, 0x00000000 },
5648 
5649 		{ 0x5004, 0, 0x00000000, 0x0000007f },
5650 		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },
5651 
5652 		{ 0x5c00, 0, 0x00000000, 0x00000001 },
5653 		{ 0x5c04, 0, 0x00000000, 0x0003000f },
5654 		{ 0x5c08, 0, 0x00000003, 0x00000000 },
5655 		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
5656 		{ 0x5c10, 0, 0x00000000, 0xffffffff },
5657 		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
5658 		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
5659 		{ 0x5c88, 0, 0x00000000, 0x00077373 },
5660 		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },
5661 
5662 		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
5663 		{ 0x680c, 0, 0xffffffff, 0x00000000 },
5664 		{ 0x6810, 0, 0xffffffff, 0x00000000 },
5665 		{ 0x6814, 0, 0xffffffff, 0x00000000 },
5666 		{ 0x6818, 0, 0xffffffff, 0x00000000 },
5667 		{ 0x681c, 0, 0xffffffff, 0x00000000 },
5668 		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
5669 		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
5670 		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
5671 		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
5672 		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
5673 		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
5674 		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
5675 		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
5676 		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
5677 		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
5678 		{ 0x684c, 0, 0xffffffff, 0x00000000 },
5679 		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
5680 		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
5681 		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
5682 		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
5683 		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
5684 		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },
5685 
5686 		{ 0xffff, 0, 0x00000000, 0x00000000 },
5687 	};
5688 
5689 	ret = 0;
5690 	is_5709 = 0;
5691 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5692 		is_5709 = 1;
5693 
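	/* For each entry, write 0 and then all-ones: the read/write bits
	 * must follow the written value and the read-only bits must keep
	 * their saved value.  The original value is restored afterwards.
	 */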
5694 	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
5695 		u32 offset, rw_mask, ro_mask, save_val, val;
5696 		u16 flags = reg_tbl[i].flags;
5697 
5698 		if (is_5709 && (flags & BNX2_FL_NOT_5709))
5699 			continue;
5700 
5701 		offset = (u32) reg_tbl[i].offset;
5702 		rw_mask = reg_tbl[i].rw_mask;
5703 		ro_mask = reg_tbl[i].ro_mask;
5704 
5705 		save_val = readl(bp->regview + offset);
5706 
5707 		writel(0, bp->regview + offset);
5708 
5709 		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0)
			goto reg_test_err;

		if ((val & ro_mask) != (save_val & ro_mask))
			goto reg_test_err;

		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask)
			goto reg_test_err;

		if ((val & ro_mask) != (save_val & ro_mask))
			goto reg_test_err;
5728 
5729 		writel(save_val, bp->regview + offset);
5730 		continue;
5731 
5732 reg_test_err:
5733 		writel(save_val, bp->regview + offset);
5734 		ret = -ENODEV;
5735 		break;
5736 	}
5737 	return ret;
5738 }
5739 
5740 static int
5741 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5742 {
	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
		0xaaaaaaaa, 0xaa55aa55, 0x55aa55aa };
5745 	int i;
5746 
	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
5748 		u32 offset;
5749 
5750 		for (offset = 0; offset < size; offset += 4) {
5751 
5752 			bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5753 
5754 			if (bnx2_reg_rd_ind(bp, start + offset) !=
5755 				test_pattern[i]) {
5756 				return -ENODEV;
5757 			}
5758 		}
5759 	}
5760 	return 0;
5761 }
5762 
5763 static int
5764 bnx2_test_memory(struct bnx2 *bp)
5765 {
5766 	int ret = 0;
5767 	int i;
5768 	static struct mem_entry {
5769 		u32   offset;
5770 		u32   len;
5771 	} mem_tbl_5706[] = {
5772 		{ 0x60000,  0x4000 },
5773 		{ 0xa0000,  0x3000 },
5774 		{ 0xe0000,  0x4000 },
5775 		{ 0x120000, 0x4000 },
5776 		{ 0x1a0000, 0x4000 },
5777 		{ 0x160000, 0x4000 },
5778 		{ 0xffffffff, 0    },
5779 	},
5780 	mem_tbl_5709[] = {
5781 		{ 0x60000,  0x4000 },
5782 		{ 0xa0000,  0x3000 },
5783 		{ 0xe0000,  0x4000 },
5784 		{ 0x120000, 0x4000 },
5785 		{ 0x1a0000, 0x4000 },
5786 		{ 0xffffffff, 0    },
5787 	};
5788 	struct mem_entry *mem_tbl;
5789 
5790 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5791 		mem_tbl = mem_tbl_5709;
5792 	else
5793 		mem_tbl = mem_tbl_5706;
5794 
5795 	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		ret = bnx2_do_mem_test(bp, mem_tbl[i].offset, mem_tbl[i].len);
		if (ret != 0)
			return ret;
5800 	}
5801 
5802 	return ret;
5803 }
5804 
5805 #define BNX2_MAC_LOOPBACK	0
5806 #define BNX2_PHY_LOOPBACK	1
5807 
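/* Send one self-addressed test frame through the internal MAC or PHY
 * loopback path and verify that it arrives intact on RX ring 0.
 */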
5808 static int
5809 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5810 {
5811 	unsigned int pkt_size, num_pkts, i;
5812 	struct sk_buff *skb;
5813 	u8 *data;
5814 	unsigned char *packet;
5815 	u16 rx_start_idx, rx_idx;
5816 	dma_addr_t map;
5817 	struct bnx2_tx_bd *txbd;
5818 	struct bnx2_sw_bd *rx_buf;
5819 	struct l2_fhdr *rx_hdr;
5820 	int ret = -ENODEV;
5821 	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
5822 	struct bnx2_tx_ring_info *txr;
5823 	struct bnx2_rx_ring_info *rxr;
5824 
5825 	tx_napi = bnapi;
5826 
5827 	txr = &tx_napi->tx_ring;
5828 	rxr = &bnapi->rx_ring;
5829 	if (loopback_mode == BNX2_MAC_LOOPBACK) {
5830 		bp->loopback = MAC_LOOPBACK;
5831 		bnx2_set_mac_loopback(bp);
5832 	}
5833 	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
5834 		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5835 			return 0;
5836 
5837 		bp->loopback = PHY_LOOPBACK;
5838 		bnx2_set_phy_loopback(bp);
5839 	}
5840 	else
5841 		return -EINVAL;
5842 
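	/* Use the largest frame that still fits in a single RX buffer
	 * (the jumbo threshold less 4 bytes of CRC).
	 */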
5843 	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
5844 	skb = netdev_alloc_skb(bp->dev, pkt_size);
5845 	if (!skb)
5846 		return -ENOMEM;
5847 	packet = skb_put(skb, pkt_size);
5848 	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
5849 	memset(packet + ETH_ALEN, 0x0, 8);
5850 	for (i = 14; i < pkt_size; i++)
5851 		packet[i] = (unsigned char) (i & 0xff);
5852 
	map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
			     DMA_TO_DEVICE);
5855 	if (dma_mapping_error(&bp->pdev->dev, map)) {
5856 		dev_kfree_skb(skb);
5857 		return -EIO;
5858 	}
5859 
5860 	BNX2_WR(bp, BNX2_HC_COMMAND,
5861 		bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5862 
5863 	BNX2_RD(bp, BNX2_HC_COMMAND);
5864 
5865 	udelay(5);
5866 	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
5867 
5868 	num_pkts = 0;
5869 
5870 	txbd = &txr->tx_desc_ring[BNX2_TX_RING_IDX(txr->tx_prod)];
5871 
5872 	txbd->tx_bd_haddr_hi = (u64) map >> 32;
5873 	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5874 	txbd->tx_bd_mss_nbytes = pkt_size;
5875 	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5876 
5877 	num_pkts++;
5878 	txr->tx_prod = BNX2_NEXT_TX_BD(txr->tx_prod);
5879 	txr->tx_prod_bseq += pkt_size;
5880 
5881 	BNX2_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
5882 	BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
5883 
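	/* Give the chip time to transmit the frame and loop it back. */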
5884 	udelay(100);
5885 
5886 	BNX2_WR(bp, BNX2_HC_COMMAND,
5887 		bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5888 
5889 	BNX2_RD(bp, BNX2_HC_COMMAND);
5890 
5891 	udelay(5);
5892 
	dma_unmap_single(&bp->pdev->dev, map, pkt_size, DMA_TO_DEVICE);
5894 	dev_kfree_skb(skb);
5895 
5896 	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
5897 		goto loopback_test_done;
5898 
5899 	rx_idx = bnx2_get_hw_rx_cons(bnapi);
	if (rx_idx != rx_start_idx + num_pkts)
		goto loopback_test_done;
5903 
5904 	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
5905 	data = rx_buf->data;
5906 
5907 	rx_hdr = get_l2_fhdr(data);
5908 	data = (u8 *)rx_hdr + BNX2_RX_OFFSET;
5909 
	dma_sync_single_for_cpu(&bp->pdev->dev,
		dma_unmap_addr(rx_buf, mapping),
		bp->rx_buf_use_size, DMA_FROM_DEVICE);
5913 
	if (rx_hdr->l2_fhdr_status &
	    (L2_FHDR_ERRORS_BAD_CRC |
	     L2_FHDR_ERRORS_PHY_DECODE |
	     L2_FHDR_ERRORS_ALIGNMENT |
	     L2_FHDR_ERRORS_TOO_SHORT |
	     L2_FHDR_ERRORS_GIANT_FRAME))
		goto loopback_test_done;

	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size)
		goto loopback_test_done;

	for (i = 14; i < pkt_size; i++) {
		if (*(data + i) != (unsigned char) (i & 0xff))
			goto loopback_test_done;
	}
5933 
5934 	ret = 0;
5935 
5936 loopback_test_done:
5937 	bp->loopback = 0;
5938 	return ret;
5939 }
5940 
5941 #define BNX2_MAC_LOOPBACK_FAILED	1
5942 #define BNX2_PHY_LOOPBACK_FAILED	2
5943 #define BNX2_LOOPBACK_FAILED		(BNX2_MAC_LOOPBACK_FAILED |	\
5944 					 BNX2_PHY_LOOPBACK_FAILED)
5945 
5946 static int
5947 bnx2_test_loopback(struct bnx2 *bp)
5948 {
5949 	int rc = 0;
5950 
5951 	if (!netif_running(bp->dev))
5952 		return BNX2_LOOPBACK_FAILED;
5953 
5954 	bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5955 	spin_lock_bh(&bp->phy_lock);
5956 	bnx2_init_phy(bp, 1);
5957 	spin_unlock_bh(&bp->phy_lock);
5958 	if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5959 		rc |= BNX2_MAC_LOOPBACK_FAILED;
5960 	if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5961 		rc |= BNX2_PHY_LOOPBACK_FAILED;
5962 	return rc;
5963 }
5964 
5965 #define NVRAM_SIZE 0x200
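/* CRC32 over a block that ends with its own little-endian CRC leaves
 * this well-known residual value when the data is intact.
 */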
5966 #define CRC32_RESIDUAL 0xdebb20e3
5967 
5968 static int
5969 bnx2_test_nvram(struct bnx2 *bp)
5970 {
5971 	__be32 buf[NVRAM_SIZE / 4];
5972 	u8 *data = (u8 *) buf;
5973 	int rc = 0;
5974 	u32 magic, csum;
5975 
	rc = bnx2_nvram_read(bp, 0, data, 4);
	if (rc != 0)
		goto test_nvram_done;
5978 
	magic = be32_to_cpu(buf[0]);
5980 	if (magic != 0x669955aa) {
5981 		rc = -ENODEV;
5982 		goto test_nvram_done;
5983 	}
5984 
	rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE);
	if (rc != 0)
		goto test_nvram_done;
5987 
5988 	csum = ether_crc_le(0x100, data);
5989 	if (csum != CRC32_RESIDUAL) {
5990 		rc = -ENODEV;
5991 		goto test_nvram_done;
5992 	}
5993 
5994 	csum = ether_crc_le(0x100, data + 0x100);
5995 	if (csum != CRC32_RESIDUAL) {
5996 		rc = -ENODEV;
5997 	}
5998 
5999 test_nvram_done:
6000 	return rc;
6001 }
6002 
6003 static int
6004 bnx2_test_link(struct bnx2 *bp)
6005 {
6006 	u32 bmsr;
6007 
6008 	if (!netif_running(bp->dev))
6009 		return -ENODEV;
6010 
6011 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6012 		if (bp->link_up)
6013 			return 0;
6014 		return -ENODEV;
6015 	}
6016 	spin_lock_bh(&bp->phy_lock);
6017 	bnx2_enable_bmsr1(bp);
6018 	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
6019 	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
6020 	bnx2_disable_bmsr1(bp);
6021 	spin_unlock_bh(&bp->phy_lock);
6022 
	if (bmsr & BMSR_LSTATUS)
		return 0;

6026 	return -ENODEV;
6027 }
6028 
6029 static int
6030 bnx2_test_intr(struct bnx2 *bp)
6031 {
6032 	int i;
6033 	u16 status_idx;
6034 
6035 	if (!netif_running(bp->dev))
6036 		return -ENODEV;
6037 
6038 	status_idx = BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
6039 
6040 	/* This register is not touched during run-time. */
6041 	BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
6042 	BNX2_RD(bp, BNX2_HC_COMMAND);
6043 
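	/* Poll up to ~100ms (10 x 10ms) for the status block index to
	 * change, indicating that the forced interrupt was delivered.
	 */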
6044 	for (i = 0; i < 10; i++) {
		if ((BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
		    status_idx)
			break;
6050 
6051 		msleep_interruptible(10);
6052 	}
6053 	if (i < 10)
6054 		return 0;
6055 
6056 	return -ENODEV;
6057 }
6058 
/* Determine link state for parallel detection. */
6060 static int
6061 bnx2_5706_serdes_has_link(struct bnx2 *bp)
6062 {
6063 	u32 mode_ctl, an_dbg, exp;
6064 
6065 	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
6066 		return 0;
6067 
6068 	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
6069 	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
6070 
6071 	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
6072 		return 0;
6073 
6074 	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
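	/* Read twice; the AN debug bits are latched, so only the second
	 * read reflects the current state.
	 */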
6075 	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
6076 	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
6077 
6078 	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
6079 		return 0;
6080 
6081 	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
6082 	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
6083 	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
6084 
6085 	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
6086 		return 0;
6087 
6088 	return 1;
6089 }
6090 
6091 static void
6092 bnx2_5706_serdes_timer(struct bnx2 *bp)
6093 {
6094 	int check_link = 1;
6095 
6096 	spin_lock(&bp->phy_lock);
6097 	if (bp->serdes_an_pending) {
6098 		bp->serdes_an_pending--;
6099 		check_link = 0;
6100 	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6101 		u32 bmcr;
6102 
6103 		bp->current_interval = BNX2_TIMER_INTERVAL;
6104 
6105 		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6106 
6107 		if (bmcr & BMCR_ANENABLE) {
6108 			if (bnx2_5706_serdes_has_link(bp)) {
6109 				bmcr &= ~BMCR_ANENABLE;
6110 				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6111 				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
6112 				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
6113 			}
6114 		}
6115 	}
6116 	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
6117 		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
6118 		u32 phy2;
6119 
6120 		bnx2_write_phy(bp, 0x17, 0x0f01);
6121 		bnx2_read_phy(bp, 0x15, &phy2);
6122 		if (phy2 & 0x20) {
6123 			u32 bmcr;
6124 
6125 			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6126 			bmcr |= BMCR_ANENABLE;
6127 			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
6128 
6129 			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
6130 		}
6131 	} else
6132 		bp->current_interval = BNX2_TIMER_INTERVAL;
6133 
6134 	if (check_link) {
6135 		u32 val;
6136 
6137 		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
6138 		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6139 		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6140 
6141 		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
6142 			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
6143 				bnx2_5706s_force_link_dn(bp, 1);
6144 				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
6145 			} else
6146 				bnx2_set_link(bp);
6147 		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
6148 			bnx2_set_link(bp);
6149 	}
6150 	spin_unlock(&bp->phy_lock);
6151 }
6152 
6153 static void
6154 bnx2_5708_serdes_timer(struct bnx2 *bp)
6155 {
6156 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
6157 		return;
6158 
6159 	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
6160 		bp->serdes_an_pending = 0;
6161 		return;
6162 	}
6163 
6164 	spin_lock(&bp->phy_lock);
6165 	if (bp->serdes_an_pending)
6166 		bp->serdes_an_pending--;
6167 	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6168 		u32 bmcr;
6169 
6170 		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6171 		if (bmcr & BMCR_ANENABLE) {
6172 			bnx2_enable_forced_2g5(bp);
6173 			bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
6174 		} else {
6175 			bnx2_disable_forced_2g5(bp);
6176 			bp->serdes_an_pending = 2;
6177 			bp->current_interval = BNX2_TIMER_INTERVAL;
6178 		}
6179 
6180 	} else
6181 		bp->current_interval = BNX2_TIMER_INTERVAL;
6182 
6183 	spin_unlock(&bp->phy_lock);
6184 }
6185 
6186 static void
6187 bnx2_timer(struct timer_list *t)
6188 {
6189 	struct bnx2 *bp = from_timer(bp, t, timer);
6190 
6191 	if (!netif_running(bp->dev))
6192 		return;
6193 
6194 	if (atomic_read(&bp->intr_sem) != 0)
6195 		goto bnx2_restart_timer;
6196 
6197 	if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
6198 	     BNX2_FLAG_USING_MSI)
6199 		bnx2_chk_missed_msi(bp);
6200 
6201 	bnx2_send_heart_beat(bp);
6202 
6203 	bp->stats_blk->stat_FwRxDrop =
6204 		bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
6205 
	/* Work around occasionally corrupted counters. */
6207 	if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
6208 		BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
6209 			BNX2_HC_COMMAND_STATS_NOW);
6210 
6211 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6212 		if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
6213 			bnx2_5706_serdes_timer(bp);
6214 		else
6215 			bnx2_5708_serdes_timer(bp);
6216 	}
6217 
6218 bnx2_restart_timer:
6219 	mod_timer(&bp->timer, jiffies + bp->current_interval);
6220 }
6221 
6222 static int
6223 bnx2_request_irq(struct bnx2 *bp)
6224 {
6225 	unsigned long flags;
6226 	struct bnx2_irq *irq;
6227 	int rc = 0, i;
6228 
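	/* MSI and MSI-X vectors are never shared; only legacy INTx
	 * interrupts need IRQF_SHARED.
	 */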
6229 	if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6230 		flags = 0;
6231 	else
6232 		flags = IRQF_SHARED;
6233 
6234 	for (i = 0; i < bp->irq_nvecs; i++) {
6235 		irq = &bp->irq_tbl[i];
6236 		rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6237 				 &bp->bnx2_napi[i]);
6238 		if (rc)
6239 			break;
6240 		irq->requested = 1;
6241 	}
6242 	return rc;
6243 }
6244 
6245 static void
6246 __bnx2_free_irq(struct bnx2 *bp)
6247 {
6248 	struct bnx2_irq *irq;
6249 	int i;
6250 
6251 	for (i = 0; i < bp->irq_nvecs; i++) {
6252 		irq = &bp->irq_tbl[i];
6253 		if (irq->requested)
6254 			free_irq(irq->vector, &bp->bnx2_napi[i]);
6255 		irq->requested = 0;
6256 	}
6257 }
6258 
6259 static void
6260 bnx2_free_irq(struct bnx2 *bp)
6261 {
	__bnx2_free_irq(bp);
6264 	if (bp->flags & BNX2_FLAG_USING_MSI)
6265 		pci_disable_msi(bp->pdev);
6266 	else if (bp->flags & BNX2_FLAG_USING_MSIX)
6267 		pci_disable_msix(bp->pdev);
6268 
6269 	bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
6270 }
6271 
6272 static void
6273 bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
6274 {
6275 	int i, total_vecs;
6276 	struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
6277 	struct net_device *dev = bp->dev;
6278 	const int len = sizeof(bp->irq_tbl[0].name);
6279 
6280 	bnx2_setup_msix_tbl(bp);
6281 	BNX2_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
6282 	BNX2_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
6283 	BNX2_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
6284 
	/* Need to flush the previous three writes to ensure MSI-X
	 * is set up properly.
	 */
6287 	BNX2_RD(bp, BNX2_PCI_MSIX_CONTROL);
6288 
6289 	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
6290 		msix_ent[i].entry = i;
6291 		msix_ent[i].vector = 0;
6292 	}
6293 
6294 	total_vecs = msix_vecs;
6295 #ifdef BCM_CNIC
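	/* Reserve one extra vector for the cnic offload driver. */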
6296 	total_vecs++;
6297 #endif
6298 	total_vecs = pci_enable_msix_range(bp->pdev, msix_ent,
6299 					   BNX2_MIN_MSIX_VEC, total_vecs);
6300 	if (total_vecs < 0)
6301 		return;
6302 
6303 	msix_vecs = total_vecs;
6304 #ifdef BCM_CNIC
6305 	msix_vecs--;
6306 #endif
6307 	bp->irq_nvecs = msix_vecs;
6308 	bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
6309 	for (i = 0; i < total_vecs; i++) {
6310 		bp->irq_tbl[i].vector = msix_ent[i].vector;
6311 		snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
6312 		bp->irq_tbl[i].handler = bnx2_msi_1shot;
6313 	}
6314 }
6315 
6316 static int
6317 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
6318 {
6319 	int cpus = netif_get_num_default_rss_queues();
6320 	int msix_vecs;
6321 
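	/* Size the MSI-X vector count from the requested rx/tx ring
	 * counts, falling back to the default RSS queue count when
	 * nothing was requested.
	 */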
6322 	if (!bp->num_req_rx_rings)
6323 		msix_vecs = max(cpus + 1, bp->num_req_tx_rings);
6324 	else if (!bp->num_req_tx_rings)
6325 		msix_vecs = max(cpus, bp->num_req_rx_rings);
6326 	else
6327 		msix_vecs = max(bp->num_req_rx_rings, bp->num_req_tx_rings);
6328 
6329 	msix_vecs = min(msix_vecs, RX_MAX_RINGS);
6330 
6331 	bp->irq_tbl[0].handler = bnx2_interrupt;
6332 	strcpy(bp->irq_tbl[0].name, bp->dev->name);
6333 	bp->irq_nvecs = 1;
6334 	bp->irq_tbl[0].vector = bp->pdev->irq;
6335 
6336 	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
6337 		bnx2_enable_msix(bp, msix_vecs);
6338 
6339 	if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
6340 	    !(bp->flags & BNX2_FLAG_USING_MSIX)) {
6341 		if (pci_enable_msi(bp->pdev) == 0) {
6342 			bp->flags |= BNX2_FLAG_USING_MSI;
6343 			if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
6344 				bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
6345 				bp->irq_tbl[0].handler = bnx2_msi_1shot;
6346 			} else
6347 				bp->irq_tbl[0].handler = bnx2_msi;
6348 
6349 			bp->irq_tbl[0].vector = bp->pdev->irq;
6350 		}
6351 	}
6352 
6353 	if (!bp->num_req_tx_rings)
6354 		bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
6355 	else
6356 		bp->num_tx_rings = min(bp->irq_nvecs, bp->num_req_tx_rings);
6357 
6358 	if (!bp->num_req_rx_rings)
6359 		bp->num_rx_rings = bp->irq_nvecs;
6360 	else
6361 		bp->num_rx_rings = min(bp->irq_nvecs, bp->num_req_rx_rings);
6362 
6363 	netif_set_real_num_tx_queues(bp->dev, bp->num_tx_rings);
6364 
6365 	return netif_set_real_num_rx_queues(bp->dev, bp->num_rx_rings);
6366 }
6367 
6368 /* Called with rtnl_lock */
6369 static int
6370 bnx2_open(struct net_device *dev)
6371 {
6372 	struct bnx2 *bp = netdev_priv(dev);
6373 	int rc;
6374 
6375 	rc = bnx2_request_firmware(bp);
6376 	if (rc < 0)
6377 		goto out;
6378 
6379 	netif_carrier_off(dev);
6380 
6381 	bnx2_disable_int(bp);
6382 
6383 	rc = bnx2_setup_int_mode(bp, disable_msi);
6384 	if (rc)
6385 		goto open_err;
6386 	bnx2_init_napi(bp);
6387 	bnx2_napi_enable(bp);
6388 	rc = bnx2_alloc_mem(bp);
6389 	if (rc)
6390 		goto open_err;
6391 
6392 	rc = bnx2_request_irq(bp);
6393 	if (rc)
6394 		goto open_err;
6395 
6396 	rc = bnx2_init_nic(bp, 1);
6397 	if (rc)
6398 		goto open_err;
6399 
6400 	mod_timer(&bp->timer, jiffies + bp->current_interval);
6401 
6402 	atomic_set(&bp->intr_sem, 0);
6403 
6404 	memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));
6405 
6406 	bnx2_enable_int(bp);
6407 
6408 	if (bp->flags & BNX2_FLAG_USING_MSI) {
		/* Test MSI to make sure it is working.
		 * If the MSI test fails, fall back to INTx mode.
		 */
6412 		if (bnx2_test_intr(bp) != 0) {
6413 			netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");
6414 
6415 			bnx2_disable_int(bp);
6416 			bnx2_free_irq(bp);
6417 
6418 			bnx2_setup_int_mode(bp, 1);
6419 
6420 			rc = bnx2_init_nic(bp, 0);
6421 
6422 			if (!rc)
6423 				rc = bnx2_request_irq(bp);
6424 
6425 			if (rc) {
6426 				del_timer_sync(&bp->timer);
6427 				goto open_err;
6428 			}
6429 			bnx2_enable_int(bp);
6430 		}
6431 	}
6432 	if (bp->flags & BNX2_FLAG_USING_MSI)
6433 		netdev_info(dev, "using MSI\n");
6434 	else if (bp->flags & BNX2_FLAG_USING_MSIX)
6435 		netdev_info(dev, "using MSIX\n");
6436 
6437 	netif_tx_start_all_queues(dev);
6438 out:
6439 	return rc;
6440 
6441 open_err:
6442 	bnx2_napi_disable(bp);
6443 	bnx2_free_skbs(bp);
6444 	bnx2_free_irq(bp);
6445 	bnx2_free_mem(bp);
6446 	bnx2_del_napi(bp);
6447 	bnx2_release_firmware(bp);
6448 	goto out;
6449 }
6450 
6451 static void
6452 bnx2_reset_task(struct work_struct *work)
6453 {
6454 	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
6455 	int rc;
6456 	u16 pcicmd;
6457 
6458 	rtnl_lock();
6459 	if (!netif_running(bp->dev)) {
6460 		rtnl_unlock();
6461 		return;
6462 	}
6463 
6464 	bnx2_netif_stop(bp, true);
6465 
6466 	pci_read_config_word(bp->pdev, PCI_COMMAND, &pcicmd);
6467 	if (!(pcicmd & PCI_COMMAND_MEMORY)) {
		/* in case the PCI block has been reset */
6469 		pci_restore_state(bp->pdev);
6470 		pci_save_state(bp->pdev);
6471 	}
6472 	rc = bnx2_init_nic(bp, 1);
6473 	if (rc) {
6474 		netdev_err(bp->dev, "failed to reset NIC, closing\n");
6475 		bnx2_napi_enable(bp);
6476 		dev_close(bp->dev);
6477 		rtnl_unlock();
6478 		return;
6479 	}
6480 
6481 	atomic_set(&bp->intr_sem, 1);
6482 	bnx2_netif_start(bp, true);
6483 	rtnl_unlock();
6484 }
6485 
6486 #define BNX2_FTQ_ENTRY(ftq) { __stringify(ftq##FTQ_CTL), BNX2_##ftq##FTQ_CTL }
6487 
6488 static void
6489 bnx2_dump_ftq(struct bnx2 *bp)
6490 {
6491 	int i;
6492 	u32 reg, bdidx, cid, valid;
6493 	struct net_device *dev = bp->dev;
6494 	static const struct ftq_reg {
6495 		char *name;
6496 		u32 off;
6497 	} ftq_arr[] = {
6498 		BNX2_FTQ_ENTRY(RV2P_P),
6499 		BNX2_FTQ_ENTRY(RV2P_T),
6500 		BNX2_FTQ_ENTRY(RV2P_M),
6501 		BNX2_FTQ_ENTRY(TBDR_),
6502 		BNX2_FTQ_ENTRY(TDMA_),
6503 		BNX2_FTQ_ENTRY(TXP_),
6504 		BNX2_FTQ_ENTRY(TXP_),
6505 		BNX2_FTQ_ENTRY(TPAT_),
6506 		BNX2_FTQ_ENTRY(RXP_C),
6507 		BNX2_FTQ_ENTRY(RXP_),
6508 		BNX2_FTQ_ENTRY(COM_COMXQ_),
6509 		BNX2_FTQ_ENTRY(COM_COMTQ_),
6510 		BNX2_FTQ_ENTRY(COM_COMQ_),
6511 		BNX2_FTQ_ENTRY(CP_CPQ_),
6512 	};
6513 
6514 	netdev_err(dev, "<--- start FTQ dump --->\n");
6515 	for (i = 0; i < ARRAY_SIZE(ftq_arr); i++)
6516 		netdev_err(dev, "%s %08x\n", ftq_arr[i].name,
6517 			   bnx2_reg_rd_ind(bp, ftq_arr[i].off));
6518 
6519 	netdev_err(dev, "CPU states:\n");
6520 	for (reg = BNX2_TXP_CPU_MODE; reg <= BNX2_CP_CPU_MODE; reg += 0x40000)
6521 		netdev_err(dev, "%06x mode %x state %x evt_mask %x pc %x pc %x instr %x\n",
6522 			   reg, bnx2_reg_rd_ind(bp, reg),
6523 			   bnx2_reg_rd_ind(bp, reg + 4),
6524 			   bnx2_reg_rd_ind(bp, reg + 8),
6525 			   bnx2_reg_rd_ind(bp, reg + 0x1c),
6526 			   bnx2_reg_rd_ind(bp, reg + 0x1c),
6527 			   bnx2_reg_rd_ind(bp, reg + 0x20));
6528 
6529 	netdev_err(dev, "<--- end FTQ dump --->\n");
6530 	netdev_err(dev, "<--- start TBDC dump --->\n");
6531 	netdev_err(dev, "TBDC free cnt: %ld\n",
6532 		   BNX2_RD(bp, BNX2_TBDC_STATUS) & BNX2_TBDC_STATUS_FREE_CNT);
6533 	netdev_err(dev, "LINE     CID  BIDX   CMD  VALIDS\n");
6534 	for (i = 0; i < 0x20; i++) {
6535 		int j = 0;
6536 
6537 		BNX2_WR(bp, BNX2_TBDC_BD_ADDR, i);
6538 		BNX2_WR(bp, BNX2_TBDC_CAM_OPCODE,
6539 			BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_READ);
6540 		BNX2_WR(bp, BNX2_TBDC_COMMAND, BNX2_TBDC_COMMAND_CMD_REG_ARB);
6541 		while ((BNX2_RD(bp, BNX2_TBDC_COMMAND) &
6542 			BNX2_TBDC_COMMAND_CMD_REG_ARB) && j < 100)
6543 			j++;
6544 
6545 		cid = BNX2_RD(bp, BNX2_TBDC_CID);
6546 		bdidx = BNX2_RD(bp, BNX2_TBDC_BIDX);
6547 		valid = BNX2_RD(bp, BNX2_TBDC_CAM_OPCODE);
6548 		netdev_err(dev, "%02x    %06x  %04lx   %02x    [%x]\n",
6549 			   i, cid, bdidx & BNX2_TBDC_BDIDX_BDIDX,
6550 			   bdidx >> 24, (valid >> 8) & 0x0ff);
6551 	}
6552 	netdev_err(dev, "<--- end TBDC dump --->\n");
6553 }
6554 
6555 static void
6556 bnx2_dump_state(struct bnx2 *bp)
6557 {
6558 	struct net_device *dev = bp->dev;
6559 	u32 val1, val2;
6560 
6561 	pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1);
6562 	netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n",
6563 		   atomic_read(&bp->intr_sem), val1);
6564 	pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1);
6565 	pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2);
6566 	netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2);
6567 	netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
6568 		   BNX2_RD(bp, BNX2_EMAC_TX_STATUS),
6569 		   BNX2_RD(bp, BNX2_EMAC_RX_STATUS));
6570 	netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
6571 		   BNX2_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
6572 	netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
6573 		   BNX2_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
6574 	if (bp->flags & BNX2_FLAG_USING_MSIX)
6575 		netdev_err(dev, "DEBUG: PBA[%08x]\n",
6576 			   BNX2_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
6577 }
6578 
6579 static void
6580 bnx2_tx_timeout(struct net_device *dev)
6581 {
6582 	struct bnx2 *bp = netdev_priv(dev);
6583 
6584 	bnx2_dump_ftq(bp);
6585 	bnx2_dump_state(bp);
6586 	bnx2_dump_mcp_state(bp);
6587 
	/* This allows the netif to be shut down gracefully before resetting */
6589 	schedule_work(&bp->reset_task);
6590 }
6591 
6592 /* Called with netif_tx_lock.
6593  * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
6594  * netif_wake_queue().
6595  */
6596 static netdev_tx_t
6597 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6598 {
6599 	struct bnx2 *bp = netdev_priv(dev);
6600 	dma_addr_t mapping;
6601 	struct bnx2_tx_bd *txbd;
6602 	struct bnx2_sw_tx_bd *tx_buf;
6603 	u32 len, vlan_tag_flags, last_frag, mss;
6604 	u16 prod, ring_prod;
6605 	int i;
6606 	struct bnx2_napi *bnapi;
6607 	struct bnx2_tx_ring_info *txr;
6608 	struct netdev_queue *txq;
6609 
	/* Determine which tx ring this skb will be placed on */
6611 	i = skb_get_queue_mapping(skb);
6612 	bnapi = &bp->bnx2_napi[i];
6613 	txr = &bnapi->tx_ring;
6614 	txq = netdev_get_tx_queue(dev, i);
6615 
6616 	if (unlikely(bnx2_tx_avail(bp, txr) <
6617 	    (skb_shinfo(skb)->nr_frags + 1))) {
6618 		netif_tx_stop_queue(txq);
6619 		netdev_err(dev, "BUG! Tx ring full when queue awake!\n");
6620 
6621 		return NETDEV_TX_BUSY;
6622 	}
6623 	len = skb_headlen(skb);
6624 	prod = txr->tx_prod;
6625 	ring_prod = BNX2_TX_RING_IDX(prod);
6626 
6627 	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
6631 
6632 	if (skb_vlan_tag_present(skb)) {
6633 		vlan_tag_flags |=
6634 			(TX_BD_FLAGS_VLAN_TAG | (skb_vlan_tag_get(skb) << 16));
6635 	}
6636 
	mss = skb_shinfo(skb)->gso_size;
	if (mss) {
6638 		u32 tcp_opt_len;
6639 		struct iphdr *iph;
6640 
6641 		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
6642 
6643 		tcp_opt_len = tcp_optlen(skb);
6644 
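		/* For TCPv6 LSO the chip needs the TCP header offset,
		 * relative to a standard IPv6 header, spread across
		 * several BD flag and MSS bit fields.
		 */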
6645 		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
6646 			u32 tcp_off = skb_transport_offset(skb) -
6647 				      sizeof(struct ipv6hdr) - ETH_HLEN;
6648 
6649 			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
6650 					  TX_BD_FLAGS_SW_FLAGS;
6651 			if (likely(tcp_off == 0))
6652 				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
6653 			else {
6654 				tcp_off >>= 3;
6655 				vlan_tag_flags |= ((tcp_off & 0x3) <<
6656 						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
6657 						  ((tcp_off & 0x10) <<
6658 						   TX_BD_FLAGS_TCP6_OFF4_SHL);
6659 				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
6660 			}
6661 		} else {
6662 			iph = ip_hdr(skb);
6663 			if (tcp_opt_len || (iph->ihl > 5)) {
6664 				vlan_tag_flags |= ((iph->ihl - 5) +
6665 						   (tcp_opt_len >> 2)) << 8;
6666 			}
6667 		}
	}
6670 
	mapping = dma_map_single(&bp->pdev->dev, skb->data, len, DMA_TO_DEVICE);
6672 	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
6673 		dev_kfree_skb_any(skb);
6674 		return NETDEV_TX_OK;
6675 	}
6676 
6677 	tx_buf = &txr->tx_buf_ring[ring_prod];
6678 	tx_buf->skb = skb;
6679 	dma_unmap_addr_set(tx_buf, mapping, mapping);
6680 
6681 	txbd = &txr->tx_desc_ring[ring_prod];
6682 
6683 	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6684 	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6685 	txbd->tx_bd_mss_nbytes = len | (mss << 16);
6686 	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
6687 
6688 	last_frag = skb_shinfo(skb)->nr_frags;
6689 	tx_buf->nr_frags = last_frag;
6690 	tx_buf->is_gso = skb_is_gso(skb);
6691 
6692 	for (i = 0; i < last_frag; i++) {
6693 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6694 
6695 		prod = BNX2_NEXT_TX_BD(prod);
6696 		ring_prod = BNX2_TX_RING_IDX(prod);
6697 		txbd = &txr->tx_desc_ring[ring_prod];
6698 
6699 		len = skb_frag_size(frag);
6700 		mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, len,
6701 					   DMA_TO_DEVICE);
6702 		if (dma_mapping_error(&bp->pdev->dev, mapping))
6703 			goto dma_error;
6704 		dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
6705 				   mapping);
6706 
6707 		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6708 		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6709 		txbd->tx_bd_mss_nbytes = len | (mss << 16);
6710 		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
	}
6713 	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
6714 
6715 	/* Sync BD data before updating TX mailbox */
6716 	wmb();
6717 
6718 	netdev_tx_sent_queue(txq, skb->len);
6719 
6720 	prod = BNX2_NEXT_TX_BD(prod);
6721 	txr->tx_prod_bseq += skb->len;
6722 
6723 	BNX2_WR16(bp, txr->tx_bidx_addr, prod);
6724 	BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
6725 
6726 	mmiowb();
6727 
6728 	txr->tx_prod = prod;
6729 
6730 	if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
6731 		netif_tx_stop_queue(txq);
6732 
6733 		/* netif_tx_stop_queue() must be done before checking
6734 		 * tx index in bnx2_tx_avail() below, because in
6735 		 * bnx2_tx_int(), we update tx index before checking for
6736 		 * netif_tx_queue_stopped().
6737 		 */
6738 		smp_mb();
6739 		if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
6740 			netif_tx_wake_queue(txq);
6741 	}
6742 
6743 	return NETDEV_TX_OK;
6744 dma_error:
	/* save the index of the frag that failed to map */
6746 	last_frag = i;
6747 
	/* start back at the beginning and unmap the skb head */
6749 	prod = txr->tx_prod;
6750 	ring_prod = BNX2_TX_RING_IDX(prod);
6751 	tx_buf = &txr->tx_buf_ring[ring_prod];
6752 	tx_buf->skb = NULL;
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
			 skb_headlen(skb), DMA_TO_DEVICE);
6755 
6756 	/* unmap remaining mapped pages */
6757 	for (i = 0; i < last_frag; i++) {
6758 		prod = BNX2_NEXT_TX_BD(prod);
6759 		ring_prod = BNX2_TX_RING_IDX(prod);
6760 		tx_buf = &txr->tx_buf_ring[ring_prod];
		dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
			       DMA_TO_DEVICE);
6764 	}
6765 
6766 	dev_kfree_skb_any(skb);
6767 	return NETDEV_TX_OK;
6768 }
6769 
6770 /* Called with rtnl_lock */
6771 static int
6772 bnx2_close(struct net_device *dev)
6773 {
6774 	struct bnx2 *bp = netdev_priv(dev);
6775 
6776 	bnx2_disable_int_sync(bp);
6777 	bnx2_napi_disable(bp);
6778 	netif_tx_disable(dev);
6779 	del_timer_sync(&bp->timer);
6780 	bnx2_shutdown_chip(bp);
6781 	bnx2_free_irq(bp);
6782 	bnx2_free_skbs(bp);
6783 	bnx2_free_mem(bp);
6784 	bnx2_del_napi(bp);
6785 	bp->link_up = 0;
6786 	netif_carrier_off(bp->dev);
6787 	return 0;
6788 }
6789 
6790 static void
6791 bnx2_save_stats(struct bnx2 *bp)
6792 {
6793 	u32 *hw_stats = (u32 *) bp->stats_blk;
6794 	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
6795 	int i;
6796 
	/* The first 10 counters are 64-bit (hi/lo pairs, i.e. 20 u32
	 * words); accumulate them, carrying low-word overflow into the
	 * high word.
	 */
6798 	for (i = 0; i < 20; i += 2) {
6799 		u32 hi;
6800 		u64 lo;
6801 
6802 		hi = temp_stats[i] + hw_stats[i];
6803 		lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
6804 		if (lo > 0xffffffff)
6805 			hi++;
6806 		temp_stats[i] = hi;
6807 		temp_stats[i + 1] = lo & 0xffffffff;
6808 	}
6809 
6810 	for ( ; i < sizeof(struct statistics_block) / 4; i++)
6811 		temp_stats[i] += hw_stats[i];
6812 }
6813 
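/* Each counter reported below is the sum of the live hardware statistics
 * block and the snapshot accumulated in temp_stats_blk across chip
 * resets (see bnx2_save_stats()).
 */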
6814 #define GET_64BIT_NET_STATS64(ctr)		\
6815 	(((u64) (ctr##_hi) << 32) + (u64) (ctr##_lo))
6816 
6817 #define GET_64BIT_NET_STATS(ctr)				\
6818 	GET_64BIT_NET_STATS64(bp->stats_blk->ctr) +		\
6819 	GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)
6820 
6821 #define GET_32BIT_NET_STATS(ctr)				\
6822 	(unsigned long) (bp->stats_blk->ctr +			\
6823 			 bp->temp_stats_blk->ctr)
6824 
6825 static void
6826 bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
6827 {
6828 	struct bnx2 *bp = netdev_priv(dev);
6829 
6830 	if (!bp->stats_blk)
6831 		return;
6832 
6833 	net_stats->rx_packets =
6834 		GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
6835 		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
6836 		GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);
6837 
6838 	net_stats->tx_packets =
6839 		GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
6840 		GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
6841 		GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);
6842 
6843 	net_stats->rx_bytes =
6844 		GET_64BIT_NET_STATS(stat_IfHCInOctets);
6845 
6846 	net_stats->tx_bytes =
6847 		GET_64BIT_NET_STATS(stat_IfHCOutOctets);
6848 
6849 	net_stats->multicast =
6850 		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts);
6851 
6852 	net_stats->collisions =
6853 		GET_32BIT_NET_STATS(stat_EtherStatsCollisions);
6854 
6855 	net_stats->rx_length_errors =
6856 		GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
6857 		GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);
6858 
6859 	net_stats->rx_over_errors =
6860 		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6861 		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);
6862 
6863 	net_stats->rx_frame_errors =
6864 		GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);
6865 
6866 	net_stats->rx_crc_errors =
6867 		GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);
6868 
6869 	net_stats->rx_errors = net_stats->rx_length_errors +
6870 		net_stats->rx_over_errors + net_stats->rx_frame_errors +
6871 		net_stats->rx_crc_errors;
6872 
6873 	net_stats->tx_aborted_errors =
6874 		GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
6875 		GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);
6876 
6877 	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
6878 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
6879 		net_stats->tx_carrier_errors = 0;
	else
		net_stats->tx_carrier_errors =
			GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
6884 
6885 	net_stats->tx_errors =
6886 		GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
6887 		net_stats->tx_aborted_errors +
6888 		net_stats->tx_carrier_errors;
6889 
6890 	net_stats->rx_missed_errors =
6891 		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6892 		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
6893 		GET_32BIT_NET_STATS(stat_FwRxDrop);
}
6896 
6897 /* All ethtool functions called with rtnl_lock */
6898 
6899 static int
6900 bnx2_get_link_ksettings(struct net_device *dev,
6901 			struct ethtool_link_ksettings *cmd)
6902 {
6903 	struct bnx2 *bp = netdev_priv(dev);
6904 	int support_serdes = 0, support_copper = 0;
6905 	u32 supported, advertising;
6906 
6907 	supported = SUPPORTED_Autoneg;
6908 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6909 		support_serdes = 1;
6910 		support_copper = 1;
	} else if (bp->phy_port == PORT_FIBRE) {
		support_serdes = 1;
	} else {
		support_copper = 1;
	}
6915 
6916 	if (support_serdes) {
6917 		supported |= SUPPORTED_1000baseT_Full |
6918 			SUPPORTED_FIBRE;
6919 		if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6920 			supported |= SUPPORTED_2500baseX_Full;
6921 	}
6922 	if (support_copper) {
6923 		supported |= SUPPORTED_10baseT_Half |
6924 			SUPPORTED_10baseT_Full |
6925 			SUPPORTED_100baseT_Half |
6926 			SUPPORTED_100baseT_Full |
6927 			SUPPORTED_1000baseT_Full |
6928 			SUPPORTED_TP;
6929 	}
6930 
6931 	spin_lock_bh(&bp->phy_lock);
6932 	cmd->base.port = bp->phy_port;
6933 	advertising = bp->advertising;
6934 
	if (bp->autoneg & AUTONEG_SPEED)
		cmd->base.autoneg = AUTONEG_ENABLE;
	else
		cmd->base.autoneg = AUTONEG_DISABLE;
6940 
6941 	if (netif_carrier_ok(dev)) {
6942 		cmd->base.speed = bp->line_speed;
6943 		cmd->base.duplex = bp->duplex;
6944 		if (!(bp->phy_flags & BNX2_PHY_FLAG_SERDES)) {
6945 			if (bp->phy_flags & BNX2_PHY_FLAG_MDIX)
6946 				cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
6947 			else
6948 				cmd->base.eth_tp_mdix = ETH_TP_MDI;
6949 		}
6950 	}
6951 	else {
6952 		cmd->base.speed = SPEED_UNKNOWN;
6953 		cmd->base.duplex = DUPLEX_UNKNOWN;
6954 	}
6955 	spin_unlock_bh(&bp->phy_lock);
6956 
6957 	cmd->base.phy_address = bp->phy_addr;
6958 
6959 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
6960 						supported);
6961 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
6962 						advertising);
6963 
6964 	return 0;
6965 }
6966 
6967 static int
6968 bnx2_set_link_ksettings(struct net_device *dev,
6969 			const struct ethtool_link_ksettings *cmd)
6970 {
6971 	struct bnx2 *bp = netdev_priv(dev);
6972 	u8 autoneg = bp->autoneg;
6973 	u8 req_duplex = bp->req_duplex;
6974 	u16 req_line_speed = bp->req_line_speed;
6975 	u32 advertising = bp->advertising;
6976 	int err = -EINVAL;
6977 
6978 	spin_lock_bh(&bp->phy_lock);
6979 
6980 	if (cmd->base.port != PORT_TP && cmd->base.port != PORT_FIBRE)
6981 		goto err_out_unlock;
6982 
6983 	if (cmd->base.port != bp->phy_port &&
6984 	    !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
6985 		goto err_out_unlock;
6986 
6987 	/* If device is down, we can store the settings only if the user
6988 	 * is setting the currently active port.
6989 	 */
6990 	if (!netif_running(dev) && cmd->base.port != bp->phy_port)
6991 		goto err_out_unlock;
6992 
6993 	if (cmd->base.autoneg == AUTONEG_ENABLE) {
6994 		autoneg |= AUTONEG_SPEED;
6995 
6996 		ethtool_convert_link_mode_to_legacy_u32(
6997 			&advertising, cmd->link_modes.advertising);
6998 
6999 		if (cmd->base.port == PORT_TP) {
7000 			advertising &= ETHTOOL_ALL_COPPER_SPEED;
7001 			if (!advertising)
7002 				advertising = ETHTOOL_ALL_COPPER_SPEED;
7003 		} else {
7004 			advertising &= ETHTOOL_ALL_FIBRE_SPEED;
7005 			if (!advertising)
7006 				advertising = ETHTOOL_ALL_FIBRE_SPEED;
7007 		}
7008 		advertising |= ADVERTISED_Autoneg;
7009 	}
7010 	else {
7011 		u32 speed = cmd->base.speed;
7012 
7013 		if (cmd->base.port == PORT_FIBRE) {
7014 			if ((speed != SPEED_1000 &&
7015 			     speed != SPEED_2500) ||
7016 			    (cmd->base.duplex != DUPLEX_FULL))
7017 				goto err_out_unlock;
7018 
7019 			if (speed == SPEED_2500 &&
7020 			    !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
7021 				goto err_out_unlock;
7022 		} else if (speed == SPEED_1000 || speed == SPEED_2500)
7023 			goto err_out_unlock;
7024 
7025 		autoneg &= ~AUTONEG_SPEED;
7026 		req_line_speed = speed;
7027 		req_duplex = cmd->base.duplex;
7028 		advertising = 0;
7029 	}
7030 
7031 	bp->autoneg = autoneg;
7032 	bp->advertising = advertising;
7033 	bp->req_line_speed = req_line_speed;
7034 	bp->req_duplex = req_duplex;
7035 
7036 	err = 0;
7037 	/* If device is down, the new settings will be picked up when it is
7038 	 * brought up.
7039 	 */
7040 	if (netif_running(dev))
7041 		err = bnx2_setup_phy(bp, cmd->base.port);
7042 
7043 err_out_unlock:
7044 	spin_unlock_bh(&bp->phy_lock);
7045 
7046 	return err;
7047 }
7048 
7049 static void
7050 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7051 {
7052 	struct bnx2 *bp = netdev_priv(dev);
7053 
7054 	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
7055 	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
7056 	strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
7057 	strlcpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
7058 }
7059 
7060 #define BNX2_REGDUMP_LEN		(32 * 1024)
7061 
7062 static int
7063 bnx2_get_regs_len(struct net_device *dev)
7064 {
7065 	return BNX2_REGDUMP_LEN;
7066 }
7067 
7068 static void
7069 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
7070 {
7071 	u32 *p = _p, i, offset;
7072 	u8 *orig_p = _p;
7073 	struct bnx2 *bp = netdev_priv(dev);
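	/* Pairs of (start, end) offsets bounding the readable register
	 * ranges; gaps between ranges are left zero-filled in the dump.
	 */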
7074 	static const u32 reg_boundaries[] = {
7075 		0x0000, 0x0098, 0x0400, 0x045c,
7076 		0x0800, 0x0880, 0x0c00, 0x0c10,
7077 		0x0c30, 0x0d08, 0x1000, 0x101c,
7078 		0x1040, 0x1048, 0x1080, 0x10a4,
7079 		0x1400, 0x1490, 0x1498, 0x14f0,
7080 		0x1500, 0x155c, 0x1580, 0x15dc,
7081 		0x1600, 0x1658, 0x1680, 0x16d8,
7082 		0x1800, 0x1820, 0x1840, 0x1854,
7083 		0x1880, 0x1894, 0x1900, 0x1984,
7084 		0x1c00, 0x1c0c, 0x1c40, 0x1c54,
7085 		0x1c80, 0x1c94, 0x1d00, 0x1d84,
7086 		0x2000, 0x2030, 0x23c0, 0x2400,
7087 		0x2800, 0x2820, 0x2830, 0x2850,
7088 		0x2b40, 0x2c10, 0x2fc0, 0x3058,
7089 		0x3c00, 0x3c94, 0x4000, 0x4010,
7090 		0x4080, 0x4090, 0x43c0, 0x4458,
7091 		0x4c00, 0x4c18, 0x4c40, 0x4c54,
7092 		0x4fc0, 0x5010, 0x53c0, 0x5444,
7093 		0x5c00, 0x5c18, 0x5c80, 0x5c90,
7094 		0x5fc0, 0x6000, 0x6400, 0x6428,
7095 		0x6800, 0x6848, 0x684c, 0x6860,
7096 		0x6888, 0x6910, 0x8000
7097 	};
7098 
7099 	regs->version = 0;
7100 
7101 	memset(p, 0, BNX2_REGDUMP_LEN);
7102 
7103 	if (!netif_running(bp->dev))
7104 		return;
7105 
7106 	i = 0;
7107 	offset = reg_boundaries[0];
7108 	p += offset;
7109 	while (offset < BNX2_REGDUMP_LEN) {
7110 		*p++ = BNX2_RD(bp, offset);
7111 		offset += 4;
7112 		if (offset == reg_boundaries[i + 1]) {
7113 			offset = reg_boundaries[i + 2];
7114 			p = (u32 *) (orig_p + offset);
7115 			i += 2;
7116 		}
7117 	}
7118 }
7119 
7120 static void
7121 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7122 {
7123 	struct bnx2 *bp = netdev_priv(dev);
7124 
7125 	if (bp->flags & BNX2_FLAG_NO_WOL) {
7126 		wol->supported = 0;
7127 		wol->wolopts = 0;
7128 	}
7129 	else {
7130 		wol->supported = WAKE_MAGIC;
7131 		if (bp->wol)
7132 			wol->wolopts = WAKE_MAGIC;
7133 		else
7134 			wol->wolopts = 0;
7135 	}
7136 	memset(&wol->sopass, 0, sizeof(wol->sopass));
7137 }
7138 
7139 static int
7140 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7141 {
7142 	struct bnx2 *bp = netdev_priv(dev);
7143 
7144 	if (wol->wolopts & ~WAKE_MAGIC)
7145 		return -EINVAL;
7146 
7147 	if (wol->wolopts & WAKE_MAGIC) {
7148 		if (bp->flags & BNX2_FLAG_NO_WOL)
7149 			return -EINVAL;
7150 
7151 		bp->wol = 1;
7152 	}
7153 	else {
7154 		bp->wol = 0;
7155 	}
7156 
7157 	device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
7158 
7159 	return 0;
7160 }
7161 
7162 static int
7163 bnx2_nway_reset(struct net_device *dev)
7164 {
7165 	struct bnx2 *bp = netdev_priv(dev);
7166 	u32 bmcr;
7167 
7168 	if (!netif_running(dev))
7169 		return -EAGAIN;
7170 
	if (!(bp->autoneg & AUTONEG_SPEED))
		return -EINVAL;
7174 
7175 	spin_lock_bh(&bp->phy_lock);
7176 
7177 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
7178 		int rc;
7179 
7180 		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
7181 		spin_unlock_bh(&bp->phy_lock);
7182 		return rc;
7183 	}
7184 
	/* Force a link-down event that is visible to the link partner */
7186 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
7187 		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
7188 		spin_unlock_bh(&bp->phy_lock);
7189 
7190 		msleep(20);
7191 
7192 		spin_lock_bh(&bp->phy_lock);
7193 
7194 		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
7195 		bp->serdes_an_pending = 1;
7196 		mod_timer(&bp->timer, jiffies + bp->current_interval);
7197 	}
7198 
7199 	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
7200 	bmcr &= ~BMCR_LOOPBACK;
7201 	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
7202 
7203 	spin_unlock_bh(&bp->phy_lock);
7204 
7205 	return 0;
7206 }
7207 
7208 static u32
7209 bnx2_get_link(struct net_device *dev)
7210 {
7211 	struct bnx2 *bp = netdev_priv(dev);
7212 
7213 	return bp->link_up;
7214 }
7215 
7216 static int
7217 bnx2_get_eeprom_len(struct net_device *dev)
7218 {
7219 	struct bnx2 *bp = netdev_priv(dev);
7220 
7221 	if (!bp->flash_info)
7222 		return 0;
7223 
7224 	return (int) bp->flash_size;
7225 }
7226 
7227 static int
7228 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7229 		u8 *eebuf)
7230 {
7231 	struct bnx2 *bp = netdev_priv(dev);
7232 	int rc;
7233 
7234 	/* parameters already validated in ethtool_get_eeprom */
7235 
7236 	rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
7237 
7238 	return rc;
7239 }
7240 
7241 static int
7242 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7243 		u8 *eebuf)
7244 {
7245 	struct bnx2 *bp = netdev_priv(dev);
7246 	int rc;
7247 
7248 	/* parameters already validated in ethtool_set_eeprom */
7249 
7250 	rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
7251 
7252 	return rc;
7253 }
7254 
7255 static int
7256 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7257 {
7258 	struct bnx2 *bp = netdev_priv(dev);
7259 
7260 	memset(coal, 0, sizeof(struct ethtool_coalesce));
7261 
7262 	coal->rx_coalesce_usecs = bp->rx_ticks;
7263 	coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
7264 	coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
7265 	coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
7266 
7267 	coal->tx_coalesce_usecs = bp->tx_ticks;
7268 	coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
7269 	coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
7270 	coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
7271 
7272 	coal->stats_block_coalesce_usecs = bp->stats_ticks;
7273 
7274 	return 0;
7275 }
7276 
7277 static int
7278 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7279 {
7280 	struct bnx2 *bp = netdev_priv(dev);
7281 
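	/* Clamp each value to the width of its host coalescing register
	 * field: 10 bits (0x3ff) for tick values, 8 bits (0xff) for
	 * frame counts.
	 */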
	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 0x3ff)
		bp->rx_ticks = 0x3ff;

	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
	if (bp->rx_quick_cons_trip > 0xff)
		bp->rx_quick_cons_trip = 0xff;

	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
	if (bp->rx_ticks_int > 0x3ff)
		bp->rx_ticks_int = 0x3ff;

	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
	if (bp->rx_quick_cons_trip_int > 0xff)
		bp->rx_quick_cons_trip_int = 0xff;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3ff)
		bp->tx_ticks = 0x3ff;

	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
	if (bp->tx_quick_cons_trip > 0xff)
		bp->tx_quick_cons_trip = 0xff;

	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
	if (bp->tx_ticks_int > 0x3ff)
		bp->tx_ticks_int = 0x3ff;

	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
	if (bp->tx_quick_cons_trip_int > 0xff)
		bp->tx_quick_cons_trip_int = 0xff;
7307 
7308 	bp->stats_ticks = coal->stats_block_coalesce_usecs;
7309 	if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
7310 		if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
7311 			bp->stats_ticks = USEC_PER_SEC;
7312 	}
7313 	if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
7314 		bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7315 	bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7316 
7317 	if (netif_running(bp->dev)) {
7318 		bnx2_netif_stop(bp, true);
7319 		bnx2_init_nic(bp, 0);
7320 		bnx2_netif_start(bp, true);
7321 	}
7322 
7323 	return 0;
7324 }
7325 
7326 static void
7327 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7328 {
7329 	struct bnx2 *bp = netdev_priv(dev);
7330 
7331 	ering->rx_max_pending = BNX2_MAX_TOTAL_RX_DESC_CNT;
7332 	ering->rx_jumbo_max_pending = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;
7333 
7334 	ering->rx_pending = bp->rx_ring_size;
7335 	ering->rx_jumbo_pending = bp->rx_pg_ring_size;
7336 
7337 	ering->tx_max_pending = BNX2_MAX_TX_DESC_CNT;
7338 	ering->tx_pending = bp->tx_ring_size;
7339 }
7340 
7341 static int
7342 bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx, bool reset_irq)
7343 {
7344 	if (netif_running(bp->dev)) {
7345 		/* Reset will erase chipset stats; save them */
7346 		bnx2_save_stats(bp);
7347 
7348 		bnx2_netif_stop(bp, true);
7349 		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
7350 		if (reset_irq) {
7351 			bnx2_free_irq(bp);
7352 			bnx2_del_napi(bp);
7353 		} else {
7354 			__bnx2_free_irq(bp);
7355 		}
7356 		bnx2_free_skbs(bp);
7357 		bnx2_free_mem(bp);
7358 	}
7359 
7360 	bnx2_set_rx_ring_size(bp, rx);
7361 	bp->tx_ring_size = tx;
7362 
7363 	if (netif_running(bp->dev)) {
7364 		int rc = 0;
7365 
7366 		if (reset_irq) {
7367 			rc = bnx2_setup_int_mode(bp, disable_msi);
7368 			bnx2_init_napi(bp);
7369 		}
7370 
7371 		if (!rc)
7372 			rc = bnx2_alloc_mem(bp);
7373 
7374 		if (!rc)
7375 			rc = bnx2_request_irq(bp);
7376 
7377 		if (!rc)
7378 			rc = bnx2_init_nic(bp, 0);
7379 
7380 		if (rc) {
7381 			bnx2_napi_enable(bp);
7382 			dev_close(bp->dev);
7383 			return rc;
7384 		}
7385 #ifdef BCM_CNIC
7386 		mutex_lock(&bp->cnic_lock);
7387 		/* Let cnic know about the new status block. */
7388 		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
7389 			bnx2_setup_cnic_irq_info(bp);
7390 		mutex_unlock(&bp->cnic_lock);
7391 #endif
7392 		bnx2_netif_start(bp, true);
7393 	}
7394 	return 0;
7395 }
7396 
7397 static int
7398 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7399 {
7400 	struct bnx2 *bp = netdev_priv(dev);
7401 	int rc;
7402 
	if ((ering->rx_pending > BNX2_MAX_TOTAL_RX_DESC_CNT) ||
	    (ering->tx_pending > BNX2_MAX_TX_DESC_CNT) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS))
		return -EINVAL;

7409 	rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending,
7410 				   false);
7411 	return rc;
7412 }
7413 
7414 static void
7415 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7416 {
7417 	struct bnx2 *bp = netdev_priv(dev);
7418 
7419 	epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7420 	epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7421 	epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7422 }
7423 
7424 static int
7425 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7426 {
7427 	struct bnx2 *bp = netdev_priv(dev);
7428 
7429 	bp->req_flow_ctrl = 0;
7430 	if (epause->rx_pause)
7431 		bp->req_flow_ctrl |= FLOW_CTRL_RX;
7432 	if (epause->tx_pause)
7433 		bp->req_flow_ctrl |= FLOW_CTRL_TX;
7434 
	if (epause->autoneg)
		bp->autoneg |= AUTONEG_FLOW_CTRL;
	else
		bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7441 
7442 	if (netif_running(dev)) {
7443 		spin_lock_bh(&bp->phy_lock);
7444 		bnx2_setup_phy(bp, bp->phy_port);
7445 		spin_unlock_bh(&bp->phy_lock);
7446 	}
7447 
7448 	return 0;
7449 }
7450 
7451 static struct {
7452 	char string[ETH_GSTRING_LEN];
7453 } bnx2_stats_str_arr[] = {
7454 	{ "rx_bytes" },
7455 	{ "rx_error_bytes" },
7456 	{ "tx_bytes" },
7457 	{ "tx_error_bytes" },
7458 	{ "rx_ucast_packets" },
7459 	{ "rx_mcast_packets" },
7460 	{ "rx_bcast_packets" },
7461 	{ "tx_ucast_packets" },
7462 	{ "tx_mcast_packets" },
7463 	{ "tx_bcast_packets" },
7464 	{ "tx_mac_errors" },
7465 	{ "tx_carrier_errors" },
7466 	{ "rx_crc_errors" },
7467 	{ "rx_align_errors" },
7468 	{ "tx_single_collisions" },
7469 	{ "tx_multi_collisions" },
7470 	{ "tx_deferred" },
7471 	{ "tx_excess_collisions" },
7472 	{ "tx_late_collisions" },
7473 	{ "tx_total_collisions" },
7474 	{ "rx_fragments" },
7475 	{ "rx_jabbers" },
7476 	{ "rx_undersize_packets" },
7477 	{ "rx_oversize_packets" },
7478 	{ "rx_64_byte_packets" },
7479 	{ "rx_65_to_127_byte_packets" },
7480 	{ "rx_128_to_255_byte_packets" },
7481 	{ "rx_256_to_511_byte_packets" },
7482 	{ "rx_512_to_1023_byte_packets" },
7483 	{ "rx_1024_to_1522_byte_packets" },
7484 	{ "rx_1523_to_9022_byte_packets" },
7485 	{ "tx_64_byte_packets" },
7486 	{ "tx_65_to_127_byte_packets" },
7487 	{ "tx_128_to_255_byte_packets" },
7488 	{ "tx_256_to_511_byte_packets" },
7489 	{ "tx_512_to_1023_byte_packets" },
7490 	{ "tx_1024_to_1522_byte_packets" },
7491 	{ "tx_1523_to_9022_byte_packets" },
7492 	{ "rx_xon_frames" },
7493 	{ "rx_xoff_frames" },
7494 	{ "tx_xon_frames" },
7495 	{ "tx_xoff_frames" },
7496 	{ "rx_mac_ctrl_frames" },
7497 	{ "rx_filtered_packets" },
7498 	{ "rx_ftq_discards" },
7499 	{ "rx_discards" },
7500 	{ "rx_fw_discards" },
7501 };
7502 
7503 #define BNX2_NUM_STATS ARRAY_SIZE(bnx2_stats_str_arr)
7504 
7505 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
7506 
7507 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
7508     STATS_OFFSET32(stat_IfHCInOctets_hi),
7509     STATS_OFFSET32(stat_IfHCInBadOctets_hi),
7510     STATS_OFFSET32(stat_IfHCOutOctets_hi),
7511     STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
7512     STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
7513     STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
7514     STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
7515     STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
7516     STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
7517     STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
7518     STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
7519     STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
7520     STATS_OFFSET32(stat_Dot3StatsFCSErrors),
7521     STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
7522     STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
7523     STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
7524     STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
7525     STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
7526     STATS_OFFSET32(stat_Dot3StatsLateCollisions),
7527     STATS_OFFSET32(stat_EtherStatsCollisions),
7528     STATS_OFFSET32(stat_EtherStatsFragments),
7529     STATS_OFFSET32(stat_EtherStatsJabbers),
7530     STATS_OFFSET32(stat_EtherStatsUndersizePkts),
7531     STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
7532     STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
7533     STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
7534     STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
7535     STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
7536     STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
7537     STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
7538     STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
7539     STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
7540     STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
7541     STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
7542     STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
7543     STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
7544     STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
7545     STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
7546     STATS_OFFSET32(stat_XonPauseFramesReceived),
7547     STATS_OFFSET32(stat_XoffPauseFramesReceived),
7548     STATS_OFFSET32(stat_OutXonSent),
7549     STATS_OFFSET32(stat_OutXoffSent),
7550     STATS_OFFSET32(stat_MacControlFramesReceived),
7551     STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
7552     STATS_OFFSET32(stat_IfInFTQDiscards),
7553     STATS_OFFSET32(stat_IfInMBUFDiscards),
7554     STATS_OFFSET32(stat_FwRxDrop),
7555 };
7556 
/* Per-counter width in bytes: 8 = 64-bit, 4 = 32-bit, 0 = counter
 * skipped.  stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors
 * are skipped because of errata.
 */
7560 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
7561 	8,0,8,8,8,8,8,8,8,8,
7562 	4,0,4,4,4,4,4,4,4,4,
7563 	4,4,4,4,4,4,4,4,4,4,
7564 	4,4,4,4,4,4,4,4,4,4,
7565 	4,4,4,4,4,4,4,
7566 };
7567 
7568 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
7569 	8,0,8,8,8,8,8,8,8,8,
7570 	4,4,4,4,4,4,4,4,4,4,
7571 	4,4,4,4,4,4,4,4,4,4,
7572 	4,4,4,4,4,4,4,4,4,4,
7573 	4,4,4,4,4,4,4,
7574 };
7575 
7576 #define BNX2_NUM_TESTS 6
7577 
7578 static struct {
7579 	char string[ETH_GSTRING_LEN];
7580 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
7581 	{ "register_test (offline)" },
7582 	{ "memory_test (offline)" },
7583 	{ "loopback_test (offline)" },
7584 	{ "nvram_test (online)" },
7585 	{ "interrupt_test (online)" },
7586 	{ "link_test (online)" },
7587 };
7588 
7589 static int
7590 bnx2_get_sset_count(struct net_device *dev, int sset)
7591 {
7592 	switch (sset) {
7593 	case ETH_SS_TEST:
7594 		return BNX2_NUM_TESTS;
7595 	case ETH_SS_STATS:
7596 		return BNX2_NUM_STATS;
7597 	default:
7598 		return -EOPNOTSUPP;
7599 	}
7600 }
7601 
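/* Self-test results land in *buf in the order of bnx2_tests_str_arr:
 * buf[0..2] are the offline register/memory/loopback tests, which run
 * only when userspace requests an offline test (e.g. "ethtool -t ethX
 * offline"); buf[3..5] are the online nvram/interrupt/link tests.  A
 * non-zero entry means that test failed; buf[2] additionally carries
 * the loopback test's error code.
 */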
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		bnx2_netif_stop(bp, true);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		buf[2] = bnx2_test_loopback(bp);
		if (buf[2] != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		if (!netif_running(bp->dev))
			bnx2_shutdown_chip(bp);
		else {
			bnx2_init_nic(bp, 1);
			bnx2_netif_start(bp, true);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
}

static void
bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, bnx2_stats_str_arr,
			sizeof(bnx2_stats_str_arr));
		break;
	case ETH_SS_TEST:
		memcpy(buf, bnx2_tests_str_arr,
			sizeof(bnx2_tests_str_arr));
		break;
	}
}

static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
	u8 *stats_len_arr = NULL;

	if (!hw_stats) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A2) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		unsigned long offset;

		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}

		offset = bnx2_stats_offset_arr[i];
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64) *(hw_stats + offset) +
				 *(temp_stats + offset);
			continue;
		}
		/* 8-byte counter */
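		/* The high word sits at 'offset' and the low word at
		 * 'offset + 1'; temp_stats_blk holds counts the driver
		 * saved across chip resets, so both blocks are summed.
		 */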
		buf[i] = (((u64) *(hw_stats + offset)) << 32) +
			 *(hw_stats + offset + 1) +
			 (((u64) *(temp_stats + offset)) << 32) +
			 *(temp_stats + offset + 1);
	}
}

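/* LED blink hook for "ethtool -p ethX".  Returning 1 from
 * ETHTOOL_ID_ACTIVE asks the ethtool core to alternate the ON and OFF
 * states once per second for the duration of the identify request.
 */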
static int
bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
{
	struct bnx2 *bp = netdev_priv(dev);

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		bp->leds_save = BNX2_RD(bp, BNX2_MISC_CFG);
		BNX2_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
			BNX2_EMAC_LED_1000MB_OVERRIDE |
			BNX2_EMAC_LED_100MB_OVERRIDE |
			BNX2_EMAC_LED_10MB_OVERRIDE |
			BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
			BNX2_EMAC_LED_TRAFFIC);
		break;

	case ETHTOOL_ID_OFF:
		BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		break;

	case ETHTOOL_ID_INACTIVE:
		BNX2_WR(bp, BNX2_EMAC_LED, 0);
		BNX2_WR(bp, BNX2_MISC_CFG, bp->leds_save);
		break;
	}

	return 0;
}

static int
bnx2_set_features(struct net_device *dev, netdev_features_t features)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* TSO with VLAN tag won't work with current firmware */
	if (features & NETIF_F_HW_VLAN_CTAG_TX)
		dev->vlan_features |= (dev->hw_features & NETIF_F_ALL_TSO);
	else
		dev->vlan_features &= ~NETIF_F_ALL_TSO;

	if ((!!(features & NETIF_F_HW_VLAN_CTAG_RX) !=
	    !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) &&
	    netif_running(dev)) {
		bnx2_netif_stop(bp, false);
		dev->features = features;
		bnx2_set_rx_mode(dev);
		bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
		bnx2_netif_start(bp, false);
		return 1;
	}

	return 0;
}

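/* Ring-count reporting and tuning for "ethtool -l/-L ethX".  Multiple
 * rings are only offered when the chip has MSI-X capability and the
 * user has not set the disable_msi module parameter.
 */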
static void bnx2_get_channels(struct net_device *dev,
			      struct ethtool_channels *channels)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 max_rx_rings = 1;
	u32 max_tx_rings = 1;

	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
		max_rx_rings = RX_MAX_RINGS;
		max_tx_rings = TX_MAX_RINGS;
	}

	channels->max_rx = max_rx_rings;
	channels->max_tx = max_tx_rings;
	channels->max_other = 0;
	channels->max_combined = 0;
	channels->rx_count = bp->num_rx_rings;
	channels->tx_count = bp->num_tx_rings;
	channels->other_count = 0;
	channels->combined_count = 0;
}

static int bnx2_set_channels(struct net_device *dev,
			      struct ethtool_channels *channels)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 max_rx_rings = 1;
	u32 max_tx_rings = 1;
	int rc = 0;

	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
		max_rx_rings = RX_MAX_RINGS;
		max_tx_rings = TX_MAX_RINGS;
	}
	if (channels->rx_count > max_rx_rings ||
	    channels->tx_count > max_tx_rings)
		return -EINVAL;

	bp->num_req_rx_rings = channels->rx_count;
	bp->num_req_tx_rings = channels->tx_count;

	if (netif_running(dev))
		rc = bnx2_change_ring_size(bp, bp->rx_ring_size,
					   bp->tx_ring_size, true);

	return rc;
}

static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= bnx2_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.set_phys_id		= bnx2_set_phys_id,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_sset_count		= bnx2_get_sset_count,
	.get_channels		= bnx2_get_channels,
	.set_channels		= bnx2_set_channels,
	.get_link_ksettings	= bnx2_get_link_ksettings,
	.set_link_ksettings	= bnx2_set_link_ksettings,
};

/* Called with rtnl_lock */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fall through */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}

/* Called with rtnl_lock */
static int
bnx2_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2 *bp = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev))
		bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

	return 0;
}

/* Called with rtnl_lock */
static int
bnx2_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2 *bp = netdev_priv(dev);

	dev->mtu = new_mtu;
	return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size,
				     false);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
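/* Netpoll hook (used by netconsole and similar): normal interrupt
 * delivery may be unavailable, so invoke each vector's handler
 * directly with that IRQ temporarily disabled.
 */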
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;

	for (i = 0; i < bp->irq_nvecs; i++) {
		struct bnx2_irq *irq = &bp->irq_tbl[i];

		disable_irq(irq->vector);
		irq->handler(irq->vector, &bp->bnx2_napi[i]);
		enable_irq(irq->vector);
	}
}
#endif

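/* Work out the media type of a 5709: the bond ID identifies the copper
 * (C) and SerDes (S) parts outright; otherwise the PHY strap value (or
 * its software override) selects the media for each function.
 */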
static void
bnx2_get_5709_media(struct bnx2 *bp)
{
	u32 val = BNX2_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
	u32 strap;

	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
		return;
	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
		return;
	}

	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
	else
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;

	if (bp->func == 0) {
		switch (strap) {
		case 0x4:
		case 0x5:
		case 0x6:
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
			return;
		}
	} else {
		switch (strap) {
		case 0x1:
		case 0x2:
		case 0x4:
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
			return;
		}
	}
}

static void
bnx2_get_pci_speed(struct bnx2 *bp)
{
	u32 reg;

	reg = BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= BNX2_FLAG_PCIX;

		clkreg = BNX2_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	} else {
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= BNX2_FLAG_PCI_32BIT;
}

static void
bnx2_read_vpd_fw_ver(struct bnx2 *bp)
{
	int rc, i, j;
	u8 *data;
	unsigned int block_end, rosize, len;

#define BNX2_VPD_NVRAM_OFFSET	0x300
#define BNX2_VPD_LEN		128
#define BNX2_MAX_VER_SLEN	30

	data = kmalloc(256, GFP_KERNEL);
	if (!data)
		return;

	rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
			     BNX2_VPD_LEN);
	if (rc)
		goto vpd_done;

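	/* NVRAM reads return each 32-bit word byte-swapped relative to
	 * the PCI VPD layout; reverse the bytes within every word while
	 * copying from the scratch half of the buffer into the first
	 * BNX2_VPD_LEN bytes.
	 */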
	for (i = 0; i < BNX2_VPD_LEN; i += 4) {
		data[i] = data[i + BNX2_VPD_LEN + 3];
		data[i + 1] = data[i + BNX2_VPD_LEN + 2];
		data[i + 2] = data[i + BNX2_VPD_LEN + 1];
		data[i + 3] = data[i + BNX2_VPD_LEN];
	}

	i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto vpd_done;

	rosize = pci_vpd_lrdt_size(&data[i]);
	i += PCI_VPD_LRDT_TAG_SIZE;
	block_end = i + rosize;

	if (block_end > BNX2_VPD_LEN)
		goto vpd_done;

	j = pci_vpd_find_info_keyword(data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j < 0)
		goto vpd_done;

	len = pci_vpd_info_field_size(&data[j]);

	j += PCI_VPD_INFO_FLD_HDR_SIZE;
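	/* "1028" is Dell's PCI vendor ID; only boards whose MFR_ID
	 * keyword matches it carry a firmware version string in the
	 * VENDOR0 keyword read below.
	 */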
	if (j + len > block_end || len != 4 ||
	    memcmp(&data[j], "1028", 4))
		goto vpd_done;

	j = pci_vpd_find_info_keyword(data, i, rosize,
				      PCI_VPD_RO_KEYWORD_VENDOR0);
	if (j < 0)
		goto vpd_done;

	len = pci_vpd_info_field_size(&data[j]);

	j += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
		goto vpd_done;

	memcpy(bp->fw_version, &data[j], len);
	bp->fw_version[len] = ' ';

vpd_done:
	kfree(data);
}

static int
bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
{
	struct bnx2 *bp;
	int rc, i, j;
	u32 reg;
	u64 dma_mask, persist_dma_mask;
	int err;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->flags = 0;
	bp->phy_flags = 0;

	bp->temp_stats_blk =
		kzalloc(sizeof(struct statistics_block), GFP_KERNEL);

	if (!bp->temp_stats_blk) {
		rc = -ENOMEM;
		goto err_out;
	}

	/* enable device (incl. PCI PM wakeup), and bus-mastering */
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev,
			"Cannot find PCI device base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (rc) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable;
	}

	pci_set_master(pdev);

	bp->pm_cap = pdev->pm_cap;
	if (bp->pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find power management capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->dev = dev;
	bp->pdev = pdev;

	spin_lock_init(&bp->phy_lock);
	spin_lock_init(&bp->indirect_lock);
#ifdef BCM_CNIC
	mutex_init(&bp->cnic_lock);
#endif
	INIT_WORK(&bp->reset_task, bnx2_reset_task);

	bp->regview = pci_iomap(pdev, 0, MB_GET_CID_ADDR(TX_TSS_CID +
							 TX_MAX_TSS_RINGS + 1));
	if (!bp->regview) {
		dev_err(&pdev->dev, "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	/* Configure byte swap and enable writes to the reg_window registers.
	 * Rely on the CPU to do target byte swapping on big-endian systems;
	 * the chip's target access swapping will not swap all accesses.
	 */
	BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG,
		BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);

	bp->chip_id = BNX2_RD(bp, BNX2_MISC_ID);

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		if (!pci_is_pcie(pdev)) {
			dev_err(&pdev->dev, "Not PCIE, aborting\n");
			rc = -EIO;
			goto err_out_unmap;
		}
		bp->flags |= BNX2_FLAG_PCIE;
		if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
			bp->flags |= BNX2_FLAG_JUMBO_BROKEN;

		/* AER (Advanced Error Reporting) hooks */
		err = pci_enable_pcie_error_reporting(pdev);
		if (!err)
			bp->flags |= BNX2_FLAG_AER_ENABLED;

	} else {
		bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
		if (bp->pcix_cap == 0) {
			dev_err(&pdev->dev,
				"Cannot find PCIX capability, aborting\n");
			rc = -EIO;
			goto err_out_unmap;
		}
		bp->flags |= BNX2_FLAG_BROKEN_STATS;
	}

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
	    BNX2_CHIP_REV(bp) != BNX2_CHIP_REV_Ax) {
		if (pdev->msix_cap)
			bp->flags |= BNX2_FLAG_MSIX_CAP;
	}

	if (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0 &&
	    BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A1) {
		if (pdev->msi_cap)
			bp->flags |= BNX2_FLAG_MSI_CAP;
	}

	/* 5708 cannot support DMA addresses > 40-bit.  */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
	else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (pci_set_dma_mask(pdev, dma_mask) == 0) {
		dev->features |= NETIF_F_HIGHDMA;
		rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
		if (rc) {
			dev_err(&pdev->dev,
				"pci_set_consistent_dma_mask failed, aborting\n");
			goto err_out_unmap;
		}
	} else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
		dev_err(&pdev->dev, "System does not support DMA, aborting\n");
		goto err_out_unmap;
	}

	if (!(bp->flags & BNX2_FLAG_PCIE))
		bnx2_get_pci_speed(bp);

	/* 5706A0 may falsely detect SERR and PERR. */
	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
		reg = BNX2_RD(bp, PCI_COMMAND);
		reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
		BNX2_WR(bp, PCI_COMMAND, reg);
	} else if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) &&
		!(bp->flags & BNX2_FLAG_PCIX)) {
		dev_err(&pdev->dev,
			"5706 A1 can only be used in a PCIX bus, aborting\n");
		rc = -EPERM;
		goto err_out_unmap;
	}

	bnx2_init_nvram(bp);

	reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);

	if (bnx2_reg_rd_ind(bp, BNX2_MCP_TOE_ID) & BNX2_MCP_TOE_ID_FUNCTION_ID)
		bp->func = 1;

	if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
	    BNX2_SHM_HDR_SIGNATURE_SIG) {
		u32 off = bp->func << 2;

		bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
	} else
		bp->shmem_base = HOST_VIEW_SHMEM_BASE;

	/* Get the permanent MAC address.  First we need to make sure the
	 * firmware is actually running.
	 */
	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);

	if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
	    BNX2_DEV_INFO_SIGNATURE_MAGIC) {
		dev_err(&pdev->dev, "Firmware not running, aborting\n");
		rc = -ENODEV;
		goto err_out_unmap;
	}

	bnx2_read_vpd_fw_ver(bp);

	j = strlen(bp->fw_version);
	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
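	/* Format the three high bytes of the bootcode revision as a
	 * dotted-decimal string appended to fw_version, suppressing
	 * leading zeros in each field (e.g. 0x06020300 becomes "bc 6.2.3").
	 */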
	for (i = 0; i < 3 && j < 24; i++) {
		u8 num, k, skip0;

		if (i == 0) {
			bp->fw_version[j++] = 'b';
			bp->fw_version[j++] = 'c';
			bp->fw_version[j++] = ' ';
		}
		num = (u8) (reg >> (24 - (i * 8)));
		for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
			if (num >= k || !skip0 || k == 1) {
				bp->fw_version[j++] = (num / k) + '0';
				skip0 = 0;
			}
		}
		if (i != 2)
			bp->fw_version[j++] = '.';
	}
	reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
	if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
		bp->wol = 1;

	if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
		bp->flags |= BNX2_FLAG_ASF_ENABLE;

		for (i = 0; i < 30; i++) {
			reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
			if (reg & BNX2_CONDITION_MFW_RUN_MASK)
				break;
			msleep(10);
		}
	}
	reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
	reg &= BNX2_CONDITION_MFW_RUN_MASK;
	if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
	    reg != BNX2_CONDITION_MFW_RUN_NONE) {
		u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);

		if (j < 32)
			bp->fw_version[j++] = ' ';
		for (i = 0; i < 3 && j < 28; i++) {
			reg = bnx2_reg_rd_ind(bp, addr + i * 4);
			reg = be32_to_cpu(reg);
			memcpy(&bp->fw_version[j], &reg, 4);
			j += 4;
		}
	}

	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
	bp->mac_addr[0] = (u8) (reg >> 8);
	bp->mac_addr[1] = (u8) reg;

	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
	bp->mac_addr[2] = (u8) (reg >> 24);
	bp->mac_addr[3] = (u8) (reg >> 16);
	bp->mac_addr[4] = (u8) (reg >> 8);
	bp->mac_addr[5] = (u8) reg;

	bp->tx_ring_size = BNX2_MAX_TX_DESC_CNT;
	bnx2_set_rx_ring_size(bp, 255);

	bp->tx_quick_cons_trip_int = 2;
	bp->tx_quick_cons_trip = 20;
	bp->tx_ticks_int = 18;
	bp->tx_ticks = 80;

	bp->rx_quick_cons_trip_int = 2;
	bp->rx_quick_cons_trip = 12;
	bp->rx_ticks_int = 18;
	bp->rx_ticks = 18;

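	/* Default statistics DMA interval: once per second, masked to
	 * the bits the HC_STATS_TICKS register actually implements.
	 */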
	bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;

	bp->current_interval = BNX2_TIMER_INTERVAL;

	bp->phy_addr = 1;

	/* allocate stats_blk */
	rc = bnx2_alloc_stats_blk(dev);
	if (rc)
		goto err_out_unmap;

	/* Disable WOL support if we are running on a SERDES chip. */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_get_5709_media(bp);
	else if (BNX2_CHIP_BOND(bp) & BNX2_CHIP_BOND_SERDES_BIT)
		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;

	bp->phy_port = PORT_TP;
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		bp->phy_port = PORT_FIBRE;
		reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
			bp->flags |= BNX2_FLAG_NO_WOL;
			bp->wol = 0;
		}
		if (BNX2_CHIP(bp) == BNX2_CHIP_5706) {
			/* Don't do parallel detect on this board because of
			 * some board problems.  The link will not go down
			 * if we do parallel detect.
			 */
			if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
			    pdev->subsystem_device == 0x310c)
				bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
		} else {
			bp->phy_addr = 2;
			if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
				bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
		}
	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5706 ||
		   BNX2_CHIP(bp) == BNX2_CHIP_5708)
		bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
	else if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
		 (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax ||
		  BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Bx))
		bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;

	bnx2_init_fw_cap(bp);

	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1) ||
	    !(BNX2_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
		bp->flags |= BNX2_FLAG_NO_WOL;
		bp->wol = 0;
	}

	if (bp->flags & BNX2_FLAG_NO_WOL)
		device_set_wakeup_capable(&bp->pdev->dev, false);
	else
		device_set_wakeup_enable(&bp->pdev->dev, bp->wol);

	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
		bp->tx_quick_cons_trip_int =
			bp->tx_quick_cons_trip;
		bp->tx_ticks_int = bp->tx_ticks;
		bp->rx_quick_cons_trip_int =
			bp->rx_quick_cons_trip;
		bp->rx_ticks_int = bp->rx_ticks;
		bp->comp_prod_trip_int = bp->comp_prod_trip;
		bp->com_ticks_int = bp->com_ticks;
		bp->cmd_ticks_int = bp->cmd_ticks;
	}

	/* Disable MSI on 5706 if AMD 8132 bridge is found.
	 *
	 * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
	 * with byte enables disabled on the unused 32-bit word.  This is legal
	 * but causes problems on the AMD 8132 which will eventually stop
	 * responding after a while.
	 *
	 * AMD believes this incompatibility is unique to the 5706, and
	 * prefers to locally disable MSI rather than globally disabling it.
	 */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5706 && disable_msi == 0) {
		struct pci_dev *amd_8132 = NULL;

		while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
						  PCI_DEVICE_ID_AMD_8132_BRIDGE,
						  amd_8132))) {

			if (amd_8132->revision >= 0x10 &&
			    amd_8132->revision <= 0x13) {
				disable_msi = 1;
				pci_dev_put(amd_8132);
				break;
			}
		}
	}

	bnx2_set_default_link(bp);
	bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;

	timer_setup(&bp->timer, bnx2_timer, 0);
	bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);

#ifdef BCM_CNIC
	if (bnx2_shmem_rd(bp, BNX2_ISCSI_INITIATOR) & BNX2_ISCSI_INITIATOR_EN)
		bp->cnic_eth_dev.max_iscsi_conn =
			(bnx2_shmem_rd(bp, BNX2_ISCSI_MAX_CONN) &
			 BNX2_ISCSI_MAX_CONN_MASK) >> BNX2_ISCSI_MAX_CONN_SHIFT;
	bp->cnic_probe = bnx2_cnic_probe;
#endif
	pci_save_state(pdev);

	return 0;

err_out_unmap:
	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
		pci_disable_pcie_error_reporting(pdev);
		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
	}

	pci_iounmap(pdev, bp->regview);
	bp->regview = NULL;

err_out_release:
	pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);

err_out:
	kfree(bp->temp_stats_blk);

	return rc;
}

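/* Render the bus type into str, producing e.g. "PCI Express" or
 * "PCI-X 64-bit 133MHz"; callers pass a buffer of at least 40 bytes.
 */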
static char *
bnx2_bus_string(struct bnx2 *bp, char *str)
{
	char *s = str;

	if (bp->flags & BNX2_FLAG_PCIE) {
		s += sprintf(s, "PCI Express");
	} else {
		s += sprintf(s, "PCI");
		if (bp->flags & BNX2_FLAG_PCIX)
			s += sprintf(s, "-X");
		if (bp->flags & BNX2_FLAG_PCI_32BIT)
			s += sprintf(s, " 32-bit");
		else
			s += sprintf(s, " 64-bit");
		s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
	}
	return str;
}

static void
bnx2_del_napi(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++)
		netif_napi_del(&bp->bnx2_napi[i].napi);
}

static void
bnx2_init_napi(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		int (*poll)(struct napi_struct *, int);

		if (i == 0)
			poll = bnx2_poll;
		else
			poll = bnx2_poll_msix;

		netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
		bnapi->bp = bp;
	}
}

static const struct net_device_ops bnx2_netdev_ops = {
	.ndo_open		= bnx2_open,
	.ndo_start_xmit		= bnx2_start_xmit,
	.ndo_stop		= bnx2_close,
	.ndo_get_stats64	= bnx2_get_stats64,
	.ndo_set_rx_mode	= bnx2_set_rx_mode,
	.ndo_do_ioctl		= bnx2_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnx2_change_mac_addr,
	.ndo_change_mtu		= bnx2_change_mtu,
	.ndo_set_features	= bnx2_set_features,
	.ndo_tx_timeout		= bnx2_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2,
#endif
};

static int
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev;
	struct bnx2 *bp;
	int rc;
	char str[40];

	if (version_printed++ == 0)
		pr_info("%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0)
		goto err_free;

	dev->netdev_ops = &bnx2_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->ethtool_ops = &bnx2_ethtool_ops;

	bp = netdev_priv(dev);

	pci_set_drvdata(pdev, dev);

	/*
	 * In-flight DMA from the first kernel could still be running in the
	 * kdump kernel.  A new IO page table was created before bnx2 resets
	 * the chip at open time, so wait for any in-flight DMA to complete
	 * to keep it from looking up the newly created IO page table.
	 */
	if (is_kdump_kernel())
		bnx2_wait_dma_complete(bp);

	memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN);

	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
		NETIF_F_TSO | NETIF_F_TSO_ECN |
		NETIF_F_RXHASH | NETIF_F_RXCSUM;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;

	dev->vlan_features = dev->hw_features;
	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
	dev->features |= dev->hw_features;
	dev->priv_flags |= IFF_UNICAST_FLT;
	dev->min_mtu = MIN_ETHERNET_PACKET_SIZE;
	dev->max_mtu = MAX_ETHERNET_JUMBO_PACKET_SIZE;

	if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
		dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;

	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto error;
	}

	netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, "
		    "node addr %pM\n", board_info[ent->driver_data].name,
		    ((BNX2_CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		    ((BNX2_CHIP_ID(bp) & 0x0ff0) >> 4),
		    bnx2_bus_string(bp, str), (long)pci_resource_start(pdev, 0),
		    pdev->irq, dev->dev_addr);

	return 0;

error:
	pci_iounmap(pdev, bp->regview);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
err_free:
	bnx2_free_stats_blk(dev);
	free_netdev(dev);
	return rc;
}

static void
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	unregister_netdev(dev);

	del_timer_sync(&bp->timer);
	cancel_work_sync(&bp->reset_task);

	pci_iounmap(bp->pdev, bp->regview);

	bnx2_free_stats_blk(dev);
	kfree(bp->temp_stats_blk);

	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
		pci_disable_pcie_error_reporting(pdev);
		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
	}

	bnx2_release_firmware(bp);

	free_netdev(dev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

#ifdef CONFIG_PM_SLEEP
static int
bnx2_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	if (netif_running(dev)) {
		cancel_work_sync(&bp->reset_task);
		bnx2_netif_stop(bp, true);
		netif_device_detach(dev);
		del_timer_sync(&bp->timer);
		bnx2_shutdown_chip(bp);
		__bnx2_free_irq(bp);
		bnx2_free_skbs(bp);
	}
	bnx2_setup_wol(bp);
	return 0;
}

static int
bnx2_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_request_irq(bp);
	bnx2_init_nic(bp, 1);
	bnx2_netif_start(bp, true);
	return 0;
}

static SIMPLE_DEV_PM_OPS(bnx2_pm_ops, bnx2_suspend, bnx2_resume);
#define BNX2_PM_OPS (&bnx2_pm_ops)

#else

#define BNX2_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */

/**
 * bnx2_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	netif_device_detach(dev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev)) {
		bnx2_netif_stop(bp, true);
		del_timer_sync(&bp->timer);
		bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
	}

	pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
	int err = 0;

	rtnl_lock();
	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);

		if (netif_running(dev))
			err = bnx2_init_nic(bp, 1);

		if (!err)
			result = PCI_ERS_RESULT_RECOVERED;
	}

	if (result != PCI_ERS_RESULT_RECOVERED && netif_running(dev)) {
		bnx2_napi_enable(bp);
		dev_close(dev);
	}
	rtnl_unlock();

	if (!(bp->flags & BNX2_FLAG_AER_ENABLED))
		return result;

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"pci_cleanup_aer_uncorrect_error_status failed 0x%x\n",
			err); /* non-fatal, continue */
	}

	return result;
}

/**
 * bnx2_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it is OK to resume normal operation.
 */
static void bnx2_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	if (netif_running(dev))
		bnx2_netif_start(bp, true);

	netif_device_attach(dev);
	rtnl_unlock();
}

static void bnx2_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp;

	if (!dev)
		return;

	bp = netdev_priv(dev);
	if (!bp)
		return;

	rtnl_lock();
	if (netif_running(dev))
		dev_close(bp->dev);

	if (system_state == SYSTEM_POWER_OFF)
		bnx2_set_power_state(bp, PCI_D3hot);

	rtnl_unlock();
}

static const struct pci_error_handlers bnx2_err_handler = {
	.error_detected	= bnx2_io_error_detected,
	.slot_reset	= bnx2_io_slot_reset,
	.resume		= bnx2_io_resume,
};

static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= bnx2_remove_one,
	.driver.pm	= BNX2_PM_OPS,
	.err_handler	= &bnx2_err_handler,
	.shutdown	= bnx2_shutdown,
};

module_pci_driver(bnx2_pci_driver);